From 7840946d94ed241aa1ff03ec99446cbc61eae766 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Tue, 10 Nov 2020 18:16:19 +0100 Subject: [PATCH 01/15] Reorganise content for new theme Signed-off-by: ChrisChinchilla --- site/config.toml | 50 +- site/content/{docs => }/_index.md | 0 site/content/{docs => }/case_studies/index.md | 0 site/content/{docs => }/community/index.md | 0 site/content/{docs => }/ecosystem/index.md | 0 site/content/{docs => }/faqs/_index.md | 0 .../{docs => }/glossary/bootstrapping.md | 0 .../{docs => }/glossary/cardinality.md | 0 site/content/{docs => }/glossary/datapoint.md | 0 site/content/{docs => }/glossary/index.md | 0 site/content/{docs => }/glossary/labels.md | 0 site/content/{docs => }/glossary/m3.md | 0 .../{docs => }/glossary/m3coordinator.md | 0 site/content/{docs => }/glossary/m3db.md | 0 site/content/{docs => }/glossary/m3query.md | 0 site/content/{docs => }/glossary/metric.md | 0 site/content/{docs => }/glossary/namespace.md | 0 site/content/{docs => }/glossary/placement.md | 0 site/content/{docs => }/glossary/shard.md | 0 site/content/{docs => }/glossary/tags.md | 0 .../content/{docs => }/glossary/timeseries.md | 0 site/content/{docs => }/glossary/topology.md | 0 site/content/{docs => }/how_to/_index.md | 0 site/content/{docs => }/how_to/aggregator.md | 0 .../{docs => }/how_to/cluster_hard_way.md | 0 site/content/{docs => }/how_to/grafana.md | 0 site/content/{docs => }/how_to/graphite.md | 0 site/content/{docs => }/how_to/kubernetes.md | 0 .../{docs => }/how_to/monitoring_m3/_index.md | 0 .../{docs => }/how_to/monitoring_m3/alerts.md | 0 .../how_to/monitoring_m3/metrics.md | 0 .../how_to/monitoring_m3/tracing.md | 0 .../content/{docs => }/how_to/other/_index.md | 0 site/content/{docs => }/how_to/other/tsdb.md | 0 .../{docs => }/how_to/other/upgrading.md | 0 site/content/{docs => }/how_to/prometheus.md | 0 site/content/{docs => }/how_to/query.md | 0 site/content/{docs => }/how_to/use_as_tsdb.md | 0 .../includes/headers_optional_read_all.md | 0 .../includes/headers_optional_read_limits.md | 0 .../includes/headers_optional_read_write.md | 0 .../headers_optional_read_write_all.md | 0 .../includes/headers_optional_write_all.md | 0 .../includes/headers_placement_namespace.md | 0 site/content/{docs => }/includes/index.md | 0 .../{docs => }/includes/podtemplate.json | 0 .../includes/quickstart/create-database.sh | 0 .../includes/quickstart/write-metrics-1.sh | 0 .../includes/quickstart/write-metrics-2.sh | 0 .../includes/quickstart/write-metrics-3.sh | 0 .../content/{docs => }/integrations/_index.md | 0 .../{docs => }/integrations/grafana.md | 0 .../{docs => }/integrations/graphite.md | 0 .../content/{docs => }/integrations/influx.md | 0 .../{docs => }/integrations/prometheus.md | 0 .../{docs => }/m3coordinator/_index.md | 0 .../{docs => }/m3coordinator/api/remote.md | 0 site/content/{docs => }/m3db/_index.md | 0 .../{docs => }/m3db/architecture/_index.md | 0 .../{docs => }/m3db/architecture/caching.md | 0 .../m3db/architecture/commitlogs.md | 0 .../m3db/architecture/consistencylevels.md | 0 .../{docs => }/m3db/architecture/engine.md | 0 .../m3db/architecture/peer_streaming.md | 0 .../{docs => }/m3db/architecture/sharding.md | 0 .../{docs => }/m3db/architecture/storage.md | 0 .../m3db/monodraw/m3db-file-layout.monopic | Bin .../m3db/monodraw/m3db_structs.monopic | Bin .../m3db/monodraw/peer_bootstrap.monopic | Bin .../placement_monodraw_template.monopic | Bin .../monodraw/placement_state_machine.monopic | Bin site/content/{docs => }/m3query/_index.md | 0 
site/content/{docs => }/m3query/api/query.md | 0 .../{docs => }/m3query/architecture/_index.md | 0 .../{docs => }/m3query/architecture/blocks.md | 0 .../{docs => }/m3query/architecture/fanout.md | 0 .../m3query/architecture/functions.md | 0 .../{docs => }/m3query/config/_index.md | 0 .../m3query/config/annotated_config.md | 0 .../m3query/config/annotated_config.yaml | 0 .../{docs => }/operational_guide/_index.md | 0 .../availability_consistency_durability.md | 0 .../bootstrapping_crash_recovery.md | 0 .../{docs => }/operational_guide/etcd.md | 0 .../operational_guide/fileset_migrations.md | 0 .../operational_guide/kernel_configuration.md | 0 .../operational_guide/mapping_rollup.md | 0 .../operational_guide/monitoring.md | 0 .../multiple_m3db_clusters.md | 0 .../namespace_configuration.md | 0 .../operational_guide/namespace_mgmt.md | 0 .../{docs => }/operational_guide/placement.md | 0 .../placement_configuration.md | 0 .../{docs => }/operational_guide/repairs.md | 0 .../replication_and_deployment_in_zones.md | 0 .../replication_between_clusters.md | 0 .../operational_guide/replication_global.png | Bin .../operational_guide/replication_region.png | Bin .../replication_single_zone.png | Bin .../operational_guide/resource_limits.md | 0 .../operational_guide/upgrading_m3.md | 0 site/content/{docs => }/overview/_index.md | 0 .../content/{docs => }/overview/components.md | 0 site/content/{docs => }/overview/media.md | 0 .../content/{docs => }/overview/motivation.md | 0 site/content/{docs => }/overview/roadmap.md | 0 site/content/{docs => }/quickstart/_index.md | 0 site/content/quickstart/index.md | 509 ++++++++++++++++++ .../{docs => }/troubleshooting/_index.md | 0 site/layouts/partials/logo.html | 2 +- site/static/logo.svg | 9 + 111 files changed, 562 insertions(+), 8 deletions(-) rename site/content/{docs => }/_index.md (100%) rename site/content/{docs => }/case_studies/index.md (100%) rename site/content/{docs => }/community/index.md (100%) rename site/content/{docs => }/ecosystem/index.md (100%) rename site/content/{docs => }/faqs/_index.md (100%) rename site/content/{docs => }/glossary/bootstrapping.md (100%) rename site/content/{docs => }/glossary/cardinality.md (100%) rename site/content/{docs => }/glossary/datapoint.md (100%) rename site/content/{docs => }/glossary/index.md (100%) rename site/content/{docs => }/glossary/labels.md (100%) rename site/content/{docs => }/glossary/m3.md (100%) rename site/content/{docs => }/glossary/m3coordinator.md (100%) rename site/content/{docs => }/glossary/m3db.md (100%) rename site/content/{docs => }/glossary/m3query.md (100%) rename site/content/{docs => }/glossary/metric.md (100%) rename site/content/{docs => }/glossary/namespace.md (100%) rename site/content/{docs => }/glossary/placement.md (100%) rename site/content/{docs => }/glossary/shard.md (100%) rename site/content/{docs => }/glossary/tags.md (100%) rename site/content/{docs => }/glossary/timeseries.md (100%) rename site/content/{docs => }/glossary/topology.md (100%) rename site/content/{docs => }/how_to/_index.md (100%) rename site/content/{docs => }/how_to/aggregator.md (100%) rename site/content/{docs => }/how_to/cluster_hard_way.md (100%) rename site/content/{docs => }/how_to/grafana.md (100%) rename site/content/{docs => }/how_to/graphite.md (100%) rename site/content/{docs => }/how_to/kubernetes.md (100%) rename site/content/{docs => }/how_to/monitoring_m3/_index.md (100%) rename site/content/{docs => }/how_to/monitoring_m3/alerts.md (100%) rename site/content/{docs => 
}/how_to/monitoring_m3/metrics.md (100%) rename site/content/{docs => }/how_to/monitoring_m3/tracing.md (100%) rename site/content/{docs => }/how_to/other/_index.md (100%) rename site/content/{docs => }/how_to/other/tsdb.md (100%) rename site/content/{docs => }/how_to/other/upgrading.md (100%) rename site/content/{docs => }/how_to/prometheus.md (100%) rename site/content/{docs => }/how_to/query.md (100%) rename site/content/{docs => }/how_to/use_as_tsdb.md (100%) rename site/content/{docs => }/includes/headers_optional_read_all.md (100%) rename site/content/{docs => }/includes/headers_optional_read_limits.md (100%) rename site/content/{docs => }/includes/headers_optional_read_write.md (100%) rename site/content/{docs => }/includes/headers_optional_read_write_all.md (100%) rename site/content/{docs => }/includes/headers_optional_write_all.md (100%) rename site/content/{docs => }/includes/headers_placement_namespace.md (100%) rename site/content/{docs => }/includes/index.md (100%) rename site/content/{docs => }/includes/podtemplate.json (100%) rename site/content/{docs => }/includes/quickstart/create-database.sh (100%) rename site/content/{docs => }/includes/quickstart/write-metrics-1.sh (100%) rename site/content/{docs => }/includes/quickstart/write-metrics-2.sh (100%) rename site/content/{docs => }/includes/quickstart/write-metrics-3.sh (100%) rename site/content/{docs => }/integrations/_index.md (100%) rename site/content/{docs => }/integrations/grafana.md (100%) rename site/content/{docs => }/integrations/graphite.md (100%) rename site/content/{docs => }/integrations/influx.md (100%) rename site/content/{docs => }/integrations/prometheus.md (100%) rename site/content/{docs => }/m3coordinator/_index.md (100%) rename site/content/{docs => }/m3coordinator/api/remote.md (100%) rename site/content/{docs => }/m3db/_index.md (100%) rename site/content/{docs => }/m3db/architecture/_index.md (100%) rename site/content/{docs => }/m3db/architecture/caching.md (100%) rename site/content/{docs => }/m3db/architecture/commitlogs.md (100%) rename site/content/{docs => }/m3db/architecture/consistencylevels.md (100%) rename site/content/{docs => }/m3db/architecture/engine.md (100%) rename site/content/{docs => }/m3db/architecture/peer_streaming.md (100%) rename site/content/{docs => }/m3db/architecture/sharding.md (100%) rename site/content/{docs => }/m3db/architecture/storage.md (100%) rename site/content/{docs => }/m3db/monodraw/m3db-file-layout.monopic (100%) rename site/content/{docs => }/m3db/monodraw/m3db_structs.monopic (100%) rename site/content/{docs => }/m3db/monodraw/peer_bootstrap.monopic (100%) rename site/content/{docs => }/m3db/monodraw/placement_monodraw_template.monopic (100%) rename site/content/{docs => }/m3db/monodraw/placement_state_machine.monopic (100%) rename site/content/{docs => }/m3query/_index.md (100%) rename site/content/{docs => }/m3query/api/query.md (100%) rename site/content/{docs => }/m3query/architecture/_index.md (100%) rename site/content/{docs => }/m3query/architecture/blocks.md (100%) rename site/content/{docs => }/m3query/architecture/fanout.md (100%) rename site/content/{docs => }/m3query/architecture/functions.md (100%) rename site/content/{docs => }/m3query/config/_index.md (100%) rename site/content/{docs => }/m3query/config/annotated_config.md (100%) rename site/content/{docs => }/m3query/config/annotated_config.yaml (100%) rename site/content/{docs => }/operational_guide/_index.md (100%) rename site/content/{docs => 
}/operational_guide/availability_consistency_durability.md (100%) rename site/content/{docs => }/operational_guide/bootstrapping_crash_recovery.md (100%) rename site/content/{docs => }/operational_guide/etcd.md (100%) rename site/content/{docs => }/operational_guide/fileset_migrations.md (100%) rename site/content/{docs => }/operational_guide/kernel_configuration.md (100%) rename site/content/{docs => }/operational_guide/mapping_rollup.md (100%) rename site/content/{docs => }/operational_guide/monitoring.md (100%) rename site/content/{docs => }/operational_guide/multiple_m3db_clusters.md (100%) rename site/content/{docs => }/operational_guide/namespace_configuration.md (100%) rename site/content/{docs => }/operational_guide/namespace_mgmt.md (100%) rename site/content/{docs => }/operational_guide/placement.md (100%) rename site/content/{docs => }/operational_guide/placement_configuration.md (100%) rename site/content/{docs => }/operational_guide/repairs.md (100%) rename site/content/{docs => }/operational_guide/replication_and_deployment_in_zones.md (100%) rename site/content/{docs => }/operational_guide/replication_between_clusters.md (100%) rename site/content/{docs => }/operational_guide/replication_global.png (100%) rename site/content/{docs => }/operational_guide/replication_region.png (100%) rename site/content/{docs => }/operational_guide/replication_single_zone.png (100%) rename site/content/{docs => }/operational_guide/resource_limits.md (100%) rename site/content/{docs => }/operational_guide/upgrading_m3.md (100%) rename site/content/{docs => }/overview/_index.md (100%) rename site/content/{docs => }/overview/components.md (100%) rename site/content/{docs => }/overview/media.md (100%) rename site/content/{docs => }/overview/motivation.md (100%) rename site/content/{docs => }/overview/roadmap.md (100%) rename site/content/{docs => }/quickstart/_index.md (100%) create mode 100644 site/content/quickstart/index.md rename site/content/{docs => }/troubleshooting/_index.md (100%) create mode 100644 site/static/logo.svg diff --git a/site/config.toml b/site/config.toml index fac70123cf..a3a7e87495 100644 --- a/site/config.toml +++ b/site/config.toml @@ -1,8 +1,8 @@ # Change the default theme to be use when building the site with Hugo [module] [[module.imports]] - path = "github.com/chronosphereio/docs-theme" - # path = "../../../docs-theme" + # path = "github.com/chronosphereio/docs-theme" + path = "../../../victor" [[module.mounts]] source = "content" @@ -32,8 +32,24 @@ source = "archetypes" target = "archetypes" -# theme = "docs-theme" +[permalinks] + "/" = "/docs/:section/:title/" + "case_studies" = "/docs/:section/:title/" + "community" = "/docs/:section/:title/" + "ecosystem" = "/docs/:section/:title/" + "faqs" = "/docs/:section/:title/" + "glossary" = "/docs/:section/:title/" + "how_to" = "/docs/:section/:title/" + "integrations" = "/docs/:section/:title/" + "m3coordinator" = "/docs/:section/:title/" + "m3db" = "/docs/:section/:title/" + "m3query" = "/docs/:section/:title/" + "operational_guide" = "/docs/:section/:title/" + "overview" = "/docs/:section/:title/" + "quickstart" = "/docs/:section/:title/" + "troubleshooting" = "/docs/:section/:title/" +# theme = "docs-theme" # baseURL = "/" languageCode = "en-US" defaultContentLanguage = "en" @@ -116,6 +132,7 @@ offlineSearch = false # Change default color scheme with a variant one. Can be "red", "blue", "green". 
themeVariant = "blue" twitter = "m3db_io" + disableHomeIcon = true [params.api] localCordinator = "http://localhost:7201/" @@ -155,14 +172,33 @@ offlineSearch = false taskList = true typographer = true -[[Languages.en.menu.shortcuts]] -name = " GitHub" +[[menu.shortcuts]] +pre = "
More
" +name = " " identifier = "ds" url = "https://github.com/m3db/m3" weight = 10 +[[menu.shortcuts]] +name = " " +url = "https://github.com/vjeantet/hugo-theme-docdock/archive/master.zip" +weight = 11 + +[[menu.shortcuts]] +name = " " +identifier = "hugodoc" +url = "https://gohugo.io/" +weight = 20 + +[[menu.shortcuts]] +name = " " +url = "/credits" +weight = 30 + + + [outputs] -home = [ "HTML", "RSS", "JSON"] +home = [ "HTML"] page = [ "HTML"] section = [ "HTML"] -chapter = [ "HTML"] +chapter = [ "HTML"] \ No newline at end of file diff --git a/site/content/docs/_index.md b/site/content/_index.md similarity index 100% rename from site/content/docs/_index.md rename to site/content/_index.md diff --git a/site/content/docs/case_studies/index.md b/site/content/case_studies/index.md similarity index 100% rename from site/content/docs/case_studies/index.md rename to site/content/case_studies/index.md diff --git a/site/content/docs/community/index.md b/site/content/community/index.md similarity index 100% rename from site/content/docs/community/index.md rename to site/content/community/index.md diff --git a/site/content/docs/ecosystem/index.md b/site/content/ecosystem/index.md similarity index 100% rename from site/content/docs/ecosystem/index.md rename to site/content/ecosystem/index.md diff --git a/site/content/docs/faqs/_index.md b/site/content/faqs/_index.md similarity index 100% rename from site/content/docs/faqs/_index.md rename to site/content/faqs/_index.md diff --git a/site/content/docs/glossary/bootstrapping.md b/site/content/glossary/bootstrapping.md similarity index 100% rename from site/content/docs/glossary/bootstrapping.md rename to site/content/glossary/bootstrapping.md diff --git a/site/content/docs/glossary/cardinality.md b/site/content/glossary/cardinality.md similarity index 100% rename from site/content/docs/glossary/cardinality.md rename to site/content/glossary/cardinality.md diff --git a/site/content/docs/glossary/datapoint.md b/site/content/glossary/datapoint.md similarity index 100% rename from site/content/docs/glossary/datapoint.md rename to site/content/glossary/datapoint.md diff --git a/site/content/docs/glossary/index.md b/site/content/glossary/index.md similarity index 100% rename from site/content/docs/glossary/index.md rename to site/content/glossary/index.md diff --git a/site/content/docs/glossary/labels.md b/site/content/glossary/labels.md similarity index 100% rename from site/content/docs/glossary/labels.md rename to site/content/glossary/labels.md diff --git a/site/content/docs/glossary/m3.md b/site/content/glossary/m3.md similarity index 100% rename from site/content/docs/glossary/m3.md rename to site/content/glossary/m3.md diff --git a/site/content/docs/glossary/m3coordinator.md b/site/content/glossary/m3coordinator.md similarity index 100% rename from site/content/docs/glossary/m3coordinator.md rename to site/content/glossary/m3coordinator.md diff --git a/site/content/docs/glossary/m3db.md b/site/content/glossary/m3db.md similarity index 100% rename from site/content/docs/glossary/m3db.md rename to site/content/glossary/m3db.md diff --git a/site/content/docs/glossary/m3query.md b/site/content/glossary/m3query.md similarity index 100% rename from site/content/docs/glossary/m3query.md rename to site/content/glossary/m3query.md diff --git a/site/content/docs/glossary/metric.md b/site/content/glossary/metric.md similarity index 100% rename from site/content/docs/glossary/metric.md rename to site/content/glossary/metric.md diff --git 
a/site/content/docs/glossary/namespace.md b/site/content/glossary/namespace.md similarity index 100% rename from site/content/docs/glossary/namespace.md rename to site/content/glossary/namespace.md diff --git a/site/content/docs/glossary/placement.md b/site/content/glossary/placement.md similarity index 100% rename from site/content/docs/glossary/placement.md rename to site/content/glossary/placement.md diff --git a/site/content/docs/glossary/shard.md b/site/content/glossary/shard.md similarity index 100% rename from site/content/docs/glossary/shard.md rename to site/content/glossary/shard.md diff --git a/site/content/docs/glossary/tags.md b/site/content/glossary/tags.md similarity index 100% rename from site/content/docs/glossary/tags.md rename to site/content/glossary/tags.md diff --git a/site/content/docs/glossary/timeseries.md b/site/content/glossary/timeseries.md similarity index 100% rename from site/content/docs/glossary/timeseries.md rename to site/content/glossary/timeseries.md diff --git a/site/content/docs/glossary/topology.md b/site/content/glossary/topology.md similarity index 100% rename from site/content/docs/glossary/topology.md rename to site/content/glossary/topology.md diff --git a/site/content/docs/how_to/_index.md b/site/content/how_to/_index.md similarity index 100% rename from site/content/docs/how_to/_index.md rename to site/content/how_to/_index.md diff --git a/site/content/docs/how_to/aggregator.md b/site/content/how_to/aggregator.md similarity index 100% rename from site/content/docs/how_to/aggregator.md rename to site/content/how_to/aggregator.md diff --git a/site/content/docs/how_to/cluster_hard_way.md b/site/content/how_to/cluster_hard_way.md similarity index 100% rename from site/content/docs/how_to/cluster_hard_way.md rename to site/content/how_to/cluster_hard_way.md diff --git a/site/content/docs/how_to/grafana.md b/site/content/how_to/grafana.md similarity index 100% rename from site/content/docs/how_to/grafana.md rename to site/content/how_to/grafana.md diff --git a/site/content/docs/how_to/graphite.md b/site/content/how_to/graphite.md similarity index 100% rename from site/content/docs/how_to/graphite.md rename to site/content/how_to/graphite.md diff --git a/site/content/docs/how_to/kubernetes.md b/site/content/how_to/kubernetes.md similarity index 100% rename from site/content/docs/how_to/kubernetes.md rename to site/content/how_to/kubernetes.md diff --git a/site/content/docs/how_to/monitoring_m3/_index.md b/site/content/how_to/monitoring_m3/_index.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/_index.md rename to site/content/how_to/monitoring_m3/_index.md diff --git a/site/content/docs/how_to/monitoring_m3/alerts.md b/site/content/how_to/monitoring_m3/alerts.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/alerts.md rename to site/content/how_to/monitoring_m3/alerts.md diff --git a/site/content/docs/how_to/monitoring_m3/metrics.md b/site/content/how_to/monitoring_m3/metrics.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/metrics.md rename to site/content/how_to/monitoring_m3/metrics.md diff --git a/site/content/docs/how_to/monitoring_m3/tracing.md b/site/content/how_to/monitoring_m3/tracing.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/tracing.md rename to site/content/how_to/monitoring_m3/tracing.md diff --git a/site/content/docs/how_to/other/_index.md b/site/content/how_to/other/_index.md similarity index 100% rename from 
site/content/docs/how_to/other/_index.md rename to site/content/how_to/other/_index.md diff --git a/site/content/docs/how_to/other/tsdb.md b/site/content/how_to/other/tsdb.md similarity index 100% rename from site/content/docs/how_to/other/tsdb.md rename to site/content/how_to/other/tsdb.md diff --git a/site/content/docs/how_to/other/upgrading.md b/site/content/how_to/other/upgrading.md similarity index 100% rename from site/content/docs/how_to/other/upgrading.md rename to site/content/how_to/other/upgrading.md diff --git a/site/content/docs/how_to/prometheus.md b/site/content/how_to/prometheus.md similarity index 100% rename from site/content/docs/how_to/prometheus.md rename to site/content/how_to/prometheus.md diff --git a/site/content/docs/how_to/query.md b/site/content/how_to/query.md similarity index 100% rename from site/content/docs/how_to/query.md rename to site/content/how_to/query.md diff --git a/site/content/docs/how_to/use_as_tsdb.md b/site/content/how_to/use_as_tsdb.md similarity index 100% rename from site/content/docs/how_to/use_as_tsdb.md rename to site/content/how_to/use_as_tsdb.md diff --git a/site/content/docs/includes/headers_optional_read_all.md b/site/content/includes/headers_optional_read_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_all.md rename to site/content/includes/headers_optional_read_all.md diff --git a/site/content/docs/includes/headers_optional_read_limits.md b/site/content/includes/headers_optional_read_limits.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_limits.md rename to site/content/includes/headers_optional_read_limits.md diff --git a/site/content/docs/includes/headers_optional_read_write.md b/site/content/includes/headers_optional_read_write.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_write.md rename to site/content/includes/headers_optional_read_write.md diff --git a/site/content/docs/includes/headers_optional_read_write_all.md b/site/content/includes/headers_optional_read_write_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_write_all.md rename to site/content/includes/headers_optional_read_write_all.md diff --git a/site/content/docs/includes/headers_optional_write_all.md b/site/content/includes/headers_optional_write_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_write_all.md rename to site/content/includes/headers_optional_write_all.md diff --git a/site/content/docs/includes/headers_placement_namespace.md b/site/content/includes/headers_placement_namespace.md similarity index 100% rename from site/content/docs/includes/headers_placement_namespace.md rename to site/content/includes/headers_placement_namespace.md diff --git a/site/content/docs/includes/index.md b/site/content/includes/index.md similarity index 100% rename from site/content/docs/includes/index.md rename to site/content/includes/index.md diff --git a/site/content/docs/includes/podtemplate.json b/site/content/includes/podtemplate.json similarity index 100% rename from site/content/docs/includes/podtemplate.json rename to site/content/includes/podtemplate.json diff --git a/site/content/docs/includes/quickstart/create-database.sh b/site/content/includes/quickstart/create-database.sh similarity index 100% rename from site/content/docs/includes/quickstart/create-database.sh rename to site/content/includes/quickstart/create-database.sh diff --git 
a/site/content/docs/includes/quickstart/write-metrics-1.sh b/site/content/includes/quickstart/write-metrics-1.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-1.sh rename to site/content/includes/quickstart/write-metrics-1.sh diff --git a/site/content/docs/includes/quickstart/write-metrics-2.sh b/site/content/includes/quickstart/write-metrics-2.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-2.sh rename to site/content/includes/quickstart/write-metrics-2.sh diff --git a/site/content/docs/includes/quickstart/write-metrics-3.sh b/site/content/includes/quickstart/write-metrics-3.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-3.sh rename to site/content/includes/quickstart/write-metrics-3.sh diff --git a/site/content/docs/integrations/_index.md b/site/content/integrations/_index.md similarity index 100% rename from site/content/docs/integrations/_index.md rename to site/content/integrations/_index.md diff --git a/site/content/docs/integrations/grafana.md b/site/content/integrations/grafana.md similarity index 100% rename from site/content/docs/integrations/grafana.md rename to site/content/integrations/grafana.md diff --git a/site/content/docs/integrations/graphite.md b/site/content/integrations/graphite.md similarity index 100% rename from site/content/docs/integrations/graphite.md rename to site/content/integrations/graphite.md diff --git a/site/content/docs/integrations/influx.md b/site/content/integrations/influx.md similarity index 100% rename from site/content/docs/integrations/influx.md rename to site/content/integrations/influx.md diff --git a/site/content/docs/integrations/prometheus.md b/site/content/integrations/prometheus.md similarity index 100% rename from site/content/docs/integrations/prometheus.md rename to site/content/integrations/prometheus.md diff --git a/site/content/docs/m3coordinator/_index.md b/site/content/m3coordinator/_index.md similarity index 100% rename from site/content/docs/m3coordinator/_index.md rename to site/content/m3coordinator/_index.md diff --git a/site/content/docs/m3coordinator/api/remote.md b/site/content/m3coordinator/api/remote.md similarity index 100% rename from site/content/docs/m3coordinator/api/remote.md rename to site/content/m3coordinator/api/remote.md diff --git a/site/content/docs/m3db/_index.md b/site/content/m3db/_index.md similarity index 100% rename from site/content/docs/m3db/_index.md rename to site/content/m3db/_index.md diff --git a/site/content/docs/m3db/architecture/_index.md b/site/content/m3db/architecture/_index.md similarity index 100% rename from site/content/docs/m3db/architecture/_index.md rename to site/content/m3db/architecture/_index.md diff --git a/site/content/docs/m3db/architecture/caching.md b/site/content/m3db/architecture/caching.md similarity index 100% rename from site/content/docs/m3db/architecture/caching.md rename to site/content/m3db/architecture/caching.md diff --git a/site/content/docs/m3db/architecture/commitlogs.md b/site/content/m3db/architecture/commitlogs.md similarity index 100% rename from site/content/docs/m3db/architecture/commitlogs.md rename to site/content/m3db/architecture/commitlogs.md diff --git a/site/content/docs/m3db/architecture/consistencylevels.md b/site/content/m3db/architecture/consistencylevels.md similarity index 100% rename from site/content/docs/m3db/architecture/consistencylevels.md rename to site/content/m3db/architecture/consistencylevels.md diff --git 
a/site/content/docs/m3db/architecture/engine.md b/site/content/m3db/architecture/engine.md similarity index 100% rename from site/content/docs/m3db/architecture/engine.md rename to site/content/m3db/architecture/engine.md diff --git a/site/content/docs/m3db/architecture/peer_streaming.md b/site/content/m3db/architecture/peer_streaming.md similarity index 100% rename from site/content/docs/m3db/architecture/peer_streaming.md rename to site/content/m3db/architecture/peer_streaming.md diff --git a/site/content/docs/m3db/architecture/sharding.md b/site/content/m3db/architecture/sharding.md similarity index 100% rename from site/content/docs/m3db/architecture/sharding.md rename to site/content/m3db/architecture/sharding.md diff --git a/site/content/docs/m3db/architecture/storage.md b/site/content/m3db/architecture/storage.md similarity index 100% rename from site/content/docs/m3db/architecture/storage.md rename to site/content/m3db/architecture/storage.md diff --git a/site/content/docs/m3db/monodraw/m3db-file-layout.monopic b/site/content/m3db/monodraw/m3db-file-layout.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/m3db-file-layout.monopic rename to site/content/m3db/monodraw/m3db-file-layout.monopic diff --git a/site/content/docs/m3db/monodraw/m3db_structs.monopic b/site/content/m3db/monodraw/m3db_structs.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/m3db_structs.monopic rename to site/content/m3db/monodraw/m3db_structs.monopic diff --git a/site/content/docs/m3db/monodraw/peer_bootstrap.monopic b/site/content/m3db/monodraw/peer_bootstrap.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/peer_bootstrap.monopic rename to site/content/m3db/monodraw/peer_bootstrap.monopic diff --git a/site/content/docs/m3db/monodraw/placement_monodraw_template.monopic b/site/content/m3db/monodraw/placement_monodraw_template.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/placement_monodraw_template.monopic rename to site/content/m3db/monodraw/placement_monodraw_template.monopic diff --git a/site/content/docs/m3db/monodraw/placement_state_machine.monopic b/site/content/m3db/monodraw/placement_state_machine.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/placement_state_machine.monopic rename to site/content/m3db/monodraw/placement_state_machine.monopic diff --git a/site/content/docs/m3query/_index.md b/site/content/m3query/_index.md similarity index 100% rename from site/content/docs/m3query/_index.md rename to site/content/m3query/_index.md diff --git a/site/content/docs/m3query/api/query.md b/site/content/m3query/api/query.md similarity index 100% rename from site/content/docs/m3query/api/query.md rename to site/content/m3query/api/query.md diff --git a/site/content/docs/m3query/architecture/_index.md b/site/content/m3query/architecture/_index.md similarity index 100% rename from site/content/docs/m3query/architecture/_index.md rename to site/content/m3query/architecture/_index.md diff --git a/site/content/docs/m3query/architecture/blocks.md b/site/content/m3query/architecture/blocks.md similarity index 100% rename from site/content/docs/m3query/architecture/blocks.md rename to site/content/m3query/architecture/blocks.md diff --git a/site/content/docs/m3query/architecture/fanout.md b/site/content/m3query/architecture/fanout.md similarity index 100% rename from site/content/docs/m3query/architecture/fanout.md rename to site/content/m3query/architecture/fanout.md diff --git 
a/site/content/docs/m3query/architecture/functions.md b/site/content/m3query/architecture/functions.md similarity index 100% rename from site/content/docs/m3query/architecture/functions.md rename to site/content/m3query/architecture/functions.md diff --git a/site/content/docs/m3query/config/_index.md b/site/content/m3query/config/_index.md similarity index 100% rename from site/content/docs/m3query/config/_index.md rename to site/content/m3query/config/_index.md diff --git a/site/content/docs/m3query/config/annotated_config.md b/site/content/m3query/config/annotated_config.md similarity index 100% rename from site/content/docs/m3query/config/annotated_config.md rename to site/content/m3query/config/annotated_config.md diff --git a/site/content/docs/m3query/config/annotated_config.yaml b/site/content/m3query/config/annotated_config.yaml similarity index 100% rename from site/content/docs/m3query/config/annotated_config.yaml rename to site/content/m3query/config/annotated_config.yaml diff --git a/site/content/docs/operational_guide/_index.md b/site/content/operational_guide/_index.md similarity index 100% rename from site/content/docs/operational_guide/_index.md rename to site/content/operational_guide/_index.md diff --git a/site/content/docs/operational_guide/availability_consistency_durability.md b/site/content/operational_guide/availability_consistency_durability.md similarity index 100% rename from site/content/docs/operational_guide/availability_consistency_durability.md rename to site/content/operational_guide/availability_consistency_durability.md diff --git a/site/content/docs/operational_guide/bootstrapping_crash_recovery.md b/site/content/operational_guide/bootstrapping_crash_recovery.md similarity index 100% rename from site/content/docs/operational_guide/bootstrapping_crash_recovery.md rename to site/content/operational_guide/bootstrapping_crash_recovery.md diff --git a/site/content/docs/operational_guide/etcd.md b/site/content/operational_guide/etcd.md similarity index 100% rename from site/content/docs/operational_guide/etcd.md rename to site/content/operational_guide/etcd.md diff --git a/site/content/docs/operational_guide/fileset_migrations.md b/site/content/operational_guide/fileset_migrations.md similarity index 100% rename from site/content/docs/operational_guide/fileset_migrations.md rename to site/content/operational_guide/fileset_migrations.md diff --git a/site/content/docs/operational_guide/kernel_configuration.md b/site/content/operational_guide/kernel_configuration.md similarity index 100% rename from site/content/docs/operational_guide/kernel_configuration.md rename to site/content/operational_guide/kernel_configuration.md diff --git a/site/content/docs/operational_guide/mapping_rollup.md b/site/content/operational_guide/mapping_rollup.md similarity index 100% rename from site/content/docs/operational_guide/mapping_rollup.md rename to site/content/operational_guide/mapping_rollup.md diff --git a/site/content/docs/operational_guide/monitoring.md b/site/content/operational_guide/monitoring.md similarity index 100% rename from site/content/docs/operational_guide/monitoring.md rename to site/content/operational_guide/monitoring.md diff --git a/site/content/docs/operational_guide/multiple_m3db_clusters.md b/site/content/operational_guide/multiple_m3db_clusters.md similarity index 100% rename from site/content/docs/operational_guide/multiple_m3db_clusters.md rename to site/content/operational_guide/multiple_m3db_clusters.md diff --git 
a/site/content/docs/operational_guide/namespace_configuration.md b/site/content/operational_guide/namespace_configuration.md similarity index 100% rename from site/content/docs/operational_guide/namespace_configuration.md rename to site/content/operational_guide/namespace_configuration.md diff --git a/site/content/docs/operational_guide/namespace_mgmt.md b/site/content/operational_guide/namespace_mgmt.md similarity index 100% rename from site/content/docs/operational_guide/namespace_mgmt.md rename to site/content/operational_guide/namespace_mgmt.md diff --git a/site/content/docs/operational_guide/placement.md b/site/content/operational_guide/placement.md similarity index 100% rename from site/content/docs/operational_guide/placement.md rename to site/content/operational_guide/placement.md diff --git a/site/content/docs/operational_guide/placement_configuration.md b/site/content/operational_guide/placement_configuration.md similarity index 100% rename from site/content/docs/operational_guide/placement_configuration.md rename to site/content/operational_guide/placement_configuration.md diff --git a/site/content/docs/operational_guide/repairs.md b/site/content/operational_guide/repairs.md similarity index 100% rename from site/content/docs/operational_guide/repairs.md rename to site/content/operational_guide/repairs.md diff --git a/site/content/docs/operational_guide/replication_and_deployment_in_zones.md b/site/content/operational_guide/replication_and_deployment_in_zones.md similarity index 100% rename from site/content/docs/operational_guide/replication_and_deployment_in_zones.md rename to site/content/operational_guide/replication_and_deployment_in_zones.md diff --git a/site/content/docs/operational_guide/replication_between_clusters.md b/site/content/operational_guide/replication_between_clusters.md similarity index 100% rename from site/content/docs/operational_guide/replication_between_clusters.md rename to site/content/operational_guide/replication_between_clusters.md diff --git a/site/content/docs/operational_guide/replication_global.png b/site/content/operational_guide/replication_global.png similarity index 100% rename from site/content/docs/operational_guide/replication_global.png rename to site/content/operational_guide/replication_global.png diff --git a/site/content/docs/operational_guide/replication_region.png b/site/content/operational_guide/replication_region.png similarity index 100% rename from site/content/docs/operational_guide/replication_region.png rename to site/content/operational_guide/replication_region.png diff --git a/site/content/docs/operational_guide/replication_single_zone.png b/site/content/operational_guide/replication_single_zone.png similarity index 100% rename from site/content/docs/operational_guide/replication_single_zone.png rename to site/content/operational_guide/replication_single_zone.png diff --git a/site/content/docs/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md similarity index 100% rename from site/content/docs/operational_guide/resource_limits.md rename to site/content/operational_guide/resource_limits.md diff --git a/site/content/docs/operational_guide/upgrading_m3.md b/site/content/operational_guide/upgrading_m3.md similarity index 100% rename from site/content/docs/operational_guide/upgrading_m3.md rename to site/content/operational_guide/upgrading_m3.md diff --git a/site/content/docs/overview/_index.md b/site/content/overview/_index.md similarity index 100% rename from 
site/content/docs/overview/_index.md rename to site/content/overview/_index.md diff --git a/site/content/docs/overview/components.md b/site/content/overview/components.md similarity index 100% rename from site/content/docs/overview/components.md rename to site/content/overview/components.md diff --git a/site/content/docs/overview/media.md b/site/content/overview/media.md similarity index 100% rename from site/content/docs/overview/media.md rename to site/content/overview/media.md diff --git a/site/content/docs/overview/motivation.md b/site/content/overview/motivation.md similarity index 100% rename from site/content/docs/overview/motivation.md rename to site/content/overview/motivation.md diff --git a/site/content/docs/overview/roadmap.md b/site/content/overview/roadmap.md similarity index 100% rename from site/content/docs/overview/roadmap.md rename to site/content/overview/roadmap.md diff --git a/site/content/docs/quickstart/_index.md b/site/content/quickstart/_index.md similarity index 100% rename from site/content/docs/quickstart/_index.md rename to site/content/quickstart/_index.md diff --git a/site/content/quickstart/index.md b/site/content/quickstart/index.md new file mode 100644 index 0000000000..6a9d4d8590 --- /dev/null +++ b/site/content/quickstart/index.md @@ -0,0 +1,509 @@ +--- +linktitle: "Quickstart using Docker" +weight: 1 +--- + + + +# Creating a Single Node M3DB Cluster with Docker + +This guide shows how to install and configure M3DB, create a single-node cluster, and read and write metrics to it. + +{{% notice warning %}} +Deploying a single-node M3DB cluster is a great way to experiment with M3DB and get an idea of what it has to offer, but is not designed for production use. To run M3DB in clustered mode with a separate M3Coordinator, [read the clustered mode guide](/docs/how_to/cluster_hard_way). +{{% /notice %}} + +## Prerequisites + +- **Docker**: You don't need [Docker](https://www.docker.com/get-started) to run M3DB, but it is the simplest and quickest way. + - If you use Docker Desktop, we recommend the following minimum _Resources_ settings. + - _CPUs_: 2 + - _Memory_: 8GB + - _Swap_: 1GB + - _Disk image size_: 16GB +- **JQ**: This example uses [jq](https://stedolan.github.io/jq/) to format the output of API calls. It is not essential for using M3DB. +- **curl**: This example uses curl for communicating with M3DB endpoints. You can also use alternatives such as [Wget](https://www.gnu.org/software/wget/) and [HTTPie](https://httpie.org/). + +## Start Docker Container + +By default the official M3DB Docker image configures a single M3DB instance as one binary containing: + +- An M3DB storage instance for time series storage. It includes an embedded tag-based metrics index and an etcd server for storing the cluster topology and runtime configuration. +- A coordinator instance for writing and querying tagged metrics, as well as managing cluster topology and runtime configuration. + +The Docker container exposes three ports: + +- `7201` to manage the cluster topology, you make most API calls to this endpoint +- `7203` for Prometheus to scrape the metrics produced by M3DB and M3Coordinator + +The command below creates a persistent data directory on the host operating system to maintain durability and persistence between container restarts. 
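Docker creates the mounted host directory automatically if it doesn't exist, but you can create it up front if you want to control its location or permissions. A minimal sketch, assuming the same `m3db_data` path used in the command below:

```shell
# Optional: pre-create the host directory the container mounts at /var/lib/m3db
# (the path is an assumption matching the -v flag in the docker run command below)
mkdir -p "$(pwd)/m3db_data"
```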
+ +{{< tabs name="start_container" >}} +{{% tab name="Command" %}} + +```shell +docker run -p 7201:7201 -p 7203:7203 --name m3db -v $(pwd)/m3db_data:/var/lib/m3db quay.io/m3db/m3dbnode:latest +``` + +{{% /tab %}} +{{% tab name="Output" %}} + + + +![Docker pull and run](/docker-install.gif) + +{{% /tab %}} +{{< /tabs >}} + +{{% notice info %}} +When running the command above on Docker for Mac, Docker for Windows, and some Linux distributions you may see errors about settings not being at recommended values. Unless you intend to run M3DB in production on macOS or Windows, you can ignore these warnings. +{{% /notice %}} + +## Configuration + +The single-node cluster Docker image uses this [sample configuration file](https://github.com/m3db/m3/blob/master/src/dbnode/config/m3dbnode-local-etcd.yml) by default. + +The file groups configuration into `coordinator` or `db` sections that represent the `M3Coordinator` and `M3DB` instances of single-node cluster. + + + +{{% notice tip %}} +You can find more information on configuring M3DB in the [operational guides section](/docs/operational_guide/). +{{% /notice %}} + +## Organizing Data with Placements and Namespaces + +A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. + +As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into {{< glossary_tooltip text="shards" term_id="shard" >}}. + + + +If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3DB uses different terminology to represent some concepts. + +- Every cluster has **one** {{< glossary_tooltip text="placement" term_id="placement" >}} that maps shards to nodes in the cluster. +- A cluster can have **0 or more** {{< glossary_tooltip text="namespaces" term_id="namespace" >}} that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. + + + +For example, if the cluster placement states that node A owns shards 1, 2, and 3, then node A owns shards 1, 2, 3 for all configured namespaces in the cluster. Each namespace has its own configuration options, including a name and retention time for the data. + +## Create a Placement and Namespace + +This quickstart uses the _{{% apiendpoint %}}database/create_ endpoint that creates a namespace, and the placement if it doesn't already exist based on the `type` argument. + +You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. + +The `namespaceName` argument must match the namespace in the `local` section of the `M3Coordinator` YAML configuration. If you [add any namespaces](/docs/operational_guide/namespace_configuration) you also need to add them to the `local` section of `M3Coordinator`'s YAML config. + +In another terminal, use the following command. 
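The script below wraps a single call to the _{{% apiendpoint %}}database/create_ endpoint. Issued by hand, the request looks roughly like the following sketch; the namespace name and retention value are assumptions that match the default output shown later in this guide, so adjust them to suit your configuration:

```shell
# A sketch of the kind of request the included script makes (field values are assumptions):
# create the "default" namespace with 12 hours of retention on a local, single-node placement
curl -X POST {{% apiendpoint %}}database/create -d '{
  "type": "local",
  "namespaceName": "default",
  "retentionTime": "12h"
}' | jq .
```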
+ +{{< tabs name="create_placement_namespace" >}} +{{% tab name="Command" %}} + +{{% codeinclude file="quickstart/create-database.sh" language="shell" %}} + +{{% notice tip %}} +The Docker command used above creates a Docker [persistent volume](https://docs.docker.com/storage/volumes/) to keep the data M3 creates on your host file system between container restarts. If you have already followed this tutorial, the namespace already exists. You can clear the data by deleting the contents of the _m3db_data_ folder, or deleting the namespace with [the DELETE endpoint](/docs/operational_guide/namespace_configuration/#deleting-a-namespace). +{{% /notice %}} + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "namespace": { + "registry": { + "namespaces": { + "default": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": "43200000000000", + "blockSizeNanos": "1800000000000", + "bufferFutureNanos": "120000000000", + "bufferPastNanos": "600000000000", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", + "futureRetentionPeriodNanos": "0" + }, + "snapshotEnabled": true, + "indexOptions": { + "enabled": true, + "blockSizeNanos": "1800000000000" + }, + "schemaOptions": null, + "coldWritesEnabled": false, + "runtimeOptions": null + } + } + } + }, + "placement": { + "placement": { + "instances": { + "m3db_local": { + "id": "m3db_local", + "isolationGroup": "local", + "zone": "embedded", + "weight": 1, + "endpoint": "127.0.0.1:9000", + "shards": [ + { + "id": 0, + "state": "INITIALIZING", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + }, + … + { + "id": 63, + "state": "INITIALIZING", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + } + ], + "shardSetId": 0, + "hostname": "localhost", + "port": 9000, + "metadata": { + "debugPort": 0 + } + } + }, + "replicaFactor": 1, + "numShards": 64, + "isSharded": true, + "cutoverTime": "0", + "isMirrored": false, + "maxShardSetId": 0 + }, + "version": 0 + } +} +``` + +{{< /tab >}} +{{< /tabs >}} + +Placement initialization can take a minute or two. Once all the shards have the `AVAILABLE` state, the node has finished bootstrapping, and you should see the following messages in the node console output. + + + +```shell +{"level":"info","ts":1598367624.0117292,"msg":"bootstrap marking all shards as bootstrapped","namespace":"default","namespace":"default","numShards":64} +{"level":"info","ts":1598367624.0301404,"msg":"bootstrap index with bootstrapped index segments","namespace":"default","numIndexBlocks":0} +{"level":"info","ts":1598367624.0301914,"msg":"bootstrap success","numShards":64,"bootstrapDuration":0.049208827} +{"level":"info","ts":1598367624.03023,"msg":"bootstrapped"} +``` + +You can check on the status by calling the _{{% apiendpoint %}}placement_ endpoint: + +{{< tabs name="check_placement" >}} +{{% tab name="Command" %}} + +```shell +curl {{% apiendpoint %}}placement | jq . 
+``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "placement": { + "instances": { + "m3db_local": { + "id": "m3db_local", + "isolationGroup": "local", + "zone": "embedded", + "weight": 1, + "endpoint": "127.0.0.1:9000", + "shards": [ + { + "id": 0, + "state": "AVAILABLE", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + }, + … + { + "id": 63, + "state": "AVAILABLE", + "sourceId": "", + "cutoverNanos": "0", + "cutoffNanos": "0" + } + ], + "shardSetId": 0, + "hostname": "localhost", + "port": 9000, + "metadata": { + "debugPort": 0 + } + } + }, + "replicaFactor": 1, + "numShards": 64, + "isSharded": true, + "cutoverTime": "0", + "isMirrored": false, + "maxShardSetId": 0 + }, + "version": 2 +} +``` + +{{% /tab %}} +{{< /tabs >}} + +{{% notice tip %}} +[Read more about the bootstrapping process](/docs/operational_guide/bootstrapping_crash_recovery/). +{{% /notice %}} + +### View Details of a Namespace + +You can also view the attributes of all namespaces by calling the _{{% apiendpoint %}}namespace_ endpoint + +{{< tabs name="check_namespaces" >}} +{{% tab name="Command" %}} + +```shell +curl {{% apiendpoint %}}namespace | jq . +``` + +{{% notice tip %}} +Add `?debug=1` to the request to convert nano units in the output into standard units. +{{% /notice %}} + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "registry": { + "namespaces": { + "default": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": "43200000000000", + "blockSizeNanos": "1800000000000", + "bufferFutureNanos": "120000000000", + "bufferPastNanos": "600000000000", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", + "futureRetentionPeriodNanos": "0" + }, + "snapshotEnabled": true, + "indexOptions": { + "enabled": true, + "blockSizeNanos": "1800000000000" + }, + "schemaOptions": null, + "coldWritesEnabled": false, + "runtimeOptions": null + } + } + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + +## Writing and Querying Metrics + +### Writing Metrics + +M3 supports ingesting [statsd](https://github.com/statsd/statsd#usage) and [Prometheus](https://prometheus.io/docs/concepts/data_model/) formatted metrics. + +This quickstart focuses on Prometheus metrics which consist of a value, a timestamp, and tags to bring context and meaning to the metric. + +You can write metrics using one of two endpoints: + +- _[{{% apiendpoint %}}prom/remote/write](/docs/m3coordinator/api/remote/)_ - Write a Prometheus remote write query to M3DB with a binary snappy compressed Prometheus WriteRequest protobuf message. +- _{{% apiendpoint %}}json/write_ - Write a JSON payload of metrics data. This endpoint is quick for testing purposes but is not as performant for production usage. + +For this quickstart, use the _{{% apiendpoint %}}json/write_ endpoint to write a tagged metric to M3DB with the following data in the request body, all fields are required: + +- `tags`: An object of at least one `name`/`value` pairs +- `timestamp`: The UNIX timestamp for the data +- `value`: The value for the data, can be of any type + +{{% notice tip %}} +The examples below use `__name__` as the name for one of the tags, which is a Prometheus reserved tag that allows you to query metrics using the value of the tag to filter results. +{{% /notice %}} + +{{% notice tip %}} +Label names may contain ASCII letters, numbers, underscores, and Unicode characters. 
They must match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. Label names beginning with `__` are reserved for internal use. [Read more in the Prometheus documentation](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). +{{% /notice %}} + +{{< tabs name="write_metrics" >}} +{{< tab name="Command 1" >}} + +{{% codeinclude file="quickstart/write-metrics-1.sh" language="shell" %}} + +{{< /tab >}} +{{< tab name="Command 2" >}} + +{{% codeinclude file="quickstart/write-metrics-2.sh" language="shell" %}} + +{{< /tab >}} +{{< tab name="Command 3" >}} + +{{% codeinclude file="quickstart/write-metrics-3.sh" language="shell" %}} + +{{< /tab >}} +{{< /tabs >}} + +### Querying metrics + +M3DB supports three query engines: Prometheus (default), Graphite, and the M3 Query Engine. + +This quickstart uses Prometheus as the query engine, and you have access to [all the features of PromQL queries](https://prometheus.io/docs/prometheus/latest/querying/basics/). + +To query metrics, use the _{{% apiendpoint %}}query_range_ endpoint with the following data in the request body, all fields are required: + +- `query`: A PromQL query +- `start`: Timestamp in `RFC3339Nano` of start range for results +- `end`: Timestamp in `RFC3339Nano` of end range for results +- `step`: A duration or float of the query resolution, the interval between results in the timespan between `start` and `end`. + +Below are some examples using the metrics written above. + +#### Return results in past 45 seconds + +{{< tabs name="example_promql_regex" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue" \ + -d "start=$( date -v -45S +%s )" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "3347.26" + ], + [ + {{% now %}}, + "5347.26" + ], + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + +#### Values above a certain number + +{{< tabs name="example_promql_range" >}} +{{% tab name="Linux" %}} + + + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date "+%s" -d "45 seconds ago")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . +``` + +{{% /tab %}} +{{% tab name="macOS/BSD" %}} + +```shell +curl -X "POST" -G "{{% apiendpoint %}}query_range" \ + -d "query=third_avenue > 6000" \ + -d "start=$(date -v -45S "+%s")" \ + -d "end=$( date +%s )" \ + -d "step=5s" | jq . 
+``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [ + { + "metric": { + "__name__": "third_avenue", + "checkout": "1", + "city": "new_york" + }, + "values": [ + [ + {{% now %}}, + "7347.26" + ] + ] + } + ] + } +} +``` + +{{% /tab %}} +{{< /tabs >}} + + diff --git a/site/content/docs/troubleshooting/_index.md b/site/content/troubleshooting/_index.md similarity index 100% rename from site/content/docs/troubleshooting/_index.md rename to site/content/troubleshooting/_index.md diff --git a/site/layouts/partials/logo.html b/site/layouts/partials/logo.html index ff367ee734..aa4aa56ec7 100644 --- a/site/layouts/partials/logo.html +++ b/site/layouts/partials/logo.html @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/site/static/logo.svg b/site/static/logo.svg new file mode 100644 index 0000000000..f35d2fdabb --- /dev/null +++ b/site/static/logo.svg @@ -0,0 +1,9 @@ + + + + + + + + + From 2e407c658769dfd918c5d0b617ce37ad6328b987 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Fri, 13 Nov 2020 14:37:10 +0100 Subject: [PATCH 02/15] Fix search, add new theme Signed-off-by: ChrisChinchilla --- site/config.toml | 6 +++--- site/content/m3db/architecture/_index.md | 1 - site/content/quickstart/_index.md | 2 +- site/content/quickstart/index.md | 2 +- site/layouts/partials/logo.html | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/site/config.toml b/site/config.toml index a3a7e87495..bcf9d8bf0c 100644 --- a/site/config.toml +++ b/site/config.toml @@ -1,8 +1,8 @@ # Change the default theme to be use when building the site with Hugo [module] [[module.imports]] - # path = "github.com/chronosphereio/docs-theme" - path = "../../../victor" + path = "github.com/chronosphereio/victor" + # path = "../../../victor" [[module.mounts]] source = "content" @@ -198,7 +198,7 @@ weight = 30 [outputs] -home = [ "HTML"] +home = [ "HTML", "RSS", "JSON"] page = [ "HTML"] section = [ "HTML"] chapter = [ "HTML"] \ No newline at end of file diff --git a/site/content/m3db/architecture/_index.md b/site/content/m3db/architecture/_index.md index 06c8ea1023..3796fbcd83 100644 --- a/site/content/m3db/architecture/_index.md +++ b/site/content/m3db/architecture/_index.md @@ -1,7 +1,6 @@ --- title: "Architecture" weight: 2 -chapter: true --- ## Overview diff --git a/site/content/quickstart/_index.md b/site/content/quickstart/_index.md index c65b29682e..2675c7c59f 100644 --- a/site/content/quickstart/_index.md +++ b/site/content/quickstart/_index.md @@ -1,5 +1,5 @@ --- -linktitle: "Quickstart using Docker" +linktitle: "Quickstart" weight: 1 --- diff --git a/site/content/quickstart/index.md b/site/content/quickstart/index.md index 6a9d4d8590..a36db9191f 100644 --- a/site/content/quickstart/index.md +++ b/site/content/quickstart/index.md @@ -1,5 +1,5 @@ --- -linktitle: "Quickstart using Docker" +linktitle: "Using Docker" weight: 1 --- diff --git a/site/layouts/partials/logo.html b/site/layouts/partials/logo.html index aa4aa56ec7..09dc437131 100644 --- a/site/layouts/partials/logo.html +++ b/site/layouts/partials/logo.html @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file From 3dfd942a5a4ecad824152ad490225dfb29da5e05 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 11:16:18 +0100 Subject: [PATCH 03/15] Remove defunct content Signed-off-by: ChrisChinchilla --- site/config.toml | 46 ++++++------------- site/content/case_studies/index.md | 1 - 
site/content/community/index.md | 13 ------ site/content/ecosystem/index.md | 1 - site/content/quickstart/_index.md | 2 +- .../quickstart/{index.md => docker.md} | 2 +- 6 files changed, 15 insertions(+), 50 deletions(-) delete mode 100644 site/content/case_studies/index.md delete mode 100644 site/content/community/index.md delete mode 100644 site/content/ecosystem/index.md rename site/content/quickstart/{index.md => docker.md} (99%) diff --git a/site/config.toml b/site/config.toml index bcf9d8bf0c..d310ab9a6d 100644 --- a/site/config.toml +++ b/site/config.toml @@ -33,21 +33,19 @@ target = "archetypes" [permalinks] - "/" = "/docs/:section/:title/" - "case_studies" = "/docs/:section/:title/" - "community" = "/docs/:section/:title/" - "ecosystem" = "/docs/:section/:title/" - "faqs" = "/docs/:section/:title/" - "glossary" = "/docs/:section/:title/" - "how_to" = "/docs/:section/:title/" - "integrations" = "/docs/:section/:title/" - "m3coordinator" = "/docs/:section/:title/" - "m3db" = "/docs/:section/:title/" - "m3query" = "/docs/:section/:title/" - "operational_guide" = "/docs/:section/:title/" - "overview" = "/docs/:section/:title/" - "quickstart" = "/docs/:section/:title/" - "troubleshooting" = "/docs/:section/:title/" + "/" = "/docs/:sections/:title/" + "case_studies" = "/docs/:sections/:title/" + "faqs" = "/docs/:sections/:title/" + "glossary" = "/docs/:sections/:title/" + "how_to" = "/docs/:sections/:title/" + "integrations" = "/docs/:sections/:title/" + "m3coordinator" = "/docs/:sections/:title/" + "m3db" = "/docs/:sections/:title/" + "m3query" = "/docs/:sections/:title/" + "operational_guide" = "/docs/:sections/:title/" + "overview" = "/docs/:sections/:title/" + "quickstart" = "/docs/:sections/:title/" + "troubleshooting" = "/docs/:sections/:title/" # theme = "docs-theme" # baseURL = "/" @@ -179,24 +177,6 @@ identifier = "ds" url = "https://github.com/m3db/m3" weight = 10 -[[menu.shortcuts]] -name = " " -url = "https://github.com/vjeantet/hugo-theme-docdock/archive/master.zip" -weight = 11 - -[[menu.shortcuts]] -name = " " -identifier = "hugodoc" -url = "https://gohugo.io/" -weight = 20 - -[[menu.shortcuts]] -name = " " -url = "/credits" -weight = 30 - - - [outputs] home = [ "HTML", "RSS", "JSON"] page = [ "HTML"] diff --git a/site/content/case_studies/index.md b/site/content/case_studies/index.md deleted file mode 100644 index 3e53c4fd7b..0000000000 --- a/site/content/case_studies/index.md +++ /dev/null @@ -1 +0,0 @@ -# Case Studies diff --git a/site/content/community/index.md b/site/content/community/index.md deleted file mode 100644 index 4c0d01ba29..0000000000 --- a/site/content/community/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# Community - -## How to contact the M3 team? - -Feel free to contact us through any of the following channels: - -1. Posting on the [M3 Google group](https://groups.google.com/forum/#!forum/m3db) -2. Opening issues on the [M3 GitHub page](https://github.com/m3db/m3/issues) -3. Chatting us on the official [Slack](http://bit.ly/m3slack) - -## GitHub/OSS - -Our official GitHub page can be [found here](https://github.com/m3db/m3). 
diff --git a/site/content/ecosystem/index.md b/site/content/ecosystem/index.md deleted file mode 100644 index e46e912ed8..0000000000 --- a/site/content/ecosystem/index.md +++ /dev/null @@ -1 +0,0 @@ -# Ecosystem diff --git a/site/content/quickstart/_index.md b/site/content/quickstart/_index.md index 2675c7c59f..98e71894eb 100644 --- a/site/content/quickstart/_index.md +++ b/site/content/quickstart/_index.md @@ -1,5 +1,5 @@ --- -linktitle: "Quickstart" +title: "Quickstart" weight: 1 --- diff --git a/site/content/quickstart/index.md b/site/content/quickstart/docker.md similarity index 99% rename from site/content/quickstart/index.md rename to site/content/quickstart/docker.md index a36db9191f..7e576ee2fc 100644 --- a/site/content/quickstart/index.md +++ b/site/content/quickstart/docker.md @@ -1,5 +1,5 @@ --- -linktitle: "Using Docker" +title: "Docker" weight: 1 --- From 18f9a9835b4a921f7dd7c00a1384b6e1449ac0f7 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 12:36:33 +0100 Subject: [PATCH 04/15] Change permalinks Signed-off-by: ChrisChinchilla --- site/config.toml | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/site/config.toml b/site/config.toml index d310ab9a6d..ca3c65e6be 100644 --- a/site/config.toml +++ b/site/config.toml @@ -32,20 +32,21 @@ source = "archetypes" target = "archetypes" +ignoreFiles = [ "\\.ttf$", "\\.woff$", "\\.woff2$", "\\.eot$" ] + [permalinks] - "/" = "/docs/:sections/:title/" - "case_studies" = "/docs/:sections/:title/" - "faqs" = "/docs/:sections/:title/" - "glossary" = "/docs/:sections/:title/" - "how_to" = "/docs/:sections/:title/" - "integrations" = "/docs/:sections/:title/" - "m3coordinator" = "/docs/:sections/:title/" - "m3db" = "/docs/:sections/:title/" - "m3query" = "/docs/:sections/:title/" - "operational_guide" = "/docs/:sections/:title/" - "overview" = "/docs/:sections/:title/" - "quickstart" = "/docs/:sections/:title/" - "troubleshooting" = "/docs/:sections/:title/" + "/" = "/docs/:section/:title/" + "faqs" = "/docs/:section/:title/" + "glossaries" = "/docs/:section/:title/" + "how_tos" = "/docs/:section/:title/" + "integrations" = "/docs/:section/:title/" + "m3coordinators" = "/docs/:section/:title/" + "m3dbs" = "/docs/:section/:title/" + "m3querys" = "/docs/:section/:title/" + "operational_guides" = "/docs/:section/:title/" + "overviews" = "/docs/:section/:title/" + "quickstarts" = "/docs/:section/:title/" + "troubleshootings" = "/docs/:section/:title/" # theme = "docs-theme" # baseURL = "/" From 6fc4a9db54c5261df5e2fd6c5a3923391e000152 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 13:40:41 +0100 Subject: [PATCH 05/15] Fix to suit new theme menu fix Signed-off-by: ChrisChinchilla --- site/config.toml | 2 +- site/content/_index.md | 2 +- site/go.mod | 5 ++- site/go.sum | 4 ++ site/layouts/partials/menu.html | 76 +++++++++++++++++++++++++++++++++ 5 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 site/layouts/partials/menu.html diff --git a/site/config.toml b/site/config.toml index ca3c65e6be..95d3f39dbb 100644 --- a/site/config.toml +++ b/site/config.toml @@ -6,7 +6,7 @@ [[module.mounts]] source = "content" - target = "content" + target = "content/docs" [[module.mounts]] source = "static" diff --git a/site/content/_index.md b/site/content/_index.md index 708a9f53cc..b87505f001 100644 --- a/site/content/_index.md +++ b/site/content/_index.md @@ -1,5 +1,5 @@ --- -title: M3 Introduction +title: M3 Documentation weight: 1 --- diff --git 
a/site/go.mod b/site/go.mod index 92399634da..a2374339d9 100644 --- a/site/go.mod +++ b/site/go.mod @@ -2,4 +2,7 @@ module m3-site go 1.15 -require github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 // indirect +require ( + github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 // indirect + github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 // indirect +) diff --git a/site/go.sum b/site/go.sum index 5ccc24cca7..b7f156e972 100644 --- a/site/go.sum +++ b/site/go.sum @@ -3,3 +3,7 @@ github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467 h1:YtTpg github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 h1:Wz/dFFd3bVR+XZ7shqLyuZwyIh5yDbhIhdbdkFEFnH4= github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= +github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890 h1:yO288wpyv4dr3nXdXjIsEM60DmeLzC4XquvnKCvoLR0= +github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= +github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 h1:EXZaeDfAkZsOYoP3zCyZlhb+PXZO/PQSmilpTX8bX+0= +github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= diff --git a/site/layouts/partials/menu.html b/site/layouts/partials/menu.html new file mode 100644 index 0000000000..0d445aca76 --- /dev/null +++ b/site/layouts/partials/menu.html @@ -0,0 +1,76 @@ +{{- $currentNode := . }} +{{- $showvisitedlinks := .Site.Params.showVisitedLinks -}} +{{- $topSection := index .Site.Sections 0 }} +{{- range $topSection.Sections }} +{{- template "section-tree-nav" dict "sect" . 
"currentnode" $currentNode "showvisitedlinks" $showvisitedlinks}} +{{- end}} + + +{{- define "section-tree-nav" }} +{{- $showvisitedlinks := .showvisitedlinks }} +{{- $currentNode := .currentnode }} + {{- with .sect}} + {{- if and .IsSection (or (not .Params.hidden) $.showhidden)}} + {{- $numberOfPages := (add (len .Pages) (len .Sections)) }} + {{- safeHTML .Params.head}} + + {{- else}} + {{- if not .Params.Hidden }} + + {{- end}} + {{- end}} + {{- end}} +{{- end}} From 3743f2624466bff12d8382ebfa7fe6371e33b770 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Mon, 16 Nov 2020 08:41:13 -0500 Subject: [PATCH 06/15] Add operator docs --- site/config.toml | 12 +- site/content/how_to/aggregator.md | 4 +- site/content/how_to/kubernetes.md | 326 ------------------ site/content/how_to/query.md | 5 +- site/content/m3coordinator/_index.md | 2 +- site/content/m3db/_index.md | 2 +- site/content/operator/_index.md | 6 + site/content/operator/api.md | 277 +++++++++++++++ site/content/operator/configuration/_index.md | 5 + .../configuration/configuring_m3db.md | 34 ++ .../operator/configuration/namespaces.md | 72 ++++ .../operator/configuration/node_affinity.md | 197 +++++++++++ .../operator/configuration/node_endpoint.md | 32 ++ .../operator/configuration/pod_identity.md | 69 ++++ .../operator/getting_started/_index.md | 5 + .../getting_started/create_cluster.md | 178 ++++++++++ .../getting_started/delete_cluster.md | 46 +++ .../operator/getting_started/installation.md | 34 ++ .../operator/getting_started/monitoring.md | 19 + .../operator/getting_started/requirements.md | 37 ++ .../getting_started/update_cluster.md | 48 +++ site/content/operator/operator.md | 31 ++ site/content/overview/roadmap.md | 15 +- site/content/quickstart/docker.md | 2 +- site/content/quickstart/kubernetes.md | 95 +++++ 25 files changed, 1208 insertions(+), 345 deletions(-) delete mode 100644 site/content/how_to/kubernetes.md create mode 100644 site/content/operator/_index.md create mode 100644 site/content/operator/api.md create mode 100644 site/content/operator/configuration/_index.md create mode 100644 site/content/operator/configuration/configuring_m3db.md create mode 100644 site/content/operator/configuration/namespaces.md create mode 100644 site/content/operator/configuration/node_affinity.md create mode 100644 site/content/operator/configuration/node_endpoint.md create mode 100644 site/content/operator/configuration/pod_identity.md create mode 100644 site/content/operator/getting_started/_index.md create mode 100644 site/content/operator/getting_started/create_cluster.md create mode 100644 site/content/operator/getting_started/delete_cluster.md create mode 100644 site/content/operator/getting_started/installation.md create mode 100644 site/content/operator/getting_started/monitoring.md create mode 100644 site/content/operator/getting_started/requirements.md create mode 100644 site/content/operator/getting_started/update_cluster.md create mode 100644 site/content/operator/operator.md create mode 100644 site/content/quickstart/kubernetes.md diff --git a/site/config.toml b/site/config.toml index 95d3f39dbb..8f41512277 100644 --- a/site/config.toml +++ b/site/config.toml @@ -178,8 +178,18 @@ identifier = "ds" url = "https://github.com/m3db/m3" weight = 10 +[[menu.shortcuts]] +name = " " +url = "https://bit.ly/m3slack" +weight = 11 + +[[menu.shortcuts]] +name = " " +url = "https://groups.google.com/forum/#!forum/m3db" +weight = 12 + [outputs] home = [ "HTML", "RSS", "JSON"] page = [ "HTML"] section = [ "HTML"] -chapter = [ 
"HTML"] \ No newline at end of file +chapter = [ "HTML"] diff --git a/site/content/how_to/aggregator.md b/site/content/how_to/aggregator.md index d20a5fe629..807ec21679 100644 --- a/site/content/how_to/aggregator.md +++ b/site/content/how_to/aggregator.md @@ -1,11 +1,9 @@ --- -title: Setting up M3Aggregator +title: Setting up M3 Aggregator menuTitle: M3Aggregator weight: 5 --- - - ## Introduction `m3aggregator` is used to cluster stateful downsampling and rollup of metrics before they are store in M3DB. The M3 Coordinator also performs this role but is not cluster aware. This means metrics will not get aggregated properly if you send metrics in round robin fashion to multiple M3 Coordinators for the same metrics ingestion source (e.g. Prometheus server). diff --git a/site/content/how_to/kubernetes.md b/site/content/how_to/kubernetes.md deleted file mode 100644 index 92fbb6d74f..0000000000 --- a/site/content/how_to/kubernetes.md +++ /dev/null @@ -1,326 +0,0 @@ ---- -title: M3DB on Kubernetes -weight: 3 ---- - - -**Please note:** If possible _[PLEASE USE THE OPERATOR](https://operator.m3db.io/)_ to deploy to Kubernetes if you -can. It is a considerably more streamlined setup. - -The [operator](https://operator.m3db.io/) leverages [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -(CRDs) to automatically handle operations such as managing cluster topology. - -The guide below provides static manifests to bootstrap a cluster on Kubernetes and should be considered -as a guide to running M3 on Kubernetes, if and only if you have significant custom requirements not satisfied by -the operator. - -## Prerequisites - -M3DB performs better when it has access to fast disks. Every incoming write is written to a commit log, which at high -volumes of writes can be sensitive to spikes in disk latency. Additionally the random seeks into files when loading cold -files benefit from lower random read latency. - -Because of this, the included manifests reference a -[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) named `fast`. Manifests are -provided to provide such a StorageClass on AWS / Azure / GCP using the respective cloud provider's premium disk class. - -If you do not already have a StorageClass named `fast`, create one using one of the provided manifests: - -```shell -# AWS EBS (class io1) -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-aws.yaml - -# Azure premium LRS -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-azure.yaml - -# GCE Persistent SSD -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-gcp.yaml -``` - -If you wish to use your cloud provider's default remote disk, or another disk class entirely, you'll have to modify them -manifests. - -If your Kubernetes cluster spans multiple availability zones, it's important to specify a [Volume Binding Mode](https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) of `WaitForFirstConsumer` in your StorageClass to delay the binding of the PersistentVolume until the Pod is created. - -### Kernel Configuration - -We provide a Kubernetes daemonset that can make setting host-level sysctls easier. Please see the [kernel][/docs/operational_guide/kernel_configuration] docs -for more. - -Note that our default StatefulSet spec will give the M3DB container `CAP_SYS_RESOURCE` so it may raise its file limits. 
-Uncomment the `securityContext` on the `m3db` container in the StatefulSet if running with a Pod Security Policy or -similar enforcement mechanism that prevents adding capabilities to containers. - -## Deploying - -Apply the following manifest to create your cluster: - -```shell -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/bundle.yaml -``` - -Applying this bundle will create the following resources: - -1. An `m3db` [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) for - all M3DB-related resources. -2. A 3-node etcd cluster in the form of a - [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) backed by persistent - remote SSDs. This cluster stores the DB topology and other runtime configuration data. -3. A 3-node M3DB cluster in the form of a StatefulSet. -4. [Headless services](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services) for - the etcd and m3db StatefulSets to provide stable DNS hostnames per-pod. - -Wait until all created pods are listed as ready: - -```shell -$ kubectl -n m3db get po -NAME READY STATUS RESTARTS AGE -etcd-0 1/1 Running 0 22m -etcd-1 1/1 Running 0 22m -etcd-2 1/1 Running 0 22m -m3dbnode-0 1/1 Running 0 22m -m3dbnode-1 1/1 Running 0 22m -m3dbnode-2 1/1 Running 0 22m -``` - -You can now proceed to initialize a namespace and placement for the cluster the same as you would for our other how-to -guides: - -```shell -# Open a local connection to the coordinator service: -$ kubectl -n m3db port-forward svc/m3coordinator 7201 -Forwarding from 127.0.0.1:7201 -> 7201 -Forwarding from [::1]:7201 -> 7201 -``` - -```shell -# Create an initial cluster topology -curl -sSf -X POST localhost:7201/api/v1/services/m3db/placement/init -d '{ - "num_shards": 1024, - "replication_factor": 3, - "instances": [ - { - "id": "m3dbnode-0", - "isolation_group": "pod0", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-0.m3dbnode:9000", - "hostname": "m3dbnode-0.m3dbnode", - "port": 9000 - }, - { - "id": "m3dbnode-1", - "isolation_group": "pod1", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-1.m3dbnode:9000", - "hostname": "m3dbnode-1.m3dbnode", - "port": 9000 - }, - { - "id": "m3dbnode-2", - "isolation_group": "pod2", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-2.m3dbnode:9000", - "hostname": "m3dbnode-2.m3dbnode", - "port": 9000 - } - ] -}' -``` - -```shell -# Create a namespace to hold your metrics -curl -X POST localhost:7201/api/v1/services/m3db/namespace -d '{ - "name": "default", - "options": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "snapshotEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodDuration": "720h", - "blockSizeDuration": "12h", - "bufferFutureDuration": "1h", - "bufferPastDuration": "1h", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodDuration": "5m" - }, - "indexOptions": { - "enabled": true, - "blockSizeDuration": "12h" - } - } -}' -``` - -Shortly after you should see your nodes finish bootstrapping: - -```shell -$ kubectl -n m3db logs -f m3dbnode-0 -21:36:54.831698[I] cluster database initializing topology -21:36:54.831732[I] cluster database resolving topology -21:37:22.821740[I] resolving namespaces with namespace watch -21:37:22.821813[I] updating database namespaces [{adds [metrics]} {updates []} {removals []}] -21:37:23.008109[I] node tchannelthrift: listening on 
0.0.0.0:9000 -21:37:23.008384[I] cluster tchannelthrift: listening on 0.0.0.0:9001 -21:37:23.217090[I] node httpjson: listening on 0.0.0.0:9002 -21:37:23.217240[I] cluster httpjson: listening on 0.0.0.0:9003 -21:37:23.217526[I] bootstrapping shards for range starting [{run bootstrap-data} {bootstrapper filesystem} ... -... -21:37:23.239534[I] bootstrap data fetched now initializing shards with series blocks [{namespace metrics} {numShards 256} {numSeries 0}] -21:37:23.240778[I] bootstrap finished [{namespace metrics} {duration 23.325194ms}] -21:37:23.240856[I] bootstrapped -21:37:29.733025[I] successfully updated topology to 3 hosts -``` - -You can now write and read metrics using the API on the DB nodes: - -```shell -$ kubectl -n m3db port-forward svc/m3dbnode 9003 -Forwarding from 127.0.0.1:9003 -> 9003 -Forwarding from [::1]:9003 -> 9003 -``` - -```shell -curl -sSf -X POST localhost:9003/writetagged -d '{ - "namespace": "default", - "id": "foo", - "tags": [ - { - "name": "city", - "value": "new_york" - }, - { - "name": "endpoint", - "value": "/request" - } - ], - "datapoint": { - "timestamp": '"$(date "+%s")"', - "value": 42.123456789 - } -}' -``` - -```shell -$ curl -sSf -X POST http://localhost:9003/query -d '{ - "namespace": "default", - "query": { - "regexp": { - "field": "city", - "regexp": ".*" - } - }, - "rangeStart": 0, - "rangeEnd": '"$(date "+%s")"' -}' | jq . - -{ - "results": [ - { - "id": "foo", - "tags": [ - { - "name": "city", - "value": "new_york" - }, - { - "name": "endpoint", - "value": "/request" - } - ], - "datapoints": [ - { - "timestamp": 1527630053, - "value": 42.123456789 - } - ] - } - ], - "exhaustive": true -} -``` - -To read and write metrics via the Coordinator (and not directly through the DB node API), you must mark the namespace as ready: - -```shell -curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ - "name": "default" -} -``` - -You should now be able to use both the Coordinator and DB node API to perform reads and writes. - -### Adding nodes - -You can easily scale your M3DB cluster by scaling the StatefulSet and informing the cluster topology of the change: - -```shell -kubectl -n m3db scale --replicas=4 statefulset/m3dbnode -``` - -Once the pod is ready you can modify the cluster topology: - -```shell -kubectl -n m3db port-forward svc/m3coordinator 7201 -Forwarding from 127.0.0.1:7201 -> 7201 -Forwarding from [::1]:7201 -> 7201 -``` - -```shell -curl -sSf -X POST localhost:7201/api/v1/services/m3db/placement -d '{ - "instances": [ - { - "id": "m3dbnode-3", - "isolation_group": "pod3", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-3.m3dbnode:9000", - "hostname": "m3dbnode-3.m3dbnode", - "port": 9000 - } - ] -}' -``` - -## Integrations - -### Prometheus - -As mentioned in our integrations [guide](/docs/integrations/prometheus), M3DB can be used as a [remote read/write -endpoint](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_write%3E) for Prometheus. 
- -If you run Prometheus on your Kubernetes cluster you can easily point it at M3DB in your Prometheus server config: - -```YAML -remote_read: - - url: "http://m3coordinator.m3db.svc.cluster.local:7201/api/v1/prom/remote/read" - # To test reading even when local Prometheus has the data - read_recent: true - -remote_write: - - url: "http://m3coordinator.m3db.svc.cluster.local:7201/api/v1/prom/remote/write" - # To differentiate between local and remote storage we will add a storage label - write_relabel_configs: - - target_label: metrics_storage - replacement: m3db_remote -``` - -## Scheduling - -In some cases, you might prefer M3DB to run on certain nodes in your cluster. For example: if your cluster is comprised -of different instance types and some have more memory than others then you'd like M3DB to run on those nodes if -possible. To accommodate this, the pods created by the StatefulSets use [pod -affinities](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) and -[tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to prefer to run on -certain nodes. Specifically: - -1. The pods tolerate the taint `"dedicated-m3db"` to run on nodes that are specifically dedicated to m3db if you so - choose. -2. Via `nodeAffinity` the pods prefer to run on nodes with the label `m3db.io/dedicated-m3db="true"`. - -[kernel]: /operational_guide/kernel_configuration diff --git a/site/content/how_to/query.md b/site/content/how_to/query.md index 6e30a85a9b..41f843feca 100644 --- a/site/content/how_to/query.md +++ b/site/content/how_to/query.md @@ -1,11 +1,10 @@ --- -title: Setting up m3query +title: Setting up M3 Query menuTitle: M3Query weight: 4 --- - -m3query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](/docs/integrations/prometheus), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](/docs/m3query/architecture/blocks/). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana. +M3 Query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](/docs/integrations/prometheus), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](/docs/m3query/architecture/blocks/). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana. 
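Since M3 Query exposes a Prometheus-compatible HTTP API, a quick way to sanity-check a running instance is to issue a PromQL range query directly against it. The snippet below is a minimal sketch only: it assumes the HTTP API is reachable on `localhost:7201`, that a metric named `third_avenue` (as in the quickstart examples) has already been written, and that `jq` is installed — adjust host, port, and query to match your deployment.

```shell
# Issue a PromQL range query against M3 Query's Prometheus-compatible API.
# Assumes the API is reachable on localhost:7201 and jq is installed.
# Using shell arithmetic for the start time avoids the GNU/BSD `date` flag differences.
curl -X "POST" -G "http://localhost:7201/api/v1/query_range" \
  -d "query=third_avenue" \
  -d "start=$(( $(date +%s) - 45 ))" \
  -d "end=$(date +%s)" \
  -d "step=5s" | jq .
```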
## Configuration diff --git a/site/content/m3coordinator/_index.md b/site/content/m3coordinator/_index.md index e66a19c734..5d2a0d11dd 100644 --- a/site/content/m3coordinator/_index.md +++ b/site/content/m3coordinator/_index.md @@ -1,7 +1,7 @@ --- title: "M3 Coordinator, API for reading/writing metrics and management" menuTitle: "M3 Coordinator" -weight: 4 +weight: 5 chapter: true --- diff --git a/site/content/m3db/_index.md b/site/content/m3db/_index.md index df5693f0cb..cd43f857b7 100644 --- a/site/content/m3db/_index.md +++ b/site/content/m3db/_index.md @@ -1,7 +1,7 @@ --- title: "M3DB, a distributed time series database" menuTitle: "M3DB" -weight: 3 +weight: 4 chapter: true --- diff --git a/site/content/operator/_index.md b/site/content/operator/_index.md new file mode 100644 index 0000000000..e6adbca423 --- /dev/null +++ b/site/content/operator/_index.md @@ -0,0 +1,6 @@ +--- +title: "Kubernetes Operator" +menuTitle: "Kubernetes Operator" +weight: 3 +chapter: true +--- diff --git a/site/content/operator/api.md b/site/content/operator/api.md new file mode 100644 index 0000000000..be1670f077 --- /dev/null +++ b/site/content/operator/api.md @@ -0,0 +1,277 @@ +--- +title: "API" +menuTitle: "API" +weight: 4 +chapter: true +--- + +This document enumerates the Custom Resource Definitions used by the M3DB Operator. It is auto-generated from code comments. + +## Table of Contents +* [ClusterCondition](#clustercondition) +* [ClusterSpec](#clusterspec) +* [ExternalCoordinatorConfig](#externalcoordinatorconfig) +* [IsolationGroup](#isolationgroup) +* [M3DBCluster](#m3dbcluster) +* [M3DBClusterList](#m3dbclusterlist) +* [M3DBStatus](#m3dbstatus) +* [NodeAffinityTerm](#nodeaffinityterm) +* [AggregatedAttributes](#aggregatedattributes) +* [Aggregation](#aggregation) +* [AggregationOptions](#aggregationoptions) +* [DownsampleOptions](#downsampleoptions) +* [IndexOptions](#indexoptions) +* [Namespace](#namespace) +* [NamespaceOptions](#namespaceoptions) +* [RetentionOptions](#retentionoptions) +* [PodIdentity](#podidentity) +* [PodIdentityConfig](#podidentityconfig) + +## ClusterCondition + +ClusterCondition represents various conditions the cluster can be in. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| type | Type of cluster condition. | ClusterConditionType | false | +| status | Status of the condition (True, False, Unknown). | corev1.ConditionStatus | false | +| lastUpdateTime | Last time this condition was updated. | string | false | +| lastTransitionTime | Last time this condition transitioned from one status to another. | string | false | +| reason | Reason this condition last changed. | string | false | +| message | Human-friendly message about this condition. | string | false | + +[Back to TOC](#table-of-contents) + +## ClusterSpec + +ClusterSpec defines the desired state for a M3 cluster to be converge to. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| image | Image specifies which docker image to use with the cluster | string | false | +| replicationFactor | ReplicationFactor defines how many replicas | int32 | false | +| numberOfShards | NumberOfShards defines how many shards in total | int32 | false | +| isolationGroups | IsolationGroups specifies a map of key-value pairs. Defines which isolation groups to deploy persistent volumes for data nodes | [][IsolationGroup](#isolationgroup) | false | +| namespaces | Namespaces specifies the namespaces this cluster will hold. 
| [][Namespace](#namespace) | false | +| etcdEndpoints | EtcdEndpoints defines the etcd endpoints to use for service discovery. Must be set if no custom configmap is defined. If set, etcd endpoints will be templated in to the default configmap template. | []string | false | +| keepEtcdDataOnDelete | KeepEtcdDataOnDelete determines whether the operator will remove cluster metadata (placement + namespaces) in etcd when the cluster is deleted. Unless true, etcd data will be cleared when the cluster is deleted. | bool | false | +| enableCarbonIngester | EnableCarbonIngester enables the listener port for the carbon ingester | bool | false | +| configMapName | ConfigMapName specifies the ConfigMap to use for this cluster. If unset a default configmap with template variables for etcd endpoints will be used. See \"Configuring M3DB\" in the docs for more. | *string | false | +| podIdentityConfig | PodIdentityConfig sets the configuration for pod identity. If unset only pod name and UID will be used. | *PodIdentityConfig | false | +| containerResources | Resources defines memory / cpu constraints for each container in the cluster. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#resourcerequirements-v1-core) | false | +| dataDirVolumeClaimTemplate | DataDirVolumeClaimTemplate is the volume claim template for an M3DB instance's data. It claims PersistentVolumes for cluster storage, volumes are dynamically provisioned by when the StorageClass is defined. | *[corev1.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#persistentvolumeclaim-v1-core) | false | +| podSecurityContext | PodSecurityContext allows the user to specify an optional security context for pods. | *corev1.PodSecurityContext | false | +| securityContext | SecurityContext allows the user to specify a container-level security context. | *corev1.SecurityContext | false | +| imagePullSecrets | ImagePullSecrets will be added to every pod. | [][corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#localobjectreference-v1-core) | false | +| envVars | EnvVars defines custom environment variables to be passed to M3DB containers. | []corev1.EnvVar | false | +| labels | Labels sets the base labels that will be applied to resources created by the cluster. // TODO(schallert): design doc on labeling scheme. | map[string]string | false | +| annotations | Annotations sets the base annotations that will be applied to resources created by the cluster. | map[string]string | false | +| tolerations | Tolerations sets the tolerations that will be applied to all M3DB pods. | []corev1.Toleration | false | +| priorityClassName | PriorityClassName sets the priority class for all M3DB pods. | string | false | +| nodeEndpointFormat | NodeEndpointFormat allows overriding of the endpoint used for a node in the M3DB placement. Defaults to \"{{ .PodName }}.{{ .M3DBService }}:{{ .Port }}\". Useful if access to the cluster from other namespaces is desired. See \"Node Endpoint\" docs for full variables available. | string | false | +| hostNetwork | HostNetwork indicates whether M3DB pods should run in the same network namespace as the node its on. This option should be used sparingly due to security concerns outlined in the linked documentation. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces | bool | false | +| dnsPolicy | DNSPolicy allows the user to set the pod's DNSPolicy. 
This is often used in conjunction with HostNetwork.+optional | *corev1.DNSPolicy | false | +| externalCoordinator | Specify a \"controlling\" coordinator for the cluster. | *[ExternalCoordinatorConfig](#externalcoordinatorconfig) | false | +| initContainers | Custom setup for db nodes can be done via initContainers Provide the complete spec for the initContainer here If any storage volumes are needed in the initContainer see InitVolumes below | []corev1.Container | false | +| initVolumes | If the InitContainers require any storage volumes Provide the complete specification for the required Volumes here | []corev1.Volume | false | +| podMetadata | PodMetadata is for any Metadata that is unique to the pods, and does not belong on any other objects, such as Prometheus scrape tags | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#objectmeta-v1-meta) | false | +| parallelPodManagement | ParallelPodManagement sets StatefulSets created by the operator to have Parallel pod management instead of OrderedReady. If nil, this will default to true. | *bool | true | +| serviceAccountName | To use a non-default service account, specify the name here otherwise the service account \"default\" will be used. This is useful for advanced use-cases such as pod security policies. The service account must exist. This operator will not create it. | string | false | +| frozen | Frozen is used to stop the operator from taking any further actions on a cluster. This is useful when troubleshooting as it guarantees the operator won't make any changes to the cluster. | bool | false | + +[Back to TOC](#table-of-contents) + +## ExternalCoordinatorConfig + +ExternalCoordinatorConfig defines parameters for using an external coordinator to control the cluster.\n\n- It is expected that there is a separate standalone coordinator cluster. - It is externally managed - not managed by this operator. - It is expected to have a service endpoint.\n\nSetup this db cluster, but do not assume a co-located coordinator. Instead provide a selector here so we can point to a separate coordinator service. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| selector | | map[string]string | true | +| serviceEndpoint | | string | false | + +[Back to TOC](#table-of-contents) + +## IsolationGroup + +IsolationGroup defines the name of zone as well attributes for the zone configuration + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | Name is the value that will be used in StatefulSet labels, pod labels, and M3DB placement \"isolationGroup\" fields. | string | true | +| nodeAffinityTerms | NodeAffinityTerms is an array of NodeAffinityTerm requirements, which are ANDed together to indicate what nodes an isolation group can be assigned to. | [][NodeAffinityTerm](#nodeaffinityterm) | false | +| numInstances | NumInstances defines the number of instances. | int32 | true | +| storageClassName | StorageClassName is the name of the StorageClass to use for this isolation group. This allows ensuring that PVs will be created in the same zone as the pinned statefulset on Kubernetes < 1.12 (when topology aware volume scheduling was introduced). Only has effect if the clusters `dataDirVolumeClaimTemplate` is non-nil. If set, the volume claim template will have its storageClassName field overridden per-isolationgroup. If unset the storageClassName of the volumeClaimTemplate will be used. 
| string | false | + +[Back to TOC](#table-of-contents) + +## M3DBCluster + +M3DBCluster defines the cluster + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#objectmeta-v1-meta) | false | +| type | | string | true | +| spec | | [ClusterSpec](#clusterspec) | true | +| status | | [M3DBStatus](#m3dbstatus) | false | + +[Back to TOC](#table-of-contents) + +## M3DBClusterList + +M3DBClusterList represents a list of M3DB Clusters + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#listmeta-v1-meta) | false | +| items | | [][M3DBCluster](#m3dbcluster) | true | + +[Back to TOC](#table-of-contents) + +## M3DBStatus + +M3DBStatus contains the current state the M3DB cluster along with a human readable message + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| state | State is a enum of green, yellow, and red denoting the health of the cluster | M3DBState | false | +| conditions | Various conditions about the cluster. | [][ClusterCondition](#clustercondition) | false | +| message | Message is a human readable message indicating why the cluster is in it's current state | string | false | +| observedGeneration | ObservedGeneration is the last generation of the cluster the controller observed. Kubernetes will automatically increment metadata.Generation every time the cluster spec is changed. | int64 | false | + +[Back to TOC](#table-of-contents) + +## NodeAffinityTerm + +NodeAffinityTerm represents a node label and a set of label values, any of which can be matched to assign a pod to a node. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| key | Key is the label of the node. | string | true | +| values | Values is an array of values, any of which a node can have for a pod to be assigned to it. | []string | true | + +[Back to TOC](#table-of-contents) + +## AggregatedAttributes + +AggregatedAttributes are attributes specifying how data points are aggregated. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| resolution | Resolution is the time range to aggregate data across. | string | false | +| downsampleOptions | DownsampleOptions stores options for downsampling data points. | *[DownsampleOptions](#downsampleoptions) | false | + +[Back to TOC](#table-of-contents) + +## Aggregation + +Aggregation describes data points within a namespace. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| aggregated | Aggregated indicates whether data points are aggregated or not. | bool | false | +| attributes | Attributes defines how data is aggregated when Aggregated is set to true. This field is ignored when aggregated is false. | [AggregatedAttributes](#aggregatedattributes) | false | + +[Back to TOC](#table-of-contents) + +## AggregationOptions + +AggregationOptions is a set of options for aggregating data within the namespace. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| aggregations | Aggregations are the aggregations for a namespace. | [][Aggregation](#aggregation) | false | + +[Back to TOC](#table-of-contents) + +## DownsampleOptions + +DownsampleOptions is a set of options related to downsampling data. 
+ +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| all | All indicates whether to send data points to this namespace. If set to false, this namespace will not receive data points. In this case, data will need to be sent to the namespace via another mechanism (e.g. rollup/recording rules). | bool | false | + +[Back to TOC](#table-of-contents) + +## IndexOptions + +IndexOptions defines parameters for indexing. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| enabled | Enabled controls whether metric indexing is enabled. | bool | false | +| blockSize | BlockSize controls the index block size. | string | false | + +[Back to TOC](#table-of-contents) + +## Namespace + +Namespace defines an M3DB namespace or points to a preset M3DB namespace. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | Name is the namespace name. | string | false | +| preset | Preset indicates preset namespace options. | string | false | +| options | Options points to optional custom namespace configuration. | *[NamespaceOptions](#namespaceoptions) | false | + +[Back to TOC](#table-of-contents) + +## NamespaceOptions + +NamespaceOptions defines parameters for an M3DB namespace. See https://m3db.github.io/m3/operational_guide/namespace_configuration/ for more details. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| bootstrapEnabled | BootstrapEnabled control if bootstrapping is enabled. | bool | false | +| flushEnabled | FlushEnabled controls whether flushing is enabled. | bool | false | +| writesToCommitLog | WritesToCommitLog controls whether commit log writes are enabled. | bool | false | +| cleanupEnabled | CleanupEnabled controls whether cleanups are enabled. | bool | false | +| repairEnabled | RepairEnabled controls whether repairs are enabled. | bool | false | +| snapshotEnabled | SnapshotEnabled controls whether snapshotting is enabled. | bool | false | +| retentionOptions | RetentionOptions sets the retention parameters. | [RetentionOptions](#retentionoptions) | false | +| indexOptions | IndexOptions sets the indexing parameters. | [IndexOptions](#indexoptions) | false | +| coldWritesEnabled | ColdWritesEnabled controls whether cold writes are enabled. | bool | false | +| aggregationOptions | AggregationOptions sets the aggregation parameters. | [AggregationOptions](#aggregationoptions) | false | + +[Back to TOC](#table-of-contents) + +## RetentionOptions + +RetentionOptions defines parameters for data retention. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| retentionPeriod | RetentionPeriod controls how long data for the namespace is retained. | string | false | +| blockSize | BlockSize controls the block size for the namespace. | string | false | +| bufferFuture | BufferFuture controls how far in the future metrics can be written. | string | false | +| bufferPast | BufferPast controls how far in the past metrics can be written. | string | false | +| blockDataExpiry | BlockDataExpiry controls the block expiry. | bool | false | +| blockDataExpiryAfterNotAccessPeriod | BlockDataExpiry controls the not after access period for expiration. | string | false | + +[Back to TOC](#table-of-contents) + +## PodIdentity + +PodIdentity contains all the fields that may be used to identify a pod's identity in the M3DB placement. 
Any non-empty fields will be used to identity uniqueness of a pod for the purpose of M3DB replace operations. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | | string | false | +| uid | | string | false | +| nodeName | | string | false | +| nodeExternalID | | string | false | +| nodeProviderID | | string | false | + +[Back to TOC](#table-of-contents) + +## PodIdentityConfig + +PodIdentityConfig contains cluster-level configuration for deriving pod identity. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| sources | Sources enumerates the sources from which to derive pod identity. Note that a pod's name will always be used. If empty, defaults to pod name and UID. | []PodIdentitySource | true | + +[Back to TOC](#table-of-contents) diff --git a/site/content/operator/configuration/_index.md b/site/content/operator/configuration/_index.md new file mode 100644 index 0000000000..253cacbb27 --- /dev/null +++ b/site/content/operator/configuration/_index.md @@ -0,0 +1,5 @@ +--- +title: "Configuration" +weight: 3 +chapter: true +--- \ No newline at end of file diff --git a/site/content/operator/configuration/configuring_m3db.md b/site/content/operator/configuration/configuring_m3db.md new file mode 100644 index 0000000000..02af53b40c --- /dev/null +++ b/site/content/operator/configuration/configuring_m3db.md @@ -0,0 +1,34 @@ +--- +title: "Configuring M3DB" +menuTitle: "Configuring M3DB" +weight: 10 +chapter: true +--- + +By default the operator will apply a configmap with basic M3DB options and settings for the coordinator to direct +Prometheus reads/writes to the cluster. This template can be found +[here](https://github.com/m3db/m3db-operator/blob/master/assets/default-config.tmpl). + +To apply custom a configuration for the M3DB cluster, one can set the `configMapName` parameter of the cluster [spec] to +an existing configmap. + +## Environment Warning + +If providing a custom config map, the `env` you specify in your [config][config] **must** be `$NAMESPACE/$NAME`, where +`$NAMESPACE` is the Kubernetes namespace your cluster is in and `$NAME` is the name of the cluster. For example, with +the following cluster: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: cluster-a + namespace: production +... +``` + +The value of `env` in your config **MUST** be `production/cluster-a`. This restriction allows multiple M3DB clusters to +safely share the same etcd cluster. + +[spec]: ../api +[config]: https://github.com/m3db/m3db-operator/blob/795973f3329437ced3ac942da440810cd0865235/assets/default-config.yaml#L77 diff --git a/site/content/operator/configuration/namespaces.md b/site/content/operator/configuration/namespaces.md new file mode 100644 index 0000000000..3a75aa146c --- /dev/null +++ b/site/content/operator/configuration/namespaces.md @@ -0,0 +1,72 @@ +--- +title: "Namespaces" +menuTitle: "Namespaces" +weight: 12 +chapter: true +--- + +M3DB uses the concept of [namespaces][m3db-namespaces] to determine how metrics are stored and retained. The M3DB +operator allows a user to define their own namespaces, or to use a set of presets we consider to be suitable for +production use cases. + +Namespaces are configured as part of an `m3dbcluster` [spec][api-namespaces]. + +## Presets + +### `10s:2d` + +This preset will store metrics at 10 second resolution for 2 days. For example, in your cluster spec: + +```yaml +spec: +... 
+ namespaces: + - name: metrics-short-term + preset: 10s:2d +``` + +### `1m:40d` + +This preset will store metrics at 1 minute resolution for 40 days. + +```yaml +spec: +... + namespaces: + - name: metrics-long-term + preset: 1m:40d +``` + +## Custom Namespaces + +You can also define your own custom namespaces by setting the `NamespaceOptions` within a cluster spec. The +[API][api-ns-options] lists all available fields. As an example, a namespace to store 7 days of data may look like: +```yaml +... +spec: +... + namespaces: + - name: custom-7d + options: + bootstrapEnabled: true + flushEnabled: true + writesToCommitLog: true + cleanupEnabled: true + snapshotEnabled: true + repairEnabled: false + retentionOptions: + retentionPeriod: 168h + blockSize: 12h + bufferFuture: 20m + bufferPast: 20m + blockDataExpiry: true + blockDataExpiryAfterNotAccessPeriod: 5m + indexOptions: + enabled: true + blockSize: 12h +``` + + +[api-namespaces]: ../api#namespace +[api-ns-options]: ../api#namespaceoptions +[m3db-namespaces]: https://docs.m3db.io/operational_guide/namespace_configuration/ diff --git a/site/content/operator/configuration/node_affinity.md b/site/content/operator/configuration/node_affinity.md new file mode 100644 index 0000000000..e51622d60e --- /dev/null +++ b/site/content/operator/configuration/node_affinity.md @@ -0,0 +1,197 @@ +--- +title: "Node Affinity & Cluster Topology" +menuTitle: "Node Affinity" +weight: 13 +chapter: true +--- + +## Node Affinity + +Kubernetes allows pods to be assigned to nodes based on various critera through [node affinity][k8s-node-affinity]. + +M3DB was built with failure tolerance as a core feature. M3DB's [isolation groups][m3db-isogroups] allow shards to be +placed across failure domains such that the loss of no single domain can cause the cluster to lose quorum. More details +on M3DB's resiliency can be found in the [deployment docs][m3db-deployment]. + +By leveraging Kubernetes' node affinity and M3DB's isolation groups, the operator can guarantee that M3DB pods are +distributed across failure domains. For example, in a Kubernetes cluster spread across 3 zones in a cloud region, the +`isolationGroups` configuration below would guarantee that no single zone failure could degrade the M3DB cluster. + +M3DB is unaware of the underlying zone topology: it just views the isolation groups as `group1`, `group2`, `group3` in +its [placement][m3db-placement]. Thanks to the Kubernetes scheduler, however, these groups are actually scheduled across +separate failure domains. + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-d +``` + +## Tolerations + +In addition to allowing pods to be assigned to certain nodes via node affinity, Kubernetes allows pods to be _repelled_ +from nodes through [taints][k8s-taints] if they don't tolerate the taint. For example, the following config would ensure: + +1. Pods are spread across zones. + +2. Pods are only assigned to nodes in the `m3db-dedicated-pool` pool. + +3. No other pods could be assigned to those nodes (assuming they were tainted with the taint `m3db-dedicated-taint`). 
+ +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - key: nodepool + values: + - m3db-dedicated-pool + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - key: nodepool + values: + - m3db-dedicated-pool + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-d + - key: nodepool + values: + - m3db-dedicated-pool + tolerations: + - key: m3db-dedicated + effect: NoSchedule + operator: Exists +``` + +## Example Affinity Configurations + +### Zonal Cluster + +The examples so far have focused on multi-zone Kubernetes clusters. Some users may only have a cluster in a single zone +and accept the reduced fault tolerance. The following configuration shows how to configure the operator in a zonal +cluster. + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b +``` + +### 6 Zone Cluster + +In the above examples we created clusters with 1 isolation group in each of 3 zones. Because `values` within a single +[NodeAffinityTerm][node-affinity-term] are OR'd, we can also spread an isolationgroup across multiple zones. For +example, if we had 6 zones available to us: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-a + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - us-east1-d + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-e + - us-east1-f +``` + +### No Affinity + +If there are no failure domains available, one can have a cluster with no affinity where the pods will be scheduled however Kubernetes would place them by default: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... 
+spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + - name: group2 + numInstances: 3 + - name: group3 + numInstances: 3 +``` + +[k8s-node-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[k8s-taints]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +[m3db-deployment]: https://docs.m3db.io/operational_guide/replication_and_deployment_in_zones/ +[m3db-isogroups]: https://docs.m3db.io/operational_guide/placement_configuration/#isolation-group +[m3db-placement]: https://docs.m3db.io/operational_guide/placement/ +[node-affinity-term]: ../api/#nodeaffinityterm diff --git a/site/content/operator/configuration/node_endpoint.md b/site/content/operator/configuration/node_endpoint.md new file mode 100644 index 0000000000..0afb147b2d --- /dev/null +++ b/site/content/operator/configuration/node_endpoint.md @@ -0,0 +1,32 @@ +--- +title: "Node Endpoint" +menuTitle: "Node Endpoint" +weight: 14 +chapter: true +--- + +M3DB stores an [`endpoint`][proto] field on placement instances that is used for communication between DB nodes and from +other components such as the coordinator. + +The operator allows customizing the format of this endpoint by setting the `nodeEndpointFormat` field on a cluster spec. +The format of this field uses [Go templates], with the following template fields currently supported: + +| Field | Description | +| ----- | ----------- | +| `PodName` | Name of the pod | +| `M3DBService` | Name of the generated M3DB service | +| `PodNamespace` | Namespace the pod is in | +| `Port` | Port M3DB is serving RPCs on | + +The default format is: +``` +{{ .PodName }}.{{ .M3DBService }}:{{ .Port }} +``` + +As an example of an override, to expose an M3DB cluster to containers in other Kubernetes namespaces `nodeEndpointFormat` can be set to: +``` +{{ .PodName }}.{{ .M3DBService }}.{{ .PodNamespace }}:{{ .Port }} +``` + +[proto]: https://github.com/m3db/m3/blob/9b1dc3051a17620c0a983d60057a9a8c115af9d4/src/cluster/generated/proto/placementpb/placement.proto#L47 +[Go templates]: https://golang.org/pkg/text/template/ diff --git a/site/content/operator/configuration/pod_identity.md b/site/content/operator/configuration/pod_identity.md new file mode 100644 index 0000000000..c78585f59d --- /dev/null +++ b/site/content/operator/configuration/pod_identity.md @@ -0,0 +1,69 @@ +--- +title: "Pod Identity" +menuTitle: "Pod Identity" +weight: 11 +chapter: true +--- + +## Motivation + +M3DB assumes that if a process is started and owns sealed shards marked as `Available` that its data for those shards is +valid and does not have to be fetched from peers. Consequentially this means it will begin serving reads for that data. +For more background on M3DB topology, see the [M3DB topology docs][topology-docs]. + +In most environments in which M3DB has been deployed in production, it has been on a set of hosts predetermined by +whomever is managing the cluster. This means that an M3DB instance is identified in a toplogy by its hostname, and that +when an M3DB process comes up and finds its hostname in the cluster with `Available` shards that it can serve reads for +those shards. + +This does not work on Kubernetes, particularly when working with StatefulSets, as a pod may be rescheduled on a new node +or with new storage attached but its name may stay the same. 
If we were to naively use an instance's hostname (pod +name), and it were to get rescheduled on a new node with no data, it could assume that absence of data is valid and +begin returning empty results for read requests. + +To account for this, the M3DB Operator determines an M3DB instance's identity in the topology based on a configurable +set of metadata about the pod. + +## Configuration + +The M3DB operator uses a configurable set of metadata about a pod to determine its identity in the M3DB placement. This +is encapsulated in the [PodIdentityConfig][pod-id-api] field of a cluster's spec. In addition to the configures sources, +a pod's name will always be included. + +Every pod in an M3DB cluster is annotated with its identity and is passed to the M3DB instance via a downward API +volume. + +### Sources + +This section will be filled out as a number of pending PRs land. + +## Recommendations + +### No Persistent Storage + +If not using PVs, you should set `sources` to `PodUID`: +``` +podIdentityConfig: + sources: + - PodUID +``` + +This way whenever a container is rescheduled, the operator will initiate a replace and it will stream data from its +peers before serving reads. Note that not having persistent storage is not a recommended way to run M3DB. + +### Remote Persistent Storage + +If using remote storage you do not need to set sources, as it will default to just the pods name. The data for an M3DB +instance will move around with its container. + +### Local Persistent Storage + +If using persistent local volumes, you should set sources to `NodeName`. In this configuration M3DB will consider a pod +to be the same so long as it's on the same node. Replaces will only be triggered if a pod with the same name is moved to +a new host. + +Note that if using local SSDs on GKE, node names may stay the same even though a VM has been recreated. We also support +`ProviderID`, which will use the underlying VM's unique ID number in GCE to identity host uniqueness. + +[pod-id-api]: ../api/#podidentityconfig +[topology-docs]: https://docs.m3db.io/operational_guide/placement/ diff --git a/site/content/operator/getting_started/_index.md b/site/content/operator/getting_started/_index.md new file mode 100644 index 0000000000..f8bb8e1357 --- /dev/null +++ b/site/content/operator/getting_started/_index.md @@ -0,0 +1,5 @@ +--- +title: "Getting Started" +weight: 2 +chapter: true +--- \ No newline at end of file diff --git a/site/content/operator/getting_started/create_cluster.md b/site/content/operator/getting_started/create_cluster.md new file mode 100644 index 0000000000..b2f2b346e3 --- /dev/null +++ b/site/content/operator/getting_started/create_cluster.md @@ -0,0 +1,178 @@ +--- +title: "Creating a Cluster" +menuTitle: "Creating a Cluster" +weight: 12 +chapter: true +--- + +Once you've [installed](installation) the M3DB operator and read over the [requirements](requirements), you can start +creating some M3DB clusters! + +## Basic Cluster + +The following creates an M3DB cluster spread across 3 zones, with each M3DB instance being able to store up to 350gb of +data using your Kubernetes cluster's default storage class. For examples of different cluster topologies, such as zonal +clusters, see the docs on [node affinity][node-affinity]. 
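Before applying the manifests below, it can help to confirm what the cluster will actually schedule against: which storage class is marked as the default, and which zones your nodes span (the zone values are what you substitute into the `nodeAffinityTerms` fields). A minimal sketch, assuming `kubectl` is already pointed at the target cluster and that nodes carry the standard `failure-domain.beta.kubernetes.io/zone` label:

```shell
# List storage classes; the default one is marked "(default)".
kubectl get storageclass

# Show each node's zone label to pick values for nodeAffinityTerms.
kubectl get nodes --label-columns failure-domain.beta.kubernetes.io/zone
```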
+ +### Etcd + +Create an etcd cluster with persistent volumes: +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-pd.yaml +``` + +We recommend modifying the `storageClassName` in the manifest to one that matches your cloud provider's fastest remote +storage option, such as `pd-ssd` on GCP. + +### M3DB + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: persistent-cluster +spec: + image: quay.io/m3db/m3dbnode:latest + replicationFactor: 3 + numberOfShards: 256 + isolationGroups: + - name: group1 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-a> + - name: group2 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-b> + - name: group3 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-c> + etcdEndpoints: + - http://etcd-0.etcd:2379 + - http://etcd-1.etcd:2379 + - http://etcd-2.etcd:2379 + podIdentityConfig: + sources: [] + namespaces: + - name: metrics-10s:2d + preset: 10s:2d + dataDirVolumeClaimTemplate: + metadata: + name: m3db-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 350Gi + limits: + storage: 350Gi +``` + +## Ephemeral Cluster + +**WARNING:** This setup is not intended for production-grade clusters, but rather for "kicking the tires" with the +operator and M3DB. It is intended to work across almost any Kubernetes environment, and as such has as few dependencies +as possible (in particular, it does not require persistent storage). See the Basic Cluster section above for instructions on creating a more durable cluster. + +### Etcd + +Create an etcd cluster in the same namespace your M3DB cluster will be created in. If you don't have persistent storage +available, this will create a cluster that will not use persistent storage and will likely become unavailable if any of +the pods die: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-basic.yaml + +# Verify etcd health once pods are available +kubectl exec etcd-0 -- env ETCDCTL_API=3 etcdctl endpoint health +# 127.0.0.1:2379 is healthy: successfully committed proposal: took = 2.94668ms +``` + +If you have remote storage available and would like to jump straight to using it, apply the following manifest for etcd +instead: +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-pd.yaml +``` + +### M3DB + +Once etcd is available, you can create an M3DB cluster. An example of a very basic M3DB cluster definition is as +follows: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: simple-cluster +spec: + image: quay.io/m3db/m3dbnode:latest + replicationFactor: 3 + numberOfShards: 256 + etcdEndpoints: + - http://etcd-0.etcd:2379 + - http://etcd-1.etcd:2379 + - http://etcd-2.etcd:2379 + isolationGroups: + - name: group1 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-a> + - name: group2 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-b> + - name: group3 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - <zone-c> + podIdentityConfig: + sources: + - PodUID + namespaces: + - name: metrics-10s:2d + preset: 10s:2d +``` + +This will create a highly available cluster with RF=3 spread evenly across the three given zones within a region.
A +pod's UID will be used for its [identity][pod-identity]. The cluster will have 1 [namespace](namespace) that stores +metrics for 2 days at 10s resolution. + +Next, apply your manifest: +``` +$ kubectl apply -f example/simple-cluster.yaml +m3dbcluster.operator.m3db.io/simple-cluster created +``` + +Shortly after all pods are created, you should see the cluster ready! + +``` +$ kubectl get po -l operator.m3db.io/app=m3db +NAME READY STATUS RESTARTS AGE +simple-cluster-rep0-0 1/1 Running 0 1m +simple-cluster-rep1-0 1/1 Running 0 56s +simple-cluster-rep2-0 1/1 Running 0 37s +``` + +We can verify that the cluster has finished streaming data from peers by checking that an instance has bootstrapped: +``` +$ kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health +{"ok":true,"status":"up","bootstrapped":true} +``` + +[pod-identity]: ../configuration/pod_identity +[local-volumes]: https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/ +[node-affinity]: ../configuration/node_affinity diff --git a/site/content/operator/getting_started/delete_cluster.md b/site/content/operator/getting_started/delete_cluster.md new file mode 100644 index 0000000000..d3fb8d64a4 --- /dev/null +++ b/site/content/operator/getting_started/delete_cluster.md @@ -0,0 +1,46 @@ +--- +title: "Deleting a Cluster" +menuTitle: "Deleting a Cluster" +weight: 14 +chapter: true +--- + +Delete your M3DB cluster with `kubectl`: +``` +kubectl delete m3dbcluster simple-cluster +``` + +By default, the operator will delete the placement and namespaces associated with a cluster before the CRD resource is +deleted. If you do **not** want this behavior, set `keepEtcdDataOnDelete` to `true` on your cluster spec. + +Under the hood, the operator uses Kubernetes [finalizers] to ensure the cluster CRD is not deleted until the operator +has had a chance to do cleanup. + +## Debugging Stuck Cluster Deletion + +If for some reason the operator is unable to delete the placement and namespace for the cluster, the cluster CRD itself +will be stuck in a state where it cannot be deleted, due to the way finalizers work in Kubernetes. The operator might +be unable to clean up the data for many reasons, for example, if the M3DB cluster itself is not available to serve the +APIs for cleanup or if etcd is down and cannot fulfill the deletion. + +To allow the CRD to be deleted, you can `kubectl edit m3dbcluster $CLUSTER` and remove the +`operator.m3db.io/etcd-deletion` finalizer. For example, in the following cluster you'd remove the finalizer from `metadata.finalizers`: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + ... + finalizers: + - operator.m3db.io/etcd-deletion + name: m3db-cluster +... +``` + +Note that if you do this, you'll have to manually remove the relevant data in etcd. For a cluster in namespace `$NS` +with name `$CLUSTER`, the keys are: + +- `_sd.placement/$NS/$CLUSTER/m3db` +- `_kv/$NS/$CLUSTER/m3db.node.namespaces` + +[finalizers]: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers diff --git a/site/content/operator/getting_started/installation.md b/site/content/operator/getting_started/installation.md new file mode 100644 index 0000000000..97ea3f7c14 --- /dev/null +++ b/site/content/operator/getting_started/installation.md @@ -0,0 +1,34 @@ +--- +title: "Installation" +menuTitle: "Installation" +weight: 11 +chapter: true +--- + +Be sure to take a look at the [requirements](requirements) before installing the operator.
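After installing with either of the methods below, a quick way to confirm the operator is ready is to check that its custom resource definition has been registered. This is only a sketch; the fully qualified CRD name is assumed from the `operator.m3db.io` API group and may differ between operator versions:

```shell
# Check for the M3DB cluster CRD (name assumed; adjust if it differs).
kubectl get crd m3dbclusters.operator.m3db.io

# A looser check that avoids assuming the exact CRD name.
kubectl get crd | grep m3db
```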
+ +## Helm + +1. Add the `m3db-operator` repo: + +``` +helm repo add m3db https://m3-helm-charts.storage.googleapis.com/stable +``` + +2. Install the `m3db-operator` chart: + +``` +helm install m3db-operator m3db/m3db-operator +``` + +**Note**: If uninstalling an instance of the operator that was installed with Helm, some resources such as the +ClusterRole, ClusterRoleBinding, and ServiceAccount may need to be deleted manually. + + +## Manually + +Install the bundled operator manifests in the current namespace: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/bundle.yaml +``` diff --git a/site/content/operator/getting_started/monitoring.md b/site/content/operator/getting_started/monitoring.md new file mode 100644 index 0000000000..303a2bf0ba --- /dev/null +++ b/site/content/operator/getting_started/monitoring.md @@ -0,0 +1,19 @@ +--- +title: "Monitoring" +menuTitle: "Monitoring" +weight: 15 +chapter: true +--- + +M3DB exposes metrics via a Prometheus endpoint. If using the [Prometheus Operator][prometheus-operator], you can apply a +`ServiceMonitor` to have your M3DB pods automatically scraped by Prometheus: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/example/prometheus-servicemonitor.yaml +``` + +You can visit the "targets" page of the Prometheus UI to verify the pods are being scraped. To view these metrics using +Grafana, follow the [M3 docs][m3-grafana] to install the M3DB Grafana dashboard. + +[prometheus-operator]: https://github.com/coreos/prometheus-operator +[m3-grafana]: https://docs.m3db.io/integrations/grafana/ diff --git a/site/content/operator/getting_started/requirements.md b/site/content/operator/getting_started/requirements.md new file mode 100644 index 0000000000..ee6a51e6d7 --- /dev/null +++ b/site/content/operator/getting_started/requirements.md @@ -0,0 +1,37 @@ +--- +title: "Requirements" +menuTitle: "Requirements" +weight: 10 +chapter: true +--- + +## Kubernetes Versions + +The M3DB operator current targets Kubernetes 1.11 and 1.12. Given the operator's current production use cases at Uber, +we typically target the two most recent minor Kubernetes versions supported by GKE. We welcome community contributions +to support more recent versions while meeting the aforementioned GKE targets! + +## Multi-Zone Kubernetes Cluster + +The M3DB operator is intended to be used with Kubernetes clusters that span at least 3 zones within a region to create +highly available clusters and maintain quorum in the event of region failures. Instructions for creating regional +clusters on GKE can be found [here][gke-regional]. + +## Etcd + +M3DB stores its cluster topology and all other runtime metadata in [etcd][etcd]. + +For *testing / non-production use cases*, we provide simple manifests for running etcd on Kubernetes in our [example +manifests][etcd-example]: one for running ephemeral etcd containers and one for running etcd using basic persistent +volumes. If using the `etcd-pd` yaml manifest, we recommend a modification to use a `StorageClass` equivalent to your +cloud provider's fastest remote disk (such as `pd-ssd` on GCP). + +For production use cases, we recommend running etcd (in order of preference): + +1. External to your Kubernetes cluster to avoid circular dependencies. +2. Using the [etcd operator][etcd-operator]. 
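Whichever etcd option you choose, the cluster spec points at it through its `etcdEndpoints` field. As a rough sketch, a cluster backed by etcd running outside Kubernetes might reference it like this (the hostnames are placeholders for your own etcd endpoints):

```yaml
apiVersion: operator.m3db.io/v1alpha1
kind: M3DBCluster
metadata:
  name: external-etcd-cluster
spec:
  # Placeholder endpoints; replace with the addresses of your etcd cluster.
  etcdEndpoints:
  - http://etcd-01.example.internal:2379
  - http://etcd-02.example.internal:2379
  - http://etcd-03.example.internal:2379
```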
+ +[etcd]: https://etcd.io +[etcd-example]: https://github.com/m3db/m3db-operator/tree/master/example/etcd +[etcd-operator]: https://github.com/coreos/etcd-operator +[gke-regional]: https://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters diff --git a/site/content/operator/getting_started/update_cluster.md b/site/content/operator/getting_started/update_cluster.md new file mode 100644 index 0000000000..9a943bfc05 --- /dev/null +++ b/site/content/operator/getting_started/update_cluster.md @@ -0,0 +1,48 @@ +--- +title: "Updating a Cluster" +menuTitle: "Updating a Cluster" +weight: 13 +chapter: true +--- + +After your cluster has been running for some time you may decide you want to change the cluster's +spec. For instance, you may want to upgrade to a newer release of M3DB or modify the cluster's +config file. The operator can be used to safely rollout such changes so you don't need to do +anything other than add an annotation to enable updates. + +The first step in updating a cluster is to update the cluster's `M3DBCluster` CRD with the changes +you want to make. If you manage your cluster via manifests stored in YAML files then this is as +simple as updating the manifest and applying your changes: + +```bash +kubectl apply -f example/my-cluster.yaml +``` + +As a precaution, the operator won't immediately begin updating a cluster after your changes have +been applied. Instead, you'll need to add the following annotation on each `StatefulSet` in the +cluster to indicate to the operator that it is safe to update that `StatefulSet`: + +```bash +kubectl annotate statefulset my-cluster-rep0 operator.m3db.io/update=enabled +``` + +When the operator sees this annotation, it will check if the current state of the `StatefulSet` +differs from its desired state as defined by the `M3DBCluster` CRD. If so, the operator will +update the `StatefulSet` to match its desired state, thereby triggering a rollout of the pods in +the `StatefulSet`. The operator will also remove the `operator.m3db.io/update=enabled` annotation +from the updated `StatefulSet`. + +If, on the other hand, the operator finds the update annotation on a `StatefulSet` but it doesn't +need to be updated then the operator will remove the annotation but perform no other actions. +Consequently, once you set the update annotation on a `StatefulSet`, you can watch for the +annotation to be removed from it to know if the operator has seen and checked for an update. + +Since M3DB rollouts can take longer periods of time, it's often more convenient to set the +annotation to enable updates on each `StatefulSet` in the cluster at once, and allow the operator +to perform the rollout safely. The operator will update only one `StatefulSet` at a time and then +wait for it to bootstrap and become healthy again before moving onto the next `StatefulSet` in the +cluster so that no two replicas are ever down at the same time. + +```bash +kubectl annotate statefulset -l operator.m3db.io/cluster=my-cluster operator.m3db.io/update=enabled +``` diff --git a/site/content/operator/operator.md b/site/content/operator/operator.md new file mode 100644 index 0000000000..36f15d78a4 --- /dev/null +++ b/site/content/operator/operator.md @@ -0,0 +1,31 @@ +--- +title: "M3DB Operator" +menuTitle: "M3DB Operator" +weight: 1 +chapter: true +--- + +This documentation is for the M3DB [Kubernetes operator][operators] which can run and operate [M3DB][m3db] on Kubernetes. 
+ +For more background on the M3DB operator, see our [KubeCon keynote][keynote] on its origins and usage at Uber. + +## Philosophy + +The M3DB operator aims to automate everyday tasks around managing M3DB. Specifically, it aims to automate: + +- Creating M3DB clusters +- Destroying M3DB clusters +- Expanding clusters (adding instances) +- Shrinking clusters (removing instances) +- Replacing failed instances + +It explicitly does not try to automate every single edge case a user may ever run into. For example, it does not aim to +automate disaster recovery if an entire cluster is taken down. Such use cases may still require human intervention, but +the operator will aim to not conflict with any operations a human may have to perform on a cluster. + +Generally speaking, the operator's philosophy is: if **it would be unclear to a human what action to take, we will not +try to guess.** + +[operators]: https://coreos.com/operators/ +[m3db]: https://m3db.io/docs/ +[keynote]: https://kccna18.sched.com/event/Gsxn/keynote-smooth-operator-large-scale-automated-storage-with-kubernetes-celina-ward-software-engineer-matt-schallert-site-reliability-engineer-uber diff --git a/site/content/overview/roadmap.md b/site/content/overview/roadmap.md index 629b300581..6941e619d5 100644 --- a/site/content/overview/roadmap.md +++ b/site/content/overview/roadmap.md @@ -3,14 +3,11 @@ title: "Roadmap" weight: 4 --- -This roadmap is open for suggestions and currently just a small snapshot of what is coming up. +This roadmap is open for contributions and suggestions; it currently only defines near-term goals. -Short: -- Add diagrams of what using M3 looks like (broken down by use case) -- Improve operational guides for the aggregator +Near term: +- Add more diagrams of what M3 deployment architecture looks like by use case - Add tutorials for a variety of use cases -- Add design documentation of reverse index -- Add design documentation of aggregator - -Medium: -- Plan what a v1.0 release looks like +- Improve operational guides for the aggregator +- Overview of design for M3DB reverse index +- Overview of design for M3 aggregator diff --git a/site/content/quickstart/docker.md b/site/content/quickstart/docker.md index 7e576ee2fc..f75f30fcf6 100644 --- a/site/content/quickstart/docker.md +++ b/site/content/quickstart/docker.md @@ -5,7 +5,7 @@ weight: 1 -# Creating a Single Node M3DB Cluster with Docker +## Creating a Single Node M3DB Cluster with Docker This guide shows how to install and configure M3DB, create a single-node cluster, and read and write metrics to it. diff --git a/site/content/quickstart/kubernetes.md b/site/content/quickstart/kubernetes.md new file mode 100644 index 0000000000..5b0a935be9 --- /dev/null +++ b/site/content/quickstart/kubernetes.md @@ -0,0 +1,95 @@ +--- +title: "Kubernetes" +weight: 2 +--- + +## Creating an M3 Cluster with Kubernetes + +This guide shows you how to create an M3 cluster of 3 nodes, designed to run locally on the same machine. It is designed to show you how M3 and Kubernetes can work together, but not as a production example. + +{{% notice note %}} +This guide assumes you have read the [Docker quickstart](/docs/quickstart/docker), and builds upon the concepts in that guide. +{{% /notice %}} + +{{% notice tip %}} +We recommend you use [our Kubernetes operator](/docs/operator/operator) to deploy M3 to a cluster.
It is a more streamlined setup that uses [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to automatically handle operations such as managing cluster placements. +{{% /notice %}} + + +## Prerequisites + +- A running Kubernetes cluster. + - For local testing, you can use [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/), [Docker desktop](https://www.docker.com/products/docker-desktop), or [we have a script](https://raw.githubusercontent.com/m3db/m3db-operator/master/scripts/kind-create-cluster.sh) you can use to start a 3 node cluster with [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/). +- To use the M3DB operator chart, you need [Helm](https://helm.sh/) + +## Create An etcd Cluster + +M3 stores its cluster placements and runtime metadata in [etcd](https://etcd.io) and needs a running cluster to communicate with. + +We have example services and stateful sets you can use, but feel free to use your own configuration and change any later instructions accordingly. + +```shell +kubectl apply -f https://github.com/m3db/m3db-operator/blob/v0.9.0/example/etcd/etcd-minikube.yaml +``` + +If the etcd cluster is running on your local machine, update your _/etc/hosts_ file to match the domains specified in the `etcd` `--initial-cluster` argument. For example to match the `StatefulSet` declaration in the _etcd-minikube.yaml_ above, that is: + +```text +$(minikube ip) etcd-0.etcd +$(minikube ip) etcd-1.etcd +$(minikube ip) etcd-2.etcd +``` + +Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: + +```shell +kubectl exec etcd-0 -- env ETCDCTL_API=3 etcdctl endpoint health +``` + +## Install the Operator with Helm + +Add the M3 operator repository: + +```shell +helm repo add m3db https://m3-helm-charts.storage.googleapis.com/stable +``` + +Install the M3 operator chart: + +```shell +helm install m3db-operator m3db/m3db-operator +``` + +## Create an M3 Cluster + +The following creates an M3 cluster with 3 replicas of data across 256 shards that connects to the 3 available etcd endpoints. + +It creates three isolated groups for nodes, each with one node instance. In a production environment you can use a variety of different options to define how nodes are spread across groups based on factors such as resource capacity, or location. + +It creates namespaces in the cluster with the `namespaces` parameter. You can use M3-provided presets, or define your own. This example creates a namespace with the `10s:2d` preset. + +The cluster derives pod identity from the `podIdentityConfig` parameter, which in this case is the UID of the Pod. + +[Read more details on all the parameters in the Operator API docs](/docs/operator/api/). + +```shell +kubectl apply -f https://github.com/m3db/m3db-operator/blob/v0.9.0/example/m3db-local.yaml +``` + +Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: + +```shell +kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health +``` + +## Deleting a Cluster + +Delete the M3 cluster using kubectl: + +```shell +kubectl delete m3dbcluster simple-cluster +``` + +By default, the operator uses [finalizers](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#finalizers) to delete the placement and namespaces associated with a cluster before the custom resources. 
If you do not want this behavior, set `keepEtcdDataOnDelete` to `true` in the cluster configuration. + +{{% fileinclude file="quickstart/common-steps.md" %}} From 045927ff51b71e59fbf83eee0470b07b80abce5a Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 13:49:01 +0100 Subject: [PATCH 07/15] Temporarily remove glossary Signed-off-by: ChrisChinchilla --- site/content/glossary/bootstrapping.md | 15 --------------- site/content/glossary/cardinality.md | 15 --------------- site/content/glossary/datapoint.md | 15 --------------- site/content/glossary/index.md | 12 ------------ site/content/glossary/labels.md | 16 ---------------- site/content/glossary/m3.md | 16 ---------------- site/content/glossary/m3coordinator.md | 15 --------------- site/content/glossary/m3db.md | 16 ---------------- site/content/glossary/m3query.md | 15 --------------- site/content/glossary/metric.md | 15 --------------- site/content/glossary/namespace.md | 13 ------------- site/content/glossary/placement.md | 14 -------------- site/content/glossary/shard.md | 13 ------------- site/content/glossary/tags.md | 15 --------------- site/content/glossary/timeseries.md | 15 --------------- site/content/glossary/topology.md | 16 ---------------- site/content/quickstart/docker.md | 6 +++--- site/static/about/index.html | 4 ++-- site/static/index.html | 4 ++-- 19 files changed, 7 insertions(+), 243 deletions(-) delete mode 100644 site/content/glossary/bootstrapping.md delete mode 100644 site/content/glossary/cardinality.md delete mode 100644 site/content/glossary/datapoint.md delete mode 100644 site/content/glossary/index.md delete mode 100644 site/content/glossary/labels.md delete mode 100644 site/content/glossary/m3.md delete mode 100644 site/content/glossary/m3coordinator.md delete mode 100644 site/content/glossary/m3db.md delete mode 100644 site/content/glossary/m3query.md delete mode 100644 site/content/glossary/metric.md delete mode 100644 site/content/glossary/namespace.md delete mode 100644 site/content/glossary/placement.md delete mode 100644 site/content/glossary/shard.md delete mode 100644 site/content/glossary/tags.md delete mode 100644 site/content/glossary/timeseries.md delete mode 100644 site/content/glossary/topology.md diff --git a/site/content/glossary/bootstrapping.md b/site/content/glossary/bootstrapping.md deleted file mode 100644 index ff28f8fb3c..0000000000 --- a/site/content/glossary/bootstrapping.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Bootstrapping -id: bootstrapping -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Process by which an M3DB node is brought up. Bootstrapping consists of determining the integrity of data that the node has, replay writes from the commit log, and/or stream missing data from its peers. \ No newline at end of file diff --git a/site/content/glossary/cardinality.md b/site/content/glossary/cardinality.md deleted file mode 100644 index b19d87d362..0000000000 --- a/site/content/glossary/cardinality.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Cardinality -id: cardinality -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -The number of unique metrics within the M3DB index. Cardinality increases with the number of unique tag/value combinations that are being emitted. 
\ No newline at end of file diff --git a/site/content/glossary/datapoint.md b/site/content/glossary/datapoint.md deleted file mode 100644 index bd6db25015..0000000000 --- a/site/content/glossary/datapoint.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Datapoint -id: datapoint -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A single timestamp/value. Timeseries are composed of multiple datapoints and a series of tag/value pairs \ No newline at end of file diff --git a/site/content/glossary/index.md b/site/content/glossary/index.md deleted file mode 100644 index 225afb5e2d..0000000000 --- a/site/content/glossary/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Standardized Glossary -layout: glossary -noedit: true -default_active_tag: test -weight: 5 -card: - name: reference - weight: 10 - title: Glossary ---- - diff --git a/site/content/glossary/labels.md b/site/content/glossary/labels.md deleted file mode 100644 index 3ce06367d8..0000000000 --- a/site/content/glossary/labels.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Labels -id: labels -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Pairs of descriptive words that give meaning to a metric. Tags and Labels are interchangeable terms. - diff --git a/site/content/glossary/m3.md b/site/content/glossary/m3.md deleted file mode 100644 index 56a8077411..0000000000 --- a/site/content/glossary/m3.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: M3 -id: m3 -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Highly scalable, distributed metrics platform that is comprised of a native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and other supporting infrastructure. - diff --git a/site/content/glossary/m3coordinator.md b/site/content/glossary/m3coordinator.md deleted file mode 100644 index b6f0e21394..0000000000 --- a/site/content/glossary/m3coordinator.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: M3Coordinator -id: m3coordinator -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A service within M3 that coordinates reads and writes between upstream systems, such as Prometheus, and downstream systems, such as M3DB. \ No newline at end of file diff --git a/site/content/glossary/m3db.md b/site/content/glossary/m3db.md deleted file mode 100644 index 6b7d90df94..0000000000 --- a/site/content/glossary/m3db.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: M3DB -id: m3db -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Distributed time series database influenced by Gorilla and Cassandra released as open source by Uber Technologies. - diff --git a/site/content/glossary/m3query.md b/site/content/glossary/m3query.md deleted file mode 100644 index 094fcdf1fb..0000000000 --- a/site/content/glossary/m3query.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: M3Query -id: m3query -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A distributed query engine for M3DB. Unlike M3Coordinator, M3Query only provides supports for reads. 
\ No newline at end of file diff --git a/site/content/glossary/metric.md b/site/content/glossary/metric.md deleted file mode 100644 index da6eb1f0f9..0000000000 --- a/site/content/glossary/metric.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Metric -id: metric -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A collection of uniquely identifiable tags. \ No newline at end of file diff --git a/site/content/glossary/namespace.md b/site/content/glossary/namespace.md deleted file mode 100644 index 83ad0941e4..0000000000 --- a/site/content/glossary/namespace.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Namespace -id: namespace -date: 2019-06-15 -full_link: -short_description: Configuration for a set of data -aka: -tags: -- table -- configuration ---- - -Similar to a table in other types of databases, namespaces in M3DB have a unique name and a set of configuration options, such as data retention and block size. \ No newline at end of file diff --git a/site/content/glossary/placement.md b/site/content/glossary/placement.md deleted file mode 100644 index da138d379d..0000000000 --- a/site/content/glossary/placement.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Placement -id: placement -date: 2019-06-15 -full_link: -short_description: Map of cluster shard replicas to nodes -aka: -- topology -tags: -- topology -- shards ---- - -Map of the M3DB cluster's shard replicas to nodes. Each M3DB cluster has only one placement. Placement and Topology are interchangeable terms. \ No newline at end of file diff --git a/site/content/glossary/shard.md b/site/content/glossary/shard.md deleted file mode 100644 index 0e7e016122..0000000000 --- a/site/content/glossary/shard.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Shard -id: shard -date: 2019-06-15 -full_link: -short_description: Distribution of time series data -aka: -tags: -- example -- illustration ---- - -Effectively the same as a "virtual shard" in Cassandra in that it provides an arbitrary distribution of time series data via a simple hash of the series ID. \ No newline at end of file diff --git a/site/content/glossary/tags.md b/site/content/glossary/tags.md deleted file mode 100644 index 7e69e8609a..0000000000 --- a/site/content/glossary/tags.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Tags -id: tags -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Pairs of descriptive words that give meaning to a metric. Tags and Labels are interchangeable terms. \ No newline at end of file diff --git a/site/content/glossary/timeseries.md b/site/content/glossary/timeseries.md deleted file mode 100644 index 63a7b24a23..0000000000 --- a/site/content/glossary/timeseries.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Timeseries -id: timeseries -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A series of data points tracking a particular metric over time. \ No newline at end of file diff --git a/site/content/glossary/topology.md b/site/content/glossary/topology.md deleted file mode 100644 index 0f3770d2f8..0000000000 --- a/site/content/glossary/topology.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Topology -id: topology -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Map of the M3DB cluster's shard replicas to nodes. Each M3DB cluster has only one placement. 
Placement and Topology are interchangeable terms. - diff --git a/site/content/quickstart/docker.md b/site/content/quickstart/docker.md index f75f30fcf6..e0460e05d7 100644 --- a/site/content/quickstart/docker.md +++ b/site/content/quickstart/docker.md @@ -75,14 +75,14 @@ You can find more information on configuring M3DB in the [operational guides sec A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. -As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into {{< glossary_tooltip text="shards" term_id="shard" >}}. +As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into shards. If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3DB uses different terminology to represent some concepts. -- Every cluster has **one** {{< glossary_tooltip text="placement" term_id="placement" >}} that maps shards to nodes in the cluster. -- A cluster can have **0 or more** {{< glossary_tooltip text="namespaces" term_id="namespace" >}} that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. +- Every cluster has **one** placement that maps shards to nodes in the cluster. +- A cluster can have **0 or more** namespaces that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. diff --git a/site/static/about/index.html b/site/static/about/index.html index 30417da50f..ce6d8255f8 100644 --- a/site/static/about/index.html +++ b/site/static/about/index.html @@ -201,7 +201,7 @@

Getting Started @@ -472,7 +472,7 @@

target="_self" rel="noopener" target="_blank" - href="/docs/quickstart" + href="/docs/quickstart/docker" class="crunch-button crunch-button__full-background crunch-button__full-background--gradient-color crunch-button__full-background--arrow-right crunch-button__full-background--large text-white"> Getting Started diff --git a/site/static/index.html b/site/static/index.html index 995f69ea62..f2094d82c9 100644 --- a/site/static/index.html +++ b/site/static/index.html @@ -131,7 +131,7 @@

Open Source Metrics Engine

M3 is a Prometheus compatible, easy to adopt metrics engine that provides visibility for some of the world’s largest brands.
- Getting Started @@ -886,7 +886,7 @@

- Getting Started From 509e050517d37ad654587183dee36f8e1d9583be Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 13:56:25 +0100 Subject: [PATCH 08/15] Update module Signed-off-by: ChrisChinchilla --- site/go.mod | 2 +- site/go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/site/go.mod b/site/go.mod index a2374339d9..9158b62a16 100644 --- a/site/go.mod +++ b/site/go.mod @@ -4,5 +4,5 @@ go 1.15 require ( github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 // indirect - github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 // indirect + github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5 // indirect ) diff --git a/site/go.sum b/site/go.sum index b7f156e972..b595e36b4c 100644 --- a/site/go.sum +++ b/site/go.sum @@ -7,3 +7,5 @@ github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890 h1:yO288wpyv github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 h1:EXZaeDfAkZsOYoP3zCyZlhb+PXZO/PQSmilpTX8bX+0= github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= +github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5 h1:/eksfMA9uddkIKZ5A6zcpVHjASfV6sVuNDXHSAgMtx0= +github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= From 26b02f66c2340b4ff18579f052331dc0e2a63856 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 15:13:49 +0100 Subject: [PATCH 09/15] Remove K8s guide Signed-off-by: ChrisChinchilla --- site/content/quickstart/kubernetes.md | 95 --------------------------- 1 file changed, 95 deletions(-) delete mode 100644 site/content/quickstart/kubernetes.md diff --git a/site/content/quickstart/kubernetes.md b/site/content/quickstart/kubernetes.md deleted file mode 100644 index 5b0a935be9..0000000000 --- a/site/content/quickstart/kubernetes.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "Kubernetes" -weight: 2 ---- - -## Creating an M3 Cluster with Kubernetes - -This guide shows you how to create an M3 cluster of 3 nodes, designed to run locally on the same machine. It is designed to show you how M3 and Kubernetes can work together, but not as a production example. - -{{% notice note %}} -This guide assumes you have read the [Docker quickstart](/docs/quickstart/docker), and builds upon the concepts in that guide. -{{% /notice %}} - -{{% notice tip %}} -We recommend you use [our Kubernetes operator](/docs/operator/operator) to deploy M3 to a cluster. It is a more streamlined setup that uses [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to automatically handle operations such as managing cluster placements. -{{% /notice %}} - - -## Prerequisites - -- A running Kubernetes cluster. - - For local testing, you can use [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/), [Docker desktop](https://www.docker.com/products/docker-desktop), or [we have a script](https://raw.githubusercontent.com/m3db/m3db-operator/master/scripts/kind-create-cluster.sh) you can use to start a 3 node cluster with [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/). 
-- To use the M3DB operator chart, you need [Helm](https://helm.sh/) - -## Create An etcd Cluster - -M3 stores its cluster placements and runtime metadata in [etcd](https://etcd.io) and needs a running cluster to communicate with. - -We have example services and stateful sets you can use, but feel free to use your own configuration and change any later instructions accordingly. - -```shell -kubectl apply -f https://github.com/m3db/m3db-operator/blob/v0.9.0/example/etcd/etcd-minikube.yaml -``` - -If the etcd cluster is running on your local machine, update your _/etc/hosts_ file to match the domains specified in the `etcd` `--initial-cluster` argument. For example to match the `StatefulSet` declaration in the _etcd-minikube.yaml_ above, that is: - -```text -$(minikube ip) etcd-0.etcd -$(minikube ip) etcd-1.etcd -$(minikube ip) etcd-2.etcd -``` - -Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: - -```shell -kubectl exec etcd-0 -- env ETCDCTL_API=3 etcdctl endpoint health -``` - -## Install the Operator with Helm - -Add the M3 operator repository: - -```shell -helm repo add m3db https://m3-helm-charts.storage.googleapis.com/stable -``` - -Install the M3 operator chart: - -```shell -helm install m3db-operator m3db/m3db-operator -``` - -## Create an M3 Cluster - -The following creates an M3 cluster with 3 replicas of data across 256 shards that connects to the 3 available etcd endpoints. - -It creates three isolated groups for nodes, each with one node instance. In a production environment you can use a variety of different options to define how nodes are spread across groups based on factors such as resource capacity, or location. - -It creates namespaces in the cluster with the `namespaces` parameter. You can use M3-provided presets, or define your own. This example creates a namespace with the `10s:2d` preset. - -The cluster derives pod identity from the `podIdentityConfig` parameter, which in this case is the UID of the Pod. - -[Read more details on all the parameters in the Operator API docs](/docs/operator/api/). - -```shell -kubectl apply -f https://github.com/m3db/m3db-operator/blob/v0.9.0/example/m3db-local.yaml -``` - -Verify that the cluster is running with something like the Kubernetes dashboard, or the command below: - -```shell -kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health -``` - -## Deleting a Cluster - -Delete the M3 cluster using kubectl: - -```shell -kubectl delete m3dbcluster simple-cluster -``` - -By default, the operator uses [finalizers](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#finalizers) to delete the placement and namespaces associated with a cluster before the custom resources. If you do not want this behavior, set `keepEtcdDataOnDelete` to `true` in the cluster configuration. - -{{% fileinclude file="quickstart/common-steps.md" %}} From 1ae27655f09aeb4f68006e1f20ba933c606ed3e8 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 15:20:48 +0100 Subject: [PATCH 10/15] Remove edit url for now Signed-off-by: ChrisChinchilla --- site/config.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/site/config.toml b/site/config.toml index 8f41512277..88999c2e34 100644 --- a/site/config.toml +++ b/site/config.toml @@ -104,7 +104,8 @@ offlineSearch = false # Useful to give opportunity to people to create merge request for your doc. # See the config.toml file from this documentation site to have an example. 
# TODO: pattern to branch? - editURL = "https://github.com/m3db/m3/tree/master/site/content/" + # TODO: bring back + # editURL = "https://github.com/m3db/m3/tree/master/site/content/" # Author of the site, will be used in meta information author = "m3" # Description of the site, will be used in meta information From 5a9ee333bd46b2eeff153061854054b378c0b237 Mon Sep 17 00:00:00 2001 From: ChrisChinchilla Date: Mon, 16 Nov 2020 15:31:53 +0100 Subject: [PATCH 11/15] Fix broken operator links Signed-off-by: ChrisChinchilla --- site/content/_index.md | 2 +- site/content/docs.md | 43 +++++++++++++++++++ site/content/how_to/use_as_tsdb.md | 2 +- site/content/index.md | 42 ++++++++++++++++++ .../configuration/configuring_m3db.md | 2 +- .../operator/configuration/namespaces.md | 4 +- .../operator/configuration/node_affinity.md | 2 +- .../operator/configuration/pod_identity.md | 2 +- .../getting_started/create_cluster.md | 8 ++-- .../operator/getting_started/installation.md | 2 +- 10 files changed, 97 insertions(+), 12 deletions(-) create mode 100644 site/content/docs.md create mode 100644 site/content/index.md diff --git a/site/content/_index.md b/site/content/_index.md index b87505f001..371301c2c6 100644 --- a/site/content/_index.md +++ b/site/content/_index.md @@ -29,7 +29,7 @@ Getting started with M3 is as easy as following one of the How-To guides. - [Single M3DB node deployment](/docs/quickstart) - [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/how_to/kubernetes) +- [M3DB on Kubernetes](/docs/operator - [Isolated M3Query on deployment](/docs/how_to/query) ## Support diff --git a/site/content/docs.md b/site/content/docs.md new file mode 100644 index 0000000000..fb49ffef69 --- /dev/null +++ b/site/content/docs.md @@ -0,0 +1,43 @@ +--- +title: M3 Introduction +weight: 1 +permalink: /docs/ +--- + + +## About + +After using open-source metrics solutions and finding issues with them at scale – such as reliability, cost, and +operational complexity – [M3](https://github.com/m3db/m3) was created from the ground up to provide Uber with a +native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and +other supporting infrastructure. + +## Key Features + +M3 has several features, provided as discrete components, which make it an ideal platform for time series data at scale: + +- A distributed time series database, [M3DB](/docs/m3db/), that provides scalable storage for time series data and a reverse index. +- A sidecar process, [M3Coordinator](/docs/integrations/prometheus), that allows M3DB to act as the long-term storage for Prometheus. +- A distributed query engine, [M3Query](/docs/m3query), with native support for PromQL and Graphite (M3QL coming soon). + +- An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions. + +## Getting Started + +**Note:** Make sure to read our [Operational Guides](/docs/operational_guide) before running in production! + +Getting started with M3 is as easy as following one of the How-To guides. 
+ +- [Single M3DB node deployment](/docs/quickstart) +- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) +- [M3DB on Kubernetes](/docs/operator +- [Isolated M3Query on deployment](/docs/how_to/query) + +## Support + +For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be +reached in a variety of ways: + +- [Slack (main chat channel)](http://bit.ly/m3slack) +- [Email](https://groups.google.com/forum/#!forum/m3db) +- [Github issues](https://github.com/m3db/m3/issues) diff --git a/site/content/how_to/use_as_tsdb.md b/site/content/how_to/use_as_tsdb.md index ba67d35fef..1298542b76 100644 --- a/site/content/how_to/use_as_tsdb.md +++ b/site/content/how_to/use_as_tsdb.md @@ -114,7 +114,7 @@ For more details on the compression scheme and its limitations, review [the docu #### M3DB setup -For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/how_to/kubernetes). However, this tutorial will walk you through configuring a single node setup locally for development. +For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/operator. However, this tutorial will walk you through configuring a single node setup locally for development. First, run the following command to pull the latest M3DB image: diff --git a/site/content/index.md b/site/content/index.md new file mode 100644 index 0000000000..c49e850e22 --- /dev/null +++ b/site/content/index.md @@ -0,0 +1,42 @@ +--- +title: M3 Introduction +weight: 1 +--- + + +## About + +After using open-source metrics solutions and finding issues with them at scale – such as reliability, cost, and +operational complexity – [M3](https://github.com/m3db/m3) was created from the ground up to provide Uber with a +native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and +other supporting infrastructure. + +## Key Features + +M3 has several features, provided as discrete components, which make it an ideal platform for time series data at scale: + +- A distributed time series database, [M3DB](/docs/m3db/), that provides scalable storage for time series data and a reverse index. +- A sidecar process, [M3Coordinator](/docs/integrations/prometheus), that allows M3DB to act as the long-term storage for Prometheus. +- A distributed query engine, [M3Query](/docs/m3query), with native support for PromQL and Graphite (M3QL coming soon). + +- An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions. + +## Getting Started + +**Note:** Make sure to read our [Operational Guides](/docs/operational_guide) before running in production! + +Getting started with M3 is as easy as following one of the How-To guides. 
+ +- [Single M3DB node deployment](/docs/quickstart) +- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) +- [M3DB on Kubernetes](/docs/operator +- [Isolated M3Query on deployment](/docs/how_to/query) + +## Support + +For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be +reached in a variety of ways: + +- [Slack (main chat channel)](http://bit.ly/m3slack) +- [Email](https://groups.google.com/forum/#!forum/m3db) +- [Github issues](https://github.com/m3db/m3/issues) diff --git a/site/content/operator/configuration/configuring_m3db.md b/site/content/operator/configuration/configuring_m3db.md index 02af53b40c..3392eadcee 100644 --- a/site/content/operator/configuration/configuring_m3db.md +++ b/site/content/operator/configuration/configuring_m3db.md @@ -30,5 +30,5 @@ metadata: The value of `env` in your config **MUST** be `production/cluster-a`. This restriction allows multiple M3DB clusters to safely share the same etcd cluster. -[spec]: ../api +[spec]: /docs/operator/api [config]: https://github.com/m3db/m3db-operator/blob/795973f3329437ced3ac942da440810cd0865235/assets/default-config.yaml#L77 diff --git a/site/content/operator/configuration/namespaces.md b/site/content/operator/configuration/namespaces.md index 3a75aa146c..feca9d58f2 100644 --- a/site/content/operator/configuration/namespaces.md +++ b/site/content/operator/configuration/namespaces.md @@ -67,6 +67,6 @@ spec: ``` -[api-namespaces]: ../api#namespace -[api-ns-options]: ../api#namespaceoptions +[api-namespaces]: /docs/operator/api#namespace +[api-ns-options]: /docs/operator/api#namespaceoptions [m3db-namespaces]: https://docs.m3db.io/operational_guide/namespace_configuration/ diff --git a/site/content/operator/configuration/node_affinity.md b/site/content/operator/configuration/node_affinity.md index e51622d60e..460fb5dc4d 100644 --- a/site/content/operator/configuration/node_affinity.md +++ b/site/content/operator/configuration/node_affinity.md @@ -194,4 +194,4 @@ spec: [m3db-deployment]: https://docs.m3db.io/operational_guide/replication_and_deployment_in_zones/ [m3db-isogroups]: https://docs.m3db.io/operational_guide/placement_configuration/#isolation-group [m3db-placement]: https://docs.m3db.io/operational_guide/placement/ -[node-affinity-term]: ../api/#nodeaffinityterm +[node-affinity-term]: /docs/operator/api/#nodeaffinityterm diff --git a/site/content/operator/configuration/pod_identity.md b/site/content/operator/configuration/pod_identity.md index c78585f59d..b35a8722ff 100644 --- a/site/content/operator/configuration/pod_identity.md +++ b/site/content/operator/configuration/pod_identity.md @@ -65,5 +65,5 @@ a new host. Note that if using local SSDs on GKE, node names may stay the same even though a VM has been recreated. We also support `ProviderID`, which will use the underlying VM's unique ID number in GCE to identity host uniqueness. 
-[pod-id-api]: ../api/#podidentityconfig +[pod-id-api]: /docs/operator/api/#podidentityconfig [topology-docs]: https://docs.m3db.io/operational_guide/placement/ diff --git a/site/content/operator/getting_started/create_cluster.md b/site/content/operator/getting_started/create_cluster.md index b2f2b346e3..7973a41d95 100644 --- a/site/content/operator/getting_started/create_cluster.md +++ b/site/content/operator/getting_started/create_cluster.md @@ -5,7 +5,7 @@ weight: 12 chapter: true --- -Once you've [installed](installation) the M3DB operator and read over the [requirements](requirements), you can start +Once you've [installed](/docs/operator/getting_started/installation) the M3DB operator and read over the [requirements](/docs/operator/getting_started/requirements), you can start creating some M3DB clusters! ## Basic Cluster @@ -148,7 +148,7 @@ spec: ``` This will create a highly available cluster with RF=3 spread evenly across the three given zones within a region. A -pod's UID will be used for its [identity][pod-identity]. The cluster will have 1 [namespace](namespace) that stores +pod's UID will be used for its [identity][pod-identity]. The cluster will have 1 [namespace](/docs/operator/configuration/namespaces) that stores metrics for 2 days at 10s resolution. Next, apply your manifest: @@ -173,6 +173,6 @@ $ kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health {"ok":true,"status":"up","bootstrapped":true} ``` -[pod-identity]: ../configuration/pod_identity +[pod-identity]: /docs/operator/configuration/pod_identity [local-volumes]: https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/ -[node-affinity]: ../configuration/node_affinity +[node-affinity]: /docs/operator/configuration/node_affinity diff --git a/site/content/operator/getting_started/installation.md b/site/content/operator/getting_started/installation.md index 97ea3f7c14..120353aa33 100644 --- a/site/content/operator/getting_started/installation.md +++ b/site/content/operator/getting_started/installation.md @@ -5,7 +5,7 @@ weight: 11 chapter: true --- -Be sure to take a look at the [requirements](requirements) before installing the operator. +Be sure to take a look at the [requirements](/docs/operator/getting_started/requirements) before installing the operator. ## Helm From b9515c124110fe0ff66c0c3954f035eb32a255b4 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Mon, 16 Nov 2020 09:27:16 -0500 Subject: [PATCH 12/15] Fix broken Kubernetes URLs --- site/content/operator/api.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/site/content/operator/api.md b/site/content/operator/api.md index be1670f077..6c699f08df 100644 --- a/site/content/operator/api.md +++ b/site/content/operator/api.md @@ -58,11 +58,11 @@ ClusterSpec defines the desired state for a M3 cluster to be converge to. | enableCarbonIngester | EnableCarbonIngester enables the listener port for the carbon ingester | bool | false | | configMapName | ConfigMapName specifies the ConfigMap to use for this cluster. If unset a default configmap with template variables for etcd endpoints will be used. See \"Configuring M3DB\" in the docs for more. | *string | false | | podIdentityConfig | PodIdentityConfig sets the configuration for pod identity. If unset only pod name and UID will be used. | *PodIdentityConfig | false | -| containerResources | Resources defines memory / cpu constraints for each container in the cluster. 
| [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#resourcerequirements-v1-core) | false | -| dataDirVolumeClaimTemplate | DataDirVolumeClaimTemplate is the volume claim template for an M3DB instance's data. It claims PersistentVolumes for cluster storage, volumes are dynamically provisioned by when the StorageClass is defined. | *[corev1.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#persistentvolumeclaim-v1-core) | false | +| containerResources | Resources defines memory / cpu constraints for each container in the cluster. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#resourcerequirements-v1-core) | false | +| dataDirVolumeClaimTemplate | DataDirVolumeClaimTemplate is the volume claim template for an M3DB instance's data. It claims PersistentVolumes for cluster storage, volumes are dynamically provisioned by when the StorageClass is defined. | *[corev1.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#persistentvolumeclaim-v1-core) | false | | podSecurityContext | PodSecurityContext allows the user to specify an optional security context for pods. | *corev1.PodSecurityContext | false | | securityContext | SecurityContext allows the user to specify a container-level security context. | *corev1.SecurityContext | false | -| imagePullSecrets | ImagePullSecrets will be added to every pod. | [][corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#localobjectreference-v1-core) | false | +| imagePullSecrets | ImagePullSecrets will be added to every pod. | [][corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#localobjectreference-v1-core) | false | | envVars | EnvVars defines custom environment variables to be passed to M3DB containers. | []corev1.EnvVar | false | | labels | Labels sets the base labels that will be applied to resources created by the cluster. // TODO(schallert): design doc on labeling scheme. | map[string]string | false | | annotations | Annotations sets the base annotations that will be applied to resources created by the cluster. | map[string]string | false | @@ -74,7 +74,7 @@ ClusterSpec defines the desired state for a M3 cluster to be converge to. | externalCoordinator | Specify a \"controlling\" coordinator for the cluster. 
| *[ExternalCoordinatorConfig](#externalcoordinatorconfig) | false | | initContainers | Custom setup for db nodes can be done via initContainers Provide the complete spec for the initContainer here If any storage volumes are needed in the initContainer see InitVolumes below | []corev1.Container | false | | initVolumes | If the InitContainers require any storage volumes Provide the complete specification for the required Volumes here | []corev1.Volume | false | -| podMetadata | PodMetadata is for any Metadata that is unique to the pods, and does not belong on any other objects, such as Prometheus scrape tags | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#objectmeta-v1-meta) | false | +| podMetadata | PodMetadata is for any Metadata that is unique to the pods, and does not belong on any other objects, such as Prometheus scrape tags | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#objectmeta-v1-meta) | false | | parallelPodManagement | ParallelPodManagement sets StatefulSets created by the operator to have Parallel pod management instead of OrderedReady. If nil, this will default to true. | *bool | true | | serviceAccountName | To use a non-default service account, specify the name here otherwise the service account \"default\" will be used. This is useful for advanced use-cases such as pod security policies. The service account must exist. This operator will not create it. | string | false | | frozen | Frozen is used to stop the operator from taking any further actions on a cluster. This is useful when troubleshooting as it guarantees the operator won't make any changes to the cluster. | bool | false | @@ -111,7 +111,7 @@ M3DBCluster defines the cluster | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | -| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#objectmeta-v1-meta) | false | +| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#objectmeta-v1-meta) | false | | type | | string | true | | spec | | [ClusterSpec](#clusterspec) | true | | status | | [M3DBStatus](#m3dbstatus) | false | @@ -124,7 +124,7 @@ M3DBClusterList represents a list of M3DB Clusters | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | -| metadata | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#listmeta-v1-meta) | false | +| metadata | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#listmeta-v1-meta) | false | | items | | [][M3DBCluster](#m3dbcluster) | true | [Back to TOC](#table-of-contents) From beb4925de5c5153bb277b508a1792b993cb58b2e Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Mon, 16 Nov 2020 10:48:49 -0500 Subject: [PATCH 13/15] Fix malformed links --- site/content/_index.md | 2 +- site/content/docs.md | 2 +- site/content/how_to/use_as_tsdb.md | 2 +- site/content/index.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/site/content/_index.md b/site/content/_index.md index 371301c2c6..7272ba3512 100644 --- a/site/content/_index.md +++ b/site/content/_index.md @@ -29,7 +29,7 @@ Getting started with M3 is as easy as following one of the How-To guides. 
- [Single M3DB node deployment](/docs/quickstart) - [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/operator +- [M3DB on Kubernetes](/docs/operator) - [Isolated M3Query on deployment](/docs/how_to/query) ## Support diff --git a/site/content/docs.md b/site/content/docs.md index fb49ffef69..4446d83bf7 100644 --- a/site/content/docs.md +++ b/site/content/docs.md @@ -30,7 +30,7 @@ Getting started with M3 is as easy as following one of the How-To guides. - [Single M3DB node deployment](/docs/quickstart) - [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/operator +- [M3DB on Kubernetes](/docs/operator) - [Isolated M3Query on deployment](/docs/how_to/query) ## Support diff --git a/site/content/how_to/use_as_tsdb.md b/site/content/how_to/use_as_tsdb.md index 1298542b76..53a5da02be 100644 --- a/site/content/how_to/use_as_tsdb.md +++ b/site/content/how_to/use_as_tsdb.md @@ -114,7 +114,7 @@ For more details on the compression scheme and its limitations, review [the docu #### M3DB setup -For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/operator. However, this tutorial will walk you through configuring a single node setup locally for development. +For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/operator). However, this tutorial will walk you through configuring a single node setup locally for development. First, run the following command to pull the latest M3DB image: diff --git a/site/content/index.md b/site/content/index.md index c49e850e22..79fd428af7 100644 --- a/site/content/index.md +++ b/site/content/index.md @@ -29,7 +29,7 @@ Getting started with M3 is as easy as following one of the How-To guides. - [Single M3DB node deployment](/docs/quickstart) - [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/operator +- [M3DB on Kubernetes](/docs/operator) - [Isolated M3Query on deployment](/docs/how_to/query) ## Support From 060b1d76e94bf5564d44ef6c566eb71bcd9d1d26 Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Mon, 16 Nov 2020 10:50:06 -0500 Subject: [PATCH 14/15] Fix quickstart merge --- site/content/quickstart/_index.md | 529 ------------------------------ site/content/quickstart/docker.md | 25 ++ 2 files changed, 25 insertions(+), 529 deletions(-) diff --git a/site/content/quickstart/_index.md b/site/content/quickstart/_index.md index 98e71894eb..1bf4f0ac01 100644 --- a/site/content/quickstart/_index.md +++ b/site/content/quickstart/_index.md @@ -2,532 +2,3 @@ title: "Quickstart" weight: 1 --- - - - -# Creating a Single Node M3DB Cluster with Docker - -This guide shows how to install and configure M3DB, create a single-node cluster, and read and write metrics to it. - -{{% notice warning %}} -Deploying a single-node M3DB cluster is a great way to experiment with M3DB and get an idea of what it has to offer, but is not designed for production use. To run M3DB in clustered mode with a separate M3Coordinator, [read the clustered mode guide](/docs/how_to/cluster_hard_way). -{{% /notice %}} - -## Prerequisites - -- **Docker**: You don't need [Docker](https://www.docker.com/get-started) to run M3DB, but it is the simplest and quickest way. - - If you use Docker Desktop, we recommend the following minimum _Resources_ settings. 
- - _CPUs_: 2 - - _Memory_: 8GB - - _Swap_: 1GB - - _Disk image size_: 16GB -- **JQ**: This example uses [jq](https://stedolan.github.io/jq/) to format the output of API calls. It is not essential for using M3DB. -- **curl**: This example uses curl for communicating with M3DB endpoints. You can also use alternatives such as [Wget](https://www.gnu.org/software/wget/) and [HTTPie](https://httpie.org/). - -## Start Docker Container - -By default the official M3DB Docker image configures a single M3DB instance as one binary containing: - -- An M3DB storage instance for time series storage. It includes an embedded tag-based metrics index and an etcd server for storing the cluster topology and runtime configuration. -- A coordinator instance for writing and querying tagged metrics, as well as managing cluster topology and runtime configuration. - -The Docker container exposes three ports: - -- `7201` to manage the cluster topology, you make most API calls to this endpoint -- `7203` for Prometheus to scrape the metrics produced by M3DB and M3Coordinator - -The command below creates a persistent data directory on the host operating system to maintain durability and persistence between container restarts. - -{{< tabs name="start_container" >}} -{{% tab name="Command" %}} - -```shell -docker run -p 7201:7201 -p 7203:7203 --name m3db -v $(pwd)/m3db_data:/var/lib/m3db quay.io/m3db/m3dbnode:latest -``` - -{{% /tab %}} -{{% tab name="Output" %}} - - - -![Docker pull and run](/docker-install.gif) - -{{% /tab %}} -{{< /tabs >}} - -{{% notice info %}} -When running the command above on Docker for Mac, Docker for Windows, and some Linux distributions you may see errors about settings not being at recommended values. Unless you intend to run M3DB in production on macOS or Windows, you can ignore these warnings. -{{% /notice %}} - -## Configuration - -The single-node cluster Docker image uses this [sample configuration file](https://github.com/m3db/m3/blob/master/src/dbnode/config/m3dbnode-local-etcd.yml) by default. - -The file groups configuration into `coordinator` or `db` sections that represent the `M3Coordinator` and `M3DB` instances of single-node cluster. - - - -{{% notice tip %}} -You can find more information on configuring M3DB in the [operational guides section](/docs/operational_guide/). -{{% /notice %}} - -## Organizing Data with Placements and Namespaces - -A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. - -As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into {{< glossary_tooltip text="shards" term_id="shard" >}}. - - - -If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3DB uses different terminology to represent some concepts. - -- Every cluster has **one** {{< glossary_tooltip text="placement" term_id="placement" >}} that maps shards to nodes in the cluster. -- A cluster can have **0 or more** {{< glossary_tooltip text="namespaces" term_id="namespace" >}} that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. 
- - - -For example, if the cluster placement states that node A owns shards 1, 2, and 3, then node A owns shards 1, 2, 3 for all configured namespaces in the cluster. Each namespace has its own configuration options, including a name and retention time for the data. - -## Create a Placement and Namespace - -This quickstart uses the _{{% apiendpoint %}}database/create_ endpoint that creates a namespace, and the placement if it doesn't already exist based on the `type` argument. - -You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. - - -In another terminal, use the following command. - -{{< tabs name="create_placement_namespace" >}} -{{% tab name="Command" %}} - -{{% codeinclude file="quickstart/create-database.sh" language="shell" %}} - -{{% notice tip %}} -The Docker command used above creates a Docker [persistent volume](https://docs.docker.com/storage/volumes/) to keep the data M3 creates on your host file system between container restarts. If you have already followed this tutorial, the namespace already exists. You can clear the data by deleting the contents of the _m3db_data_ folder, or deleting the namespace with [the DELETE endpoint](/docs/operational_guide/namespace_configuration/#deleting-a-namespace). -{{% /notice %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "namespace": { - "registry": { - "namespaces": { - "default": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": "43200000000000", - "blockSizeNanos": "1800000000000", - "bufferFutureNanos": "120000000000", - "bufferPastNanos": "600000000000", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", - "futureRetentionPeriodNanos": "0" - }, - "snapshotEnabled": true, - "indexOptions": { - "enabled": true, - "blockSizeNanos": "1800000000000" - }, - "schemaOptions": null, - "coldWritesEnabled": false, - "runtimeOptions": null - } - } - } - }, - "placement": { - "placement": { - "instances": { - "m3db_local": { - "id": "m3db_local", - "isolationGroup": "local", - "zone": "embedded", - "weight": 1, - "endpoint": "127.0.0.1:9000", - "shards": [ - { - "id": 0, - "state": "INITIALIZING", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - }, - … - { - "id": 63, - "state": "INITIALIZING", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - } - ], - "shardSetId": 0, - "hostname": "localhost", - "port": 9000, - "metadata": { - "debugPort": 0 - } - } - }, - "replicaFactor": 1, - "numShards": 64, - "isSharded": true, - "cutoverTime": "0", - "isMirrored": false, - "maxShardSetId": 0 - }, - "version": 0 - } -} -``` - -{{< /tab >}} -{{< /tabs >}} - -Placement initialization can take a minute or two. Once all the shards have the `AVAILABLE` state, the node has finished bootstrapping, and you should see the following messages in the node console output. 
- - - -```shell -{"level":"info","ts":1598367624.0117292,"msg":"bootstrap marking all shards as bootstrapped","namespace":"default","namespace":"default","numShards":64} -{"level":"info","ts":1598367624.0301404,"msg":"bootstrap index with bootstrapped index segments","namespace":"default","numIndexBlocks":0} -{"level":"info","ts":1598367624.0301914,"msg":"bootstrap success","numShards":64,"bootstrapDuration":0.049208827} -{"level":"info","ts":1598367624.03023,"msg":"bootstrapped"} -``` - -You can check on the status by calling the _{{% apiendpoint %}}placement_ endpoint: - -{{< tabs name="check_placement" >}} -{{% tab name="Command" %}} - -```shell -curl {{% apiendpoint %}}placement | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "placement": { - "instances": { - "m3db_local": { - "id": "m3db_local", - "isolationGroup": "local", - "zone": "embedded", - "weight": 1, - "endpoint": "127.0.0.1:9000", - "shards": [ - { - "id": 0, - "state": "AVAILABLE", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - }, - … - { - "id": 63, - "state": "AVAILABLE", - "sourceId": "", - "cutoverNanos": "0", - "cutoffNanos": "0" - } - ], - "shardSetId": 0, - "hostname": "localhost", - "port": 9000, - "metadata": { - "debugPort": 0 - } - } - }, - "replicaFactor": 1, - "numShards": 64, - "isSharded": true, - "cutoverTime": "0", - "isMirrored": false, - "maxShardSetId": 0 - }, - "version": 2 -} -``` - -{{% /tab %}} -{{< /tabs >}} - -{{% notice tip %}} -[Read more about the bootstrapping process](/docs/operational_guide/bootstrapping_crash_recovery/). -{{% /notice %}} - -### Readying a Namespace - -Once a namespace has finished bootstrapping, it must be marked as ready before receiving traffic. This can be done by calling the _{{% apiendpoint %}}namespace/ready_. - -{{< tabs name="ready_namespaces" >}} -{{% tab name="Command" %}} - -```shell -curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ - "name": "default" -}' | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "ready": true -} -``` - -{{% /tab %}} -{{< /tabs >}} - -### View Details of a Namespace - -You can also view the attributes of all namespaces by calling the _{{% apiendpoint %}}namespace_ endpoint - -{{< tabs name="check_namespaces" >}} -{{% tab name="Command" %}} - -```shell -curl {{% apiendpoint %}}namespace | jq . -``` - -{{% notice tip %}} -Add `?debug=1` to the request to convert nano units in the output into standard units. -{{% /notice %}} - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "registry": { - "namespaces": { - "default": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": "43200000000000", - "blockSizeNanos": "1800000000000", - "bufferFutureNanos": "120000000000", - "bufferPastNanos": "600000000000", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000", - "futureRetentionPeriodNanos": "0" - }, - "snapshotEnabled": true, - "indexOptions": { - "enabled": true, - "blockSizeNanos": "1800000000000" - }, - "schemaOptions": null, - "coldWritesEnabled": false, - "runtimeOptions": null - } - } - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - -## Writing and Querying Metrics - -### Writing Metrics - -M3 supports ingesting [statsd](https://github.com/statsd/statsd#usage) and [Prometheus](https://prometheus.io/docs/concepts/data_model/) formatted metrics. 
- -This quickstart focuses on Prometheus metrics which consist of a value, a timestamp, and tags to bring context and meaning to the metric. - -You can write metrics using one of two endpoints: - -- _[{{% apiendpoint %}}prom/remote/write](/docs/m3coordinator/api/remote/)_ - Write a Prometheus remote write query to M3DB with a binary snappy compressed Prometheus WriteRequest protobuf message. -- _{{% apiendpoint %}}json/write_ - Write a JSON payload of metrics data. This endpoint is quick for testing purposes but is not as performant for production usage. - -For this quickstart, use the _{{% apiendpoint %}}json/write_ endpoint to write a tagged metric to M3DB with the following data in the request body, all fields are required: - -- `tags`: An object of at least one `name`/`value` pairs -- `timestamp`: The UNIX timestamp for the data -- `value`: The value for the data, can be of any type - -{{% notice tip %}} -The examples below use `__name__` as the name for one of the tags, which is a Prometheus reserved tag that allows you to query metrics using the value of the tag to filter results. -{{% /notice %}} - -{{% notice tip %}} -Label names may contain ASCII letters, numbers, underscores, and Unicode characters. They must match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. Label names beginning with `__` are reserved for internal use. [Read more in the Prometheus documentation](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). -{{% /notice %}} - -{{< tabs name="write_metrics" >}} -{{< tab name="Command 1" >}} - -{{% codeinclude file="quickstart/write-metrics-1.sh" language="shell" %}} - -{{< /tab >}} -{{< tab name="Command 2" >}} - -{{% codeinclude file="quickstart/write-metrics-2.sh" language="shell" %}} - -{{< /tab >}} -{{< tab name="Command 3" >}} - -{{% codeinclude file="quickstart/write-metrics-3.sh" language="shell" %}} - -{{< /tab >}} -{{< /tabs >}} - -### Querying metrics - -M3DB supports three query engines: Prometheus (default), Graphite, and the M3 Query Engine. - -This quickstart uses Prometheus as the query engine, and you have access to [all the features of PromQL queries](https://prometheus.io/docs/prometheus/latest/querying/basics/). - -To query metrics, use the _{{% apiendpoint %}}query_range_ endpoint with the following data in the request body, all fields are required: - -- `query`: A PromQL query -- `start`: Timestamp in `RFC3339Nano` of start range for results -- `end`: Timestamp in `RFC3339Nano` of end range for results -- `step`: A duration or float of the query resolution, the interval between results in the timespan between `start` and `end`. - -Below are some examples using the metrics written above. - -#### Return results in past 45 seconds - -{{< tabs name="example_promql_regex" >}} -{{% tab name="Linux" %}} - - - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue" \ - -d "start=$(date "+%s" -d "45 seconds ago")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="macOS/BSD" %}} - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue" \ - -d "start=$( date -v -45S +%s )" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . 
-``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "status": "success", - "data": { - "resultType": "matrix", - "result": [ - { - "metric": { - "__name__": "third_avenue", - "checkout": "1", - "city": "new_york" - }, - "values": [ - [ - {{% now %}}, - "3347.26" - ], - [ - {{% now %}}, - "5347.26" - ], - [ - {{% now %}}, - "7347.26" - ] - ] - } - ] - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - -#### Values above a certain number - -{{< tabs name="example_promql_range" >}} -{{% tab name="Linux" %}} - - - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue > 6000" \ - -d "start=$(date "+%s" -d "45 seconds ago")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="macOS/BSD" %}} - -```shell -curl -X "POST" -G "{{% apiendpoint %}}query_range" \ - -d "query=third_avenue > 6000" \ - -d "start=$(date -v -45S "+%s")" \ - -d "end=$( date +%s )" \ - -d "step=5s" | jq . -``` - -{{% /tab %}} -{{% tab name="Output" %}} - -```json -{ - "status": "success", - "data": { - "resultType": "matrix", - "result": [ - { - "metric": { - "__name__": "third_avenue", - "checkout": "1", - "city": "new_york" - }, - "values": [ - [ - {{% now %}}, - "7347.26" - ] - ] - } - ] - } -} -``` - -{{% /tab %}} -{{< /tabs >}} - - diff --git a/site/content/quickstart/docker.md b/site/content/quickstart/docker.md index e0460e05d7..104f57995b 100644 --- a/site/content/quickstart/docker.md +++ b/site/content/quickstart/docker.md @@ -267,6 +267,31 @@ curl {{% apiendpoint %}}placement | jq . [Read more about the bootstrapping process](/docs/operational_guide/bootstrapping_crash_recovery/). {{% /notice %}} +### Readying a Namespace + +Once a namespace has finished bootstrapping, it must be marked as ready before receiving traffic. This can be done by calling the _{{% apiendpoint %}}namespace/ready_. + +{{< tabs name="ready_namespaces" >}} +{{% tab name="Command" %}} + +```shell +curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ + "name": "default" +}' | jq . +``` + +{{% /tab %}} +{{% tab name="Output" %}} + +```json +{ + "ready": true +} +``` + +{{% /tab %}} +{{< /tabs >}} + ### View Details of a Namespace You can also view the attributes of all namespaces by calling the _{{% apiendpoint %}}namespace_ endpoint From 5793b654ba8e532e7ee87b111eebdcc70accc66e Mon Sep 17 00:00:00 2001 From: Rob Skillington Date: Mon, 16 Nov 2020 10:57:19 -0500 Subject: [PATCH 15/15] Add barebones kubernetes guide --- site/content/quickstart/kubernetes.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 site/content/quickstart/kubernetes.md diff --git a/site/content/quickstart/kubernetes.md b/site/content/quickstart/kubernetes.md new file mode 100644 index 0000000000..812995fe56 --- /dev/null +++ b/site/content/quickstart/kubernetes.md @@ -0,0 +1,10 @@ +--- +title: "Kubernetes" +weight: 2 +--- + +## Create a M3DB Cluster on Kubernetes + +1. Meet the M3DB Kubernetes operator [requirements guide](/docs/operator/getting_started/requirements). +2. Follow the M3DB Kubernetes operator [installation guide](/docs/operator/getting_started/installation). +3. Read the M3DB Kubernetes operator [configuration guide](/docs/operator/configuration/configuring_m3db) and configure [namespaces](/docs/operator/configuration/namespaces).
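+
+After completing the three steps above, creating a cluster comes down to applying an `M3DBCluster` manifest. The sketch below is illustrative only: the field names follow the operator's `ClusterSpec` reference (for example `etcdEndpoints`, `isolationGroups`, and `namespaces`), but the cluster name, endpoints, sizing, and namespace preset are placeholder values, and the CRD `apiVersion` should be checked against the operator release you installed. Treat the configuration guide in step 3 as the authoritative source.
+
+```shell
+# Illustrative sketch only. Assumes the m3db-operator and a three-node etcd
+# cluster are already running in the target Kubernetes namespace, and that
+# the installed CRD serves operator.m3db.io/v1alpha1. All names, endpoints,
+# and sizes below are placeholder values.
+kubectl apply -f - <<EOF
+apiVersion: operator.m3db.io/v1alpha1
+kind: M3DBCluster
+metadata:
+  name: quickstart-cluster
+spec:
+  image: quay.io/m3db/m3dbnode:latest
+  replicationFactor: 3
+  numberOfShards: 256
+  etcdEndpoints:
+    - http://etcd-0.etcd:2379
+    - http://etcd-1.etcd:2379
+    - http://etcd-2.etcd:2379
+  isolationGroups:
+    - name: group1
+      numInstances: 1
+    - name: group2
+      numInstances: 1
+    - name: group3
+      numInstances: 1
+  namespaces:
+    - name: default
+      preset: 10s:2d
+EOF
+```
+
+Once the pods report ready, the coordinator API on port 7201 (the same API used in the Docker quickstart) is typically reachable through the coordinator service the operator creates for the cluster.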