From 08d47425d9d04687e43f9976c22baad7160170fe Mon Sep 17 00:00:00 2001
From: yeya24
Date: Sat, 26 Dec 2020 15:55:11 -0500
Subject: [PATCH] fix some typos

Signed-off-by: yeya24
---
 cmd/agentctl/main.go                           | 4 ++--
 docs/configuration-reference.md                | 2 +-
 docs/getting-started.md                        | 4 ++--
 docs/maintaining.md                            | 2 +-
 docs/operation-guide.md                        | 4 ++--
 docs/overview.md                               | 2 +-
 pkg/config/config.go                           | 2 +-
 pkg/integrations/manager.go                    | 2 +-
 pkg/prom/instance/instance.go                  | 2 +-
 pkg/tempo/promsdprocessor/prom_sd_processor.go | 6 +++---
 pkg/tempo/tempo.go                             | 2 +-
 11 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/cmd/agentctl/main.go b/cmd/agentctl/main.go
index 8ff91517f0f7..215901823895 100644
--- a/cmd/agentctl/main.go
+++ b/cmd/agentctl/main.go
@@ -152,8 +152,8 @@ func targetStatsCmd() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "target-stats [WAL directory]",
-		Short: "Discover statitics on a specific target within the WAL.",
-		Long: `target-stats computes statitics on a specific target within the WAL at
+		Short: "Discover statistics on a specific target within the WAL.",
+		Long: `target-stats computes statistics on a specific target within the WAL at
 greater detail than the general wal-stats. The statistics computed is the
 cardinality of all series within that target.
 
diff --git a/docs/configuration-reference.md b/docs/configuration-reference.md
index cf11d88005c4..f313ab0c4ce4 100644
--- a/docs/configuration-reference.md
+++ b/docs/configuration-reference.md
@@ -330,7 +330,7 @@ grpc_client_config:
 ### global_config
 
 The `global_config` block configures global values for all launched Prometheus
-instanes.
+instances.
 
 ```yaml
 # How frequently should Prometheus instances scrape.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index db34f1c51f86..b2a53c851253 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -120,7 +120,7 @@ instance).
 When the Agent is run with this file, it will collect metrics from itself and
 send those metrics to the `remote_write` endpoint. All metrics will have (by
 default) an `agent_hostname` label equal to the hostname of the machine the
-Agent is running on. This label helps to uniquly identify the source of metrics
+Agent is running on. This label helps to uniquely identify the source of metrics
 if you run multiple Agent processes across multiple machines.
 
 Full configuration options can be found in the
@@ -274,7 +274,7 @@ docker run \
 
 ### Locally
 
-This section is only relavant if you installed the static binary of the
+This section is only relevant if you installed the static binary of the
 Agent. We do not yet provide system packages or configurations to run the
 Agent as a daemon process.
diff --git a/docs/maintaining.md b/docs/maintaining.md
index bb0626028ddb..c8e5cc722cf6 100644
--- a/docs/maintaining.md
+++ b/docs/maintaining.md
@@ -13,7 +13,7 @@ before releasing the Grafana Cloud Agent.
 #### Prerelease testing
 
 For testing a release, run the [K3d example](../example/k3d/README.md) locally.
-Let it run for about 90 minutes, keeping an occassional eye on the Agent
+Let it run for about 90 minutes, keeping an occasional eye on the Agent
 Operational dashboard (noting that metrics from the scraping service will take
 time to show up). After 90 minutes, if nothing has crashed and you see metrics
 for both the scraping service and the non-scraping service, the Agent is ready
diff --git a/docs/operation-guide.md b/docs/operation-guide.md
index 58b2b7ba707d..4d4324a0bde5 100644
--- a/docs/operation-guide.md
+++ b/docs/operation-guide.md
@@ -26,7 +26,7 @@ isn't defined, the Agent will use Go's [os.Hostname](https://golang.org/pkg/os/#
 to determine the hostname.
 
 The following meta-labels are used to determine if a target is running on the
-same machine as the the target:
+same machine as the target:
 
 - `__address__`
 - `__meta_consul_node`
@@ -61,7 +61,7 @@ Prometheus service discovery, scraping, a WAL for storage, and `remote_write`.
 
 Instances allow for fine grained control of what data gets scraped and where
 it gets sent. Users can easily define two Instances that scrape different subsets
-of metrics and send them two two completely different remote_write systems.
+of metrics and send them to two completely different remote_write systems.
 
 Instances are especially relevant to the
 [scraping service mode](./scraping-service.md), where breaking up your scrape configs into
diff --git a/docs/overview.md b/docs/overview.md
index 040b836202e6..9a2c237226dc 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -101,7 +101,7 @@ and metadata label propagation. While these features could theoretically be
 added to Telegraf as OSS contributions, there would be a lot of forced hacks
 involved due to its current design.
 
-Additonally, Telegraf is a much larger project with its own goals for its community,
+Additionally, Telegraf is a much larger project with its own goals for its community,
 so any changes need to fit the general use cases it was designed for. With the
 Grafana Cloud Agent as its own project, we can deliver a more curated agent
diff --git a/pkg/config/config.go b/pkg/config/config.go
index edd690fe3f50..460b9ad6e2d6 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -97,7 +97,7 @@ func Load(fs *flag.FlagSet, args []string) (*Config, error) {
 	return load(fs, args, LoadFile)
 }
 
-// load allows for tests to inject a function for retreiving the config file that
+// load allows for tests to inject a function for retrieving the config file that
 // doesn't require having a literal file on disk.
 func load(fs *flag.FlagSet, args []string, loader func(string, bool, *Config) error) (*Config, error) {
 	var (
diff --git a/pkg/integrations/manager.go b/pkg/integrations/manager.go
index ac373163410d..357da18d0203 100644
--- a/pkg/integrations/manager.go
+++ b/pkg/integrations/manager.go
@@ -363,7 +363,7 @@ func (m *Manager) WireAPI(r *mux.Router) error {
 	return nil
 }
 
-// Stop stops the maanger and all of its integrations.
+// Stop stops the manager and all of its integrations.
 func (m *Manager) Stop() {
 	m.cancel()
 	<-m.done
diff --git a/pkg/prom/instance/instance.go b/pkg/prom/instance/instance.go
index e895dffae6f3..1ef2f1815475 100644
--- a/pkg/prom/instance/instance.go
+++ b/pkg/prom/instance/instance.go
@@ -746,7 +746,7 @@ func getHash(data interface{}) (string, error) {
 // pulling metric values from a given metric name and label matchers.
 //
 // This is used by the agent instances to find the most recent timestamp
-// successfully remote_written to for pruposes of safely truncating the WAL.
+// successfully remote_written to for purposes of safely truncating the WAL.
 //
 // MetricValueCollector is only intended for use with Gauges and Counters.
 type MetricValueCollector struct {
diff --git a/pkg/tempo/promsdprocessor/prom_sd_processor.go b/pkg/tempo/promsdprocessor/prom_sd_processor.go
index 60e395378472..e09dc38a826a 100644
--- a/pkg/tempo/promsdprocessor/prom_sd_processor.go
+++ b/pkg/tempo/promsdprocessor/prom_sd_processor.go
@@ -145,10 +145,10 @@ func (p *promServiceDiscoProcessor) watchServiceDiscovery() {
 	for {
 		// p.discoveryMgr.SyncCh() is never closed so we need to watch the context as well to properly exit this goroutine
 		select {
-		case targetGoups := <-p.discoveryMgr.SyncCh():
+		case targetGroups := <-p.discoveryMgr.SyncCh():
 			hostLabels := make(map[string]model.LabelSet)
-			level.Debug(p.logger).Log("msg", "syncing target groups", "count", len(targetGoups))
-			for jobName, groups := range targetGoups {
+			level.Debug(p.logger).Log("msg", "syncing target groups", "count", len(targetGroups))
+			for jobName, groups := range targetGroups {
 				p.syncGroups(jobName, groups, hostLabels)
 			}
 			p.mtx.Lock()
diff --git a/pkg/tempo/tempo.go b/pkg/tempo/tempo.go
index 2f2613f8623f..4eb8c68ea18d 100644
--- a/pkg/tempo/tempo.go
+++ b/pkg/tempo/tempo.go
@@ -23,7 +23,7 @@ import (
 	"go.opentelemetry.io/collector/service/builder"
 )
 
-// Tempo wraps the OpenTelemetry collector to enablet tracing pipelines
+// Tempo wraps the OpenTelemetry collector to enable tracing pipelines
 type Tempo struct {
 	logger      *zap.Logger
 	metricViews []*view.View