[Main]
# The host of the Datadog intake server to send Agent data to
dd_url: https://app.datadoghq.com
# If you need a proxy to connect to the Internet, provide the settings here (default: disabled)
# proxy_host: my-proxy.com
# proxy_port: 3128
# proxy_user: user
# proxy_password: password
# To be used with some proxies that return a 302, which makes curl switch from POST to GET
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
# proxy_forbid_method_switch: no
# If you run the agent behind haproxy, you might want to enable this
# skip_ssl_validation: no
# The Datadog API key to associate your Agent's data with your organization.
# Can be found here:
# https://app.datadoghq.com/account/settings
# (default: None, the agent doesn't start without it)
api_key:
# Force the hostname to whatever you want. (default: auto-detected)
# hostname: mymachine.mydomain
# Enable the trace agent.
# apm_enabled: false
# Enable gohai metadata collection
# enable_gohai: true
# Enable the process agent
# process_agent_enabled: false
# Set the host's tags (optional)
# tags: mytag, env:prod, role:database
# Collect docker orchestrator name and version as agent tags
# collect_orchestrator_tags: yes
# Set the timeout in seconds for outgoing requests to Datadog. (default: 20)
# When a request times out, it is retried after some time.
# It is only dropped if the forwarder queue grows too big. (30 MB by default)
# forwarder_timeout: 20
# Set the timeout in seconds for integrations that use HTTP to fetch metrics;
# an unbounded timeout could block the collector indefinitely.
# default_integration_http_timeout: 9
# Add one "dd_check:checkname" tag per running check. It makes it possible to slice
# and dice per monitored app (= running Agent Check) on Datadog's backend.
# create_dd_check_tags: no
# Collect AWS EC2 custom tags as agent tags (requires an IAM role associated with the instance)
# collect_ec2_tags: no
# Incorporate security-groups into tags collected from AWS EC2
# collect_security_groups: no
# Enable Agent Developer Mode
# Agent Developer Mode collects and sends more fine-grained metrics about agent and check performance
# developer_mode: no
# In developer mode, the number of runs to be included in a single collector profile
# collector_profile_interval: 20
# In developer mode, set to False to disable profiling altogether while still collecting fine-grained agent metrics
# allow_profiling: False
# Use a unique hostname for GCE hosts, see http://dtdg.co/2h3Bjpd
# (default when not specified: no)
gce_updated_hostname: yes
# When using OpenStack, use the instance's "uuid":
# the OpenStack instance-id can be recycled, but the uuid cannot
# openstack_use_uuid: no
# When using OpenStack, use the instance's metadata as tags
# openstack_use_metadata_tags: yes
# Set the threshold for accepting metric points: anything within
# recent_point_threshold seconds of now is accepted (default: 30)
# recent_point_threshold: 30
# Use mount points instead of volumes to track disk and fs metrics
# DEPRECATED: use conf.d/disk.yaml instead to configure it
# use_mount: no
# Forwarder listening port
# listen_port: 17123
# Graphite listener port
# graphite_listen_port: 17124
# Additional directory to look for Datadog checks (optional)
# additional_checksd: /etc/dd-agent/checks.d/
# Allow non-local traffic to this Agent
# This is required when using this Agent as a proxy for other Agents
# that might not have an internet connection
# For more information, please see
# https://github.com/DataDog/dd-agent/wiki/Network-Traffic-and-Proxy-Configuration
# non_local_traffic: no
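#
# A minimal proxying-Agent sketch (the hostname below is hypothetical): set
# non_local_traffic: yes on the proxying Agent, then point the other Agents'
# dd_url at its forwarder (which listens on listen_port, 17123 by default):
#   dd_url: http://proxy-agent.internal:17123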
# Select the Tornado HTTP client to be used in the Forwarder:
# the curl client or the simple HTTP client (default: simple HTTP client)
# use_curl_http_client: no
# Select whether the Tornado HTTP Client used in the Forwarder should use IPv6, if available (default: yes)
# allow_ipv6: yes
# The loopback address the Forwarder and Dogstatsd will bind to.
# Optional; mainly used when running the agent on OpenShift
# bind_host: localhost
# If enabled, the collector captures a metric for each check's run time.
# check_timings: no
# Remove the 'ww' flag from the ps call used to catch process arguments,
# for instance for security reasons
# exclude_process_args: no
# Configure which aggregates (reported as metric.max, metric.median, metric.avg,
# metric.count, ...) and percentiles (reported as metric.95percentile, ...) are
# computed for histograms
# histogram_aggregates: max, median, avg, count
# histogram_percentiles: 0.95
# ========================================================================== #
# Service Discovery #
# See https://docs.datadoghq.com/guides/autodiscovery/ for details #
# ========================================================================== #
#
# Service discovery allows the agent to look for running services
# and load a configuration object for the ones it recognizes.
# This feature is disabled by default.
# Uncomment this line to enable it (works for docker containers only for now).
# service_discovery_backend: docker
#
# Define which key/value store should be used to look for configuration templates.
# Default is etcd. Consul is also supported.
# sd_config_backend: etcd
#
# Settings for connecting to the service discovery backend.
# sd_backend_host: 127.0.0.1
# sd_backend_port: 4001
#
# If you use etcd, you can set a username and password for auth
#
# sd_backend_username:
# sd_backend_password:
#
# By default, the agent will look for the configuration templates under the
# `/datadog/check_configs` key in the back-end. If you wish otherwise, uncomment this option
# and modify its value.
# sd_template_dir: /datadog/check_configs
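#
# A sketch of the expected key layout under that prefix (the image name and
# instance values below are illustrative; see the autodiscovery guide above):
#   /datadog/check_configs/nginx/check_names:  ["nginx"]
#   /datadog/check_configs/nginx/init_configs: [{}]
#   /datadog/check_configs/nginx/instances:    [{"nginx_status_url": "http://%%host%%/nginx_status/"}]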
#
# If your Consul store requires token authentication for service discovery, you can define that token here.
# consul_token:
#
# Enable JMX checks for service discovery
# sd_jmx_enable: no
#
# If you require custom jars to be loaded for JMX checks, you can set their paths here
# instead of setting them in the checks' custom_jar_paths option
# jmx_custom_jars: /jmx-jars/jboss-cli-client.jar:/my/other/custom.jar
#
#
# JMXFetch collects multiple instances concurrently. The following settings may
# help fine-tune the level of concurrency and the timeouts that come into play
# during the collection of metrics from configured instances:
#
# Defines the maximum level of concurrency. Higher concurrency increases CPU
# utilization during metric collection. Lower concurrency results in lower CPU
# usage, but also fewer workers available to continue work when some instances
# time out - a value of 1 will process instances serially, each taking up to
# `jmx_collection_timeout` seconds.
# jmx_thread_pool_size: 3
#
# Defines the maximum waiting period in seconds before timing out on metric collection.
# jmx_collection_timeout: 60
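#
# As a rough worked example: with jmx_thread_pool_size: 3 and
# jmx_collection_timeout: 60, nine consistently slow instances could take up
# to (9 / 3) * 60 = 180 seconds of wall-clock time per collection.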
#
# Defines the maximum level of concurrency in the reconnection pool. Higher
# concurrency increases CPU utilization during instance recovery/reconnection.
# Lower concurrency results in lower CPU usage, but also fewer workers available
# to continue work when some instances time out - a value of 1 will process
# instances serially, each taking up to `jmx_reconnection_timeout` seconds.
# jmx_reconnection_thread_pool_size: 3
#
# Determines the maximum waiting period in seconds before timing out on instance reconnection.
# jmx_reconnection_timeout: 10
#
#
# Docker labels as tags
# Docker labels can be extracted and added as tags to all metrics reported by
# service discovery. Supply a comma-separated list of label names to extract
# from containers when found.
# These tags will also be applied to the docker_daemon check.
# docker_labels_as_tags: label_name
#
# Example:
# docker_labels_as_tags: com.docker.compose.service, com.docker.compose.project
#
# ========================================================================== #
# Other #
# ========================================================================== #
#
# In some environments the procfs file system may be mounted in a
# non-standard location. The procfs_path configuration parameter allows
# you to override the standard default location '/proc'
# procfs_path: /proc
# Disable Python3 validation of python checks
# disable_py3_validation: false
# ========================================================================== #
# DogStatsd configuration #
# DogStatsd is a small server that aggregates your custom app metrics. For #
# usage information, check out http://docs.datadoghq.com/guides/dogstatsd/ #
# ========================================================================== #
# If you don't want to enable the DogStatsd server, set this option to no
# use_dogstatsd: yes
# Make sure your client is sending to the same port.
# dogstatsd_port: 8125
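#
# A minimal sketch of a client sending a counter over the dogstatsd protocol
# (the metric name and tag below are illustrative):
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   # format: metric.name:value|type|#tag1:value1,tag2
#   sock.sendto(b"page.views:1|c|#env:prod", ("127.0.0.1", 8125))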
# By default dogstatsd will post aggregated metrics to the Agent (which handles
# errors/timeouts/retries/etc.). To send directly to the Datadog API, set this
# to https://app.datadoghq.com.
# dogstatsd_target: http://localhost:17123
# If you want to forward every packet received by the dogstatsd server
# to another statsd server, uncomment these lines.
# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets,
# as your other statsd server might not be able to handle them.
# statsd_forward_host: address_of_own_statsd_server
# statsd_forward_port: 8125
# You may want all statsd metrics coming from this host to be namespaced
# in some way; if so, configure your namespace here. A metric that looks
# like `metric.name` will instead become `namespace.metric.name`
# statsd_metric_namespace:
# By default, dogstatsd supports only plain ASCII packets. However, most
# (dog)statsd clients support UTF-8 by encoding packets before sending them;
# this option enables UTF-8 decoding in case you need it.
# Note that it comes with a performance overhead of ~10% in the dogstatsd
# server. This will be taken care of properly in the new-generation agent core.
# utf8_decoding: false
# The number of bytes allocated to the statsd socket receive buffer. By default,
# this value is set to the value of `/proc/sys/net/core/rmem_default`. If you
# need to increase the size of this buffer but keep the OS default value the
# same, you can set dogstatsd's receive buffer size here. The maximum allowed
# value is the value of `/proc/sys/net/core/rmem_max`.
# statsd_so_rcvbuf:
# ========================================================================== #
# Service-specific configuration #
# ========================================================================== #
# -------------------------------------------------------------------------- #
# Ganglia #
# DEPRECATED feature, removed from Agent v6 #
# -------------------------------------------------------------------------- #
# Ganglia host where gmetad is running
# ganglia_host: localhost
# Ganglia port where gmetad is running
# ganglia_port: 8651
# -------------------------------------------------------------------------- #
# Dogstream (log file parser) #
# DEPRECATED feature, removed from Agent v6 #
# -------------------------------------------------------------------------- #
# Comma-separated list of logs to parse and optionally custom parsers to use.
# The form should look like this:
#
# dogstreams: /path/to/log1:parsers_module:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Or this:
#
# dogstreams: /path/to/log1:/path/to/my/parsers_module.py:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Each entry is a path to a log file and optionally a Python module/function pair
# separated by colons.
#
# Custom parsers should take 2 parameters: a logger object and
# a string holding the current line to parse. They should return a tuple of
# the form:
# (metric (str), timestamp (unix timestamp), value (float), attributes (dict))
# where attributes should at least contain the key 'metric_type', specifying
# whether the given metric is a 'counter' or 'gauge'.
#
# Unless parsers are specified with an absolute path, the modules must exist in
# the Agent's PYTHONPATH. You can set this as an environment variable when
# starting the Agent. If the name of the custom parser function is not passed,
# 'parser' is assumed.
#
# If this value isn't specified, the default parser assumes this log format:
# metric timestamp value key0=val0 key1=val1 ...
#
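# A minimal custom parser sketch following the contract above (the module path
# and log format are illustrative):
#
#   # /path/to/my/parsers_module.py
#   def custom_parser(logger, line):
#       # expects lines like: "metric_name 1430000000 42.0"
#       name, timestamp, value = line.split()[:3]
#       return (name, int(timestamp), float(value), {'metric_type': 'gauge'})
#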
# ========================================================================== #
# Custom Emitters #
# ========================================================================== #
# Comma-separated list of emitters to be used in addition to the standard one
#
# Expected to be passed as a comma-separated list of colon-delimited
# name/object pairs.
#
# custom_emitters: /usr/local/my-code/emitters/rabbitmq.py:RabbitMQEmitter
#
# If the name of the emitter function is not specified, 'emitter' is assumed.
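#
# A minimal custom emitter sketch. The exact call signature is not documented
# here; this assumes the emitter is invoked with the payload, a logger, and the
# agent configuration (the path and behavior below are illustrative):
#
#   # /usr/local/my-code/emitters/rabbitmq.py
#   def emitter(message, log, agentConfig):
#       # forward the payload to your own system instead of
#       # (or in addition to) the standard emitter
#       log.info("emitting %d top-level keys", len(message))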
# ========================================================================== #
# Logging
# ========================================================================== #
# log_level: INFO
# collector_log_file: /var/log/datadog/collector.log
# forwarder_log_file: /var/log/datadog/forwarder.log
# dogstatsd_log_file: /var/log/datadog/dogstatsd.log
# If syslog is enabled but a host and port are not set, a local domain socket
# connection will be attempted
#
# log_to_syslog: yes
# syslog_host:
# syslog_port:
# ========================================================================== #
# Tracing
# ========================================================================== #
# [trace.config]
# Configure a non-default environment for the traces reported from this Agent
# env: none
# File destination of trace-agent logs
# log_file: /var/log/datadog/trace-agent.log
# [trace.receiver]
# Port that the trace-agent will listen on
# receiver_port: 8126
# [trace.sampler]
# Extra global sample rate to apply to all traces
# This sample rate is combined with the sample rate from the sampler logic, still promoting interesting traces
# From 1 (no extra sampling) to 0 (don't sample at all)
# extra_sample_rate: 1
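# For example, extra_sample_rate: 0.5 would keep roughly half of the traces
# that the sampler logic would otherwise keep.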
# Maximum number of traces per second to sample.
# The limit is applied as an average over a few minutes; much bigger spikes are possible.
# Set to 0 to disable the limit.
# max_traces_per_second: 10
# [trace.ignore]
# A blacklist of regular expressions can be provided to disable certain traces based on their resource name
# All entries must be surrounded by double quotes and separated by commas
# Example: "(GET|POST) /healthcheck","GET /V1"
# resource: