forked from grafana/intro-to-mltp
-
Notifications
You must be signed in to change notification settings - Fork 0
/
config.alloy
131 lines (116 loc) · 3.47 KB
/
config.alloy
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
///////////////////////////////////////////////////////////////////////////////
// Configuration file
// Loads the shared endpoints file (URLs + basic-auth credentials for Mimir,
// Loki, and Tempo). Other components read it via json_path() on
// local.file.endpoints.content.
local.file "endpoints" {
filename = "/etc/alloy/endpoints.json"
}
///////////////////////////////////////////////////////////////////////////////
// Metrics, Logging, and Tracing from .NET Application
// This section is for scraping the incoming data from .NET services.
// Scrapes Prometheus metrics every 15s and forwards them to the Mimir
// remote-write component. The extra target labels (group, service) are
// attached to every scraped series.
// NOTE(review): the target address "alloy:4317" is Alloy's own OTLP gRPC
// port (see the otlp_receiver grpc endpoint below), not a Prometheus
// /metrics endpoint — a scrape of a gRPC port will fail. Confirm the
// intended target (likely the .NET service's metrics port) and update.
prometheus.scrape "dotnet_services" {
targets = [
{"__address__" = "alloy:4317", group = "services", service = "nvrweb"},
]
scrape_interval = "15s"
forward_to = [prometheus.remote_write.mimir.receiver]
job_name = "nvrweb_services"
}
///////////////////////////////////////////////////////////////////////////////
// Metrics, Logging, and Tracing from .NET Application
// OTLP receiver: accepts telemetry over gRPC (4317) and HTTP (4318) and fans
// each signal type out to its pipeline.
otelcol.receiver.otlp "otlp_receiver" {
grpc {
endpoint = "0.0.0.0:4317"
}
http {
endpoint = "0.0.0.0:4318"
}
output {
logs = [otelcol.processor.batch.default.input]
// Fix: traces previously went straight to the batch processor, leaving
// the tail_sampling "errors" and spanmetrics "tracemetrics" components
// (defined later in this file) with no input — they were dead code.
// Route traces through both: tail_sampling keeps only erroring traces
// and forwards them to the batch processor (and on to Tempo), while
// spanmetrics derives span-rate metrics from all incoming spans.
traces = [
otelcol.processor.tail_sampling.errors.input,
otelcol.connector.spanmetrics.tracemetrics.input,
]
metrics = [otelcol.processor.batch.default.input]
}
}
// Batches telemetry before export to reduce outbound request count.
// Flushes when 16384 items accumulate or after 2s, whichever comes first
// (send_batch_max_size == send_batch_size, so batches are never split larger).
otelcol.processor.batch "default" {
send_batch_size = 16384
send_batch_max_size = 16384
timeout = "2s"
output {
traces = [otelcol.exporter.otlp.tempo.input]
logs = [otelcol.exporter.loki.default.input]
metrics = [otelcol.exporter.prometheus.mimir.input]
}
}
// OpenTelemetry Traces Exporter (to Tempo)
// Sends traces over OTLP to the endpoint read from endpoints.json
// (.traces.url).
// NOTE(review): TLS verification is fully disabled (insecure +
// insecure_skip_verify) — acceptable for a local/demo stack only; confirm
// before using against a remote Tempo.
otelcol.exporter.otlp "tempo" {
client {
endpoint = json_path(local.file.endpoints.content, ".traces.url")[0]
tls {
insecure = true
insecure_skip_verify = true
}
}
}
// OpenTelemetry Logs Exporter (to Loki)
// Converts OTLP log records to Loki format and hands them to the Loki
// writer below.
otelcol.exporter.loki "default" {
forward_to = [loki.write.autologging.receiver]
}
// Writes log entries to Loki. URL and basic-auth credentials are read from
// endpoints.json (.logs.*).
loki.write "autologging" {
endpoint {
url = json_path(local.file.endpoints.content, ".logs.url")[0]
basic_auth {
username = json_path(local.file.endpoints.content, ".logs.basicAuth.username")[0]
password = json_path(local.file.endpoints.content, ".logs.basicAuth.password")[0]
}
}
}
// Converts OTLP metrics to Prometheus format and forwards them to the
// Mimir remote-write component below.
otelcol.exporter.prometheus "mimir" {
forward_to = [prometheus.remote_write.mimir.receiver]
}
// Metrics Exporter to Mimir
// Remote-writes all metrics (scraped and OTLP-converted) to Mimir. URL and
// basic-auth credentials are read from endpoints.json (.metrics.*).
prometheus.remote_write "mimir" {
endpoint {
url = json_path(local.file.endpoints.content, ".metrics.url")[0]
basic_auth {
username = json_path(local.file.endpoints.content, ".metrics.basicAuth.username")[0]
password = json_path(local.file.endpoints.content, ".metrics.basicAuth.password")[0]
}
}
}
///////////////////////////////////////////////////////////////////////////////
// OpenTelemetry Processors
// Tail sampler: buffers each trace for up to 30s after the first span, then
// keeps it only if any span has status ERROR; sampled traces go on to the
// batch processor (and from there to Tempo).
// NOTE(review): verify an upstream component actually forwards traces into
// otelcol.processor.tail_sampling.errors.input — this processor only acts
// on traces it receives.
otelcol.processor.tail_sampling "errors" {
decision_wait = "30s"
policy {
name = "sample-erroring-traces"
type = "status_code"
status_code {
status_codes = ["ERROR"]
}
}
output {
traces = [otelcol.processor.batch.default.input]
}
}
// Spanmetrics connector: derives call-count and duration-histogram metrics
// from incoming spans, prefixed with "traces.spanmetrics" and broken down by
// the three HTTP dimensions below. Exemplars link metrics back to traces.
// NOTE(review): verify an upstream component actually forwards traces into
// otelcol.connector.spanmetrics.tracemetrics.input — this connector only
// produces metrics from spans it receives.
otelcol.connector.spanmetrics "tracemetrics" {
namespace = "traces.spanmetrics"
dimension {
name = "http.method"
}
dimension {
name = "http.target"
}
dimension {
name = "http.status_code"
}
histogram {
// Explicit-bucket histograms with default bucket boundaries.
explicit {}
}
exemplars {
enabled = true
}
output {
metrics = [otelcol.exporter.prometheus.tracemetrics.input]
}
}
// Converts the spanmetrics-generated OTLP metrics to Prometheus format and
// forwards them to the Mimir remote-write component.
otelcol.exporter.prometheus "tracemetrics" {
forward_to = [prometheus.remote_write.mimir.receiver]
}