# ------------------------------------------------------------------------------
# Kafka:
# ------------------------------------------------------------------------------
## The StatefulSet installs 3 pods by default
replicas: 3
## The kafka image repository
image: "confluentinc/cp-kafka"
## The kafka image tag
imageTag: "5.0.1" # Confluent image for Kafka 2.0.0
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
imagePullPolicy: "IfNotPresent"
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# limits:
# cpu: 200m
# memory: 1536Mi
# requests:
# cpu: 100m
# memory: 1024Mi
kafkaHeapOptions: "-Xmx1G -Xms1G"
## Optional Container Security context
securityContext: {}
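## A minimal illustrative sketch of a security context (values below are examples,
## not chart defaults); runAsUser/runAsNonRoot are standard Kubernetes SecurityContext fields:
# securityContext:
#   runAsUser: 1000
#   runAsNonRoot: true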
## The StatefulSet Update Strategy which Kafka will use when changes are applied.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: "OnDelete"
## Start and stop pods in Parallel or OrderedReady (one-by-one). Note: this cannot be changed after the first release.
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
podManagementPolicy: OrderedReady
## Useful if using any custom authorizer
## Pass in some secrets to use (if required)
# secrets:
# - name: myKafkaSecret
# keys:
# - username
# - password
# # mountPath: /opt/kafka/secret
# - name: myZkSecret
# keys:
# - user
# - pass
# mountPath: /opt/zookeeper/secret
## The subpath within the Kafka container's PV where logs will be stored.
## This is combined with `persistence.mountPath` to create, by default: /opt/kafka/data/logs
logSubPath: "logs"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Use an alternate serviceAccount
## Useful when using images in custom repositories
# serviceAccountName:
## Set a pod priorityClassName
# priorityClassName: high-priority
## Pod scheduling preferences (by default keep pods within a release on separate nodes).
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## By default we don't set affinity
affinity: {}
## Alternatively, this typical example defines:
## antiAffinity (to keep Kafka pods on separate nodes)
## and affinity (to encourage Kafka pods to be co-located with Zookeeper pods)
# affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - kafka
# topologyKey: "kubernetes.io/hostname"
# podAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 50
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - zookeeper
# topologyKey: "kubernetes.io/hostname"
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
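## For example, to schedule brokers only onto nodes carrying a particular label
## (the label below is illustrative, not a chart default):
# nodeSelector:
#   disktype: ssd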
## Readiness probe config.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
## Period to wait for graceful broker shutdown (SIGTERM) before the pod is killed (SIGKILL)
## ref: https://kubernetes-v1-4.github.io/docs/user-guide/production-pods/#lifecycle-hooks-and-termination-notice
## ref: https://kafka.apache.org/10/documentation.html#brokerconfigs controlled.shutdown.*
terminationGracePeriodSeconds: 60
# Tolerations for nodes that have taints on them.
# Useful if you want to dedicate nodes to just run kafka
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# tolerations:
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
## Headless service.
##
headless:
# annotations:
# targetPort:
port: 9092
## External access.
##
external:
enabled: false
# type can be either NodePort or LoadBalancer
type: NodePort
# annotations:
# service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
# Labels to be added to external services
# labels:
# aLabel: "value"
dns:
useInternal: false
useExternal: true
# If using external service type LoadBalancer and external DNS, set distinct to true below.
# This creates an A record for each StatefulSet pod/broker. You should then map the
# A record of the broker to the EXTERNAL IP given by the LoadBalancer in your DNS server.
distinct: false
servicePort: 19092
firstListenerPort: 31090
domain: cluster.local
loadBalancerIP: []
loadBalancerSourceRanges: []
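# Illustrative sketch only (addresses and CIDRs below are placeholders, not defaults):
# loadBalancerIP is a list, typically one entry per broker when using distinct LoadBalancers,
# and loadBalancerSourceRanges restricts client CIDRs.
# loadBalancerIP:
#   - 10.0.0.10
#   - 10.0.0.11
#   - 10.0.0.12
# loadBalancerSourceRanges:
#   - 10.0.0.0/8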
init:
image: "lwolf/kubectl_deployer"
imageTag: "0.4"
imagePullPolicy: "IfNotPresent"
# Annotations to be added to Kafka pods
podAnnotations: {}
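# For example, when scraping brokers via annotations rather than the Prometheus Operator
# (keys below follow a common convention and are illustrative, not chart defaults):
# podAnnotations:
#   prometheus.io/scrape: "true"
#   prometheus.io/port: "5556"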
# Labels to be added to Kafka pods
podLabels: {}
# service: broker
# team: developers
podDisruptionBudget: {}
# maxUnavailable: 1 # Limits how many Kafka pods may be unavailable due to voluntary disruptions.
## Configuration Overrides. Specify any Kafka settings you would like set on the StatefulSet
## here in map format, as defined in the official docs.
## ref: https://kafka.apache.org/documentation/#brokerconfigs
##
configurationOverrides:
"confluent.support.metrics.enable": false # Disables confluent metric submission
# "auto.leader.rebalance.enable": true
# "auto.create.topics.enable": true
# "controlled.shutdown.enable": true
# "controlled.shutdown.max.retries": 100
## Options required for external access via NodePort
## ref:
## - http://kafka.apache.org/documentation/#security_configbroker
## - https://cwiki.apache.org/confluence/display/KAFKA/KIP-103%3A+Separation+of+Internal+and+External+traffic
##
## Setting "advertised.listeners" here appends to "PLAINTEXT://${POD_IP}:9092,", ensure you update the domain
## If external service type is Nodeport:
# "advertised.listeners": |-
# EXTERNAL://kafka.cluster.local:$((31090 + ${KAFKA_BROKER_ID}))
## If external service type is LoadBalancer and distinct is true:
# "advertised.listeners": |-
# EXTERNAL://kafka-$((${KAFKA_BROKER_ID})).cluster.local:19092
## If external service type is LoadBalancer and distinct is false:
# "advertised.listeners": |-
# EXTERNAL://${LOAD_BALANCER_IP}:31090
## Uncomment to define the EXTERNAL Listener protocol
# "listener.security.protocol.map": |-
# PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
## set extra ENVs
# key: "value"
envOverrides: {}
## A collection of additional ports to expose on brokers (formatted as normal containerPort yaml)
# Useful when the image exposes metrics (like Prometheus, etc.) through a javaagent instead of a sidecar
additionalPorts: {}
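## One possible shape, following the containerPort yaml format mentioned above
## (the port name and number are illustrative, not chart defaults):
# additionalPorts:
#   - name: jolokia
#     containerPort: 8778
#     protocol: TCP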
## Persistence configuration. Specify if and how to persist data to a persistent volume.
##
persistence:
enabled: true
## The size of the PersistentVolume to allocate to each Kafka Pod in the StatefulSet. For
## production servers this number should likely be much larger.
##
size: "1Gi"
## The location within the Kafka container where the PV will mount its storage and Kafka will
## store its logs.
##
mountPath: "/opt/kafka/data"
## Kafka data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass:
jmx:
## Rules to apply to the Prometheus JMX Exporter. Note that while many stats have been cleaned up and exposed,
## more remain to be cleaned up and exposed, and some will never be exposed because they duplicate values that
## can easily be derived. The ConfigMap in this chart cleans up the metrics it exposes into a Prometheus-friendly
## format, e.g. topic and broker are labels rather than part of the metric name. Improvements are gladly accepted and encouraged.
configMap:
## Allows disabling the default ConfigMap; note that a ConfigMap is still required
enabled: true
## Allows setting values to generate the ConfigMap
## To allow all metrics through (warning: this is extremely verbose), comment out `overrideConfig` below and set
## `whitelistObjectNames: []`
overrideConfig: {}
# jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
# lowercaseOutputName: true
# lowercaseOutputLabelNames: true
# ssl: false
# rules:
# - pattern: ".*"
## If you would like to supply your own ConfigMap for JMX metrics, supply the name of that
## ConfigMap as an `overrideName` here.
overrideName: ""
## Port on which JMX metrics are exposed in native JMX format (not Prometheus format)
port: 5555
## JMX whitelist objects can be set to control which JMX metrics are exposed. Only whitelisted
## values will be exposed via the JMX Exporter, and they must also be exposed via Rules. To expose all metrics
## (warning: this is extremely verbose and they are not formatted in a Prometheus style), (1) set `whitelistObjectNames: []`
## and (2) comment out `overrideConfig` above.
whitelistObjectNames: # []
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
## Prometheus Exporters / Metrics
##
prometheus:
## Prometheus JMX Exporter: exposes the majority of Kafka's metrics
jmx:
enabled: false
## The image to use for the metrics collector
image: solsson/kafka-prometheus-jmx-exporter@sha256
## The image tag to use for the metrics collector
imageTag: a23062396cd5af1acdf76512632c20ea6be76885dfc20cd9ff40fb23846557e8
## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator
interval: 10s
## Timeout after which Prometheus aborts the scrape; note: only used by Prometheus Operator
scrapeTimeout: 10s
## Port jmx-exporter exposes Prometheus format metrics to scrape
port: 5556
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 100Mi
## Prometheus Kafka Exporter: exposes metrics complementary to the JMX Exporter
kafka:
enabled: false
## The image to use for the metrics collector
image: danielqsj/kafka-exporter
## The image tag to use for the metrics collector
imageTag: v1.2.0
## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator
interval: 10s
## Timeout after which Prometheus aborts the scrape; note: only used by Prometheus Operator
scrapeTimeout: 10s
## Port kafka-exporter exposes for Prometheus to scrape metrics
port: 9308
## Resource limits
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 100Mi
# Tolerations for nodes that have taints on them.
# Useful if you want to dedicate nodes to just run kafka-exporter
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# tolerations:
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
## Pod scheduling preferences (by default keep pods within a release on separate nodes).
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## By default we don't set affinity
affinity: {}
## Alternatively, this typical example defines:
## affinity (to encourage Kafka Exporter pods to be co-located with Kafka pods)
# affinity:
# podAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 50
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - kafka
# topologyKey: "kubernetes.io/hostname"
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
operator:
## Are you using Prometheus Operator?
enabled: false
serviceMonitor:
# Namespace in which to install the ServiceMonitor resource.
namespace: monitoring
# Use release namespace instead
releaseNamespace: false
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/coreos/prometheus-operator/tree/master/helm#tldr)
## [Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus/templates/prometheus.yaml#L65)
## [Kube Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/kube-prometheus/values.yaml#L298)
selector:
prometheus: kube-prometheus
prometheusRule:
## Add Prometheus Rules?
enabled: false
## Namespace in which to install the PrometheusRule resource.
namespace: monitoring
# Use release namespace instead
releaseNamespace: false
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/coreos/prometheus-operator/tree/master/helm#tldr)
## [Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus/templates/prometheus.yaml#L65)
## [Kube Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/kube-prometheus/values.yaml#L298)
selector:
prometheus: kube-prometheus
## Some example rules.
## e.g. max(kafka_controller_kafkacontroller_activecontrollercount_value{service="my-kafka-release"}) by (service) < 1
rules:
- alert: KafkaNoActiveControllers
annotations:
message: The number of active controllers in {{ "{{" }} $labels.namespace {{ "}}" }} is less than 1. This usually means that some of the Kafka nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one whilst monitoring the under-replicated partitions graph).
expr: max(kafka_controller_kafkacontroller_activecontrollercount_value) by (namespace) < 1
for: 5m
labels:
severity: critical
- alert: KafkaMultipleActiveControllers
annotations:
message: The number of active controllers in {{ "{{" }} $labels.namespace {{ "}}" }} is greater than 1. This usually means that some of the Kafka nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one whilst monitoring the under-replicated partitions graph).
expr: max(kafka_controller_kafkacontroller_activecontrollercount_value) by (namespace) > 1
for: 5m
labels:
severity: critical
## Kafka Config job configuration
##
configJob:
## Specify the number of retries before considering the kafka-config job as failed.
## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
backoffLimit: 6
## Topic creation and configuration.
## The job will be run on a deployment only when the config has been changed.
## - If 'partitions' and 'replicationFactor' are specified we create the topic (with --if-not-exists.)
## - If 'partitions', 'replicationFactor' and 'reassignPartitions' are specified we reassign the partitions to
## increase the replication factor of an existing topic.
## - If 'partitions' is specified we 'alter' the number of partitions. This will
## silently and safely fail if the new setting isn’t strictly larger than the old (i.e. a NOOP.) Do be aware of the
## implications for keyed topics (ref: https://docs.confluent.io/current/kafka/post-deployment.html#admin-operations)
## - If 'defaultConfig' is specified it's deleted from the topic configuration. If it isn't present,
## it will silently and safely fail.
## - If 'config' is specified it's added to the topic configuration.
##
## Note: To increase the 'replicationFactor' of a topic, 'reassignPartitions' must be set to true (see above).
##
topics: []
# - name: myExistingTopicConfig
# config: "cleanup.policy=compact,delete.retention.ms=604800000"
# - name: myExistingTopicReassignPartitions
# partitions: 8
# replicationFactor: 5
# reassignPartitions: true
# - name: myExistingTopicPartitions
# partitions: 8
# - name: myNewTopicWithConfig
# partitions: 8
# replicationFactor: 3
# defaultConfig: "segment.bytes,segment.ms"
# config: "cleanup.policy=compact,delete.retention.ms=604800000"
# - name: myAclTopicPartitions
# partitions: 8
# acls:
# - user: read
# operations: [ Read ]
# - user: read_and_write
# operations:
# - Read
# - Write
# - user: all
# operations: [ All ]
## Enable/disable the chart's tests. Useful if using this chart as a dependency of
## another chart and you don't want these tests running when trying to develop and
## test your own chart.
testsEnabled: true
# ------------------------------------------------------------------------------
# Zookeeper:
# ------------------------------------------------------------------------------
zookeeper:
## If true, install the Zookeeper chart alongside Kafka
## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper
enabled: true
## Configure Zookeeper resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: ~
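## For example (illustrative values, mirroring the Kafka resources block above):
# resources:
#   limits:
#     cpu: 200m
#     memory: 1Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi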
## Environment variables to set in Zookeeper
env:
## The JVM heap size to allocate to Zookeeper
ZK_HEAP_SIZE: "1G"
persistence:
enabled: false
## The amount of PV storage allocated to each Zookeeper pod in the StatefulSet
# size: "2Gi"
## Specify a Zookeeper imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
image:
PullPolicy: "IfNotPresent"
## If the Zookeeper chart is disabled, a URL and port are required to connect
url: ""
port: 2181
## Pod scheduling preferences (by default keep pods within a release on separate nodes).
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## By default we don't set affinity:
affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods.
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# release: zookeeper