feat(configV4): migrate config file to v4 (#139)
- migrate config file to v4.
- move definition files to legacy folder.
paologallinaharbur authored Aug 30, 2021
1 parent 97403b0 commit af23be6
Showing 9 changed files with 425 additions and 204 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

## 2.17.0 (2021-06-27)
### Changed
- Moved the default config.sample to [V4](https://docs.newrelic.com/docs/create-integrations/infrastructure-integrations-sdk/specifications/host-integrations-newer-configuration-format/) and added a dependency on infra-agent version 1.20.0

Please note that the old [V3](https://docs.newrelic.com/docs/create-integrations/infrastructure-integrations-sdk/specifications/host-integrations-standard-configuration-format/) configuration format is deprecated but still supported.
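
For illustration, a minimal sketch of the two formats, using placeholder values drawn from the samples in this diff (the linked docs define the full schemas):

```yaml
# V3 (legacy): integration_name plus an "instances" list, with a separate
# kafka-definition.yml file declaring the available commands
integration_name: com.newrelic.kafka
instances:
  - name: kafka-metrics
    command: metrics
    arguments:
      cluster_name: testcluster1
      zookeeper_hosts: '[{"host": "localhost", "port": 2181}]'

# V4: a single "integrations" list; arguments become environment variables
# and no definition file is needed
integrations:
  - name: nri-kafka
    env:
      CLUSTER_NAME: testcluster1
      ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
    interval: 15s
```

Because V4 drops the definition files, this commit moves them under legacy/ (see the goreleaser, Dockerfile, and rename changes below).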

## 2.16.2 (2021-06-08)
### Changed
- Support for ARM
1 change: 0 additions & 1 deletion Dockerfile
@@ -13,7 +13,6 @@ FROM newrelic/infrastructure:latest
ENV NRIA_IS_FORWARD_ONLY true
ENV NRIA_K8S_INTEGRATION true
COPY --from=builder-kafka /go/src/github.com/newrelic/nri-kafka/bin/nri-kafka /nri-sidecar/newrelic-infra/newrelic-integrations/bin/nri-kafka
-COPY --from=builder-kafka /go/src/github.com/newrelic/nri-kafka/kafka-definition.yml /nri-sidecar/newrelic-infra/newrelic-integrations/definition.yaml
COPY --from=builder-jmx /nrjmx/bin /usr/bin/
RUN apk update && apk add openjdk8-jre
USER 1000
14 changes: 9 additions & 5 deletions build/.goreleaser.yml
@@ -41,7 +41,7 @@ nfpms:
- nri-nix

dependencies:
- "newrelic-infra"
- newrelic-infra (>= 1.20.0)
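+# the V4 config format requires infra-agent 1.20.0 or newer (see CHANGELOG)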
- "nrjmx"

bindir: "/var/db/newrelic-infra/newrelic-integrations/bin"
@@ -55,7 +55,7 @@ nfpms:
dst: "/usr/share/doc/nri-kafka/README.md"
- src: "LICENSE"
dst: "/usr/share/doc/nri-kafka/LICENSE"
- src: "kafka-definition.yml"
- src: "legacy/kafka-definition.yml"
dst: "/var/db/newrelic-infra/newrelic-integrations/kafka-definition.yml"
type: config

@@ -77,16 +77,20 @@ archives:
name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Version }}_{{ .Arch }}_dirty"
files:
- kafka-config.yml.sample
-- kafka-definition.yml
+- src: 'legacy/kafka-definition.yml'
+  dst: .
+  strip_parent: true
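+  # strip_parent drops the legacy/ prefix so the archive still ships kafka-definition.yml at its root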
format: tar.gz

- id: nri-win
builds:
- nri-win
name_template: "{{ .ProjectName }}-{{ .Arch }}.{{ .Version }}_dirty"
files:
-- kafka-config.yml.sample
-- kafka-win-definition.yml
+- kafka-win-config.yml.sample
+- src: 'legacy/kafka-win-definition.yml'
+  dst: .
+  strip_parent: true
format: zip

# we use custom publisher for fixing archives and signing them
2 changes: 1 addition & 1 deletion build/release.mk
@@ -1,5 +1,5 @@
BUILD_DIR := ./bin/
-GORELEASER_VERSION := v0.169.0
+GORELEASER_VERSION := v0.174.1
GORELEASER_BIN ?= bin/goreleaser

bin:
1 change: 1 addition & 0 deletions build/windows/fix_archives.sh
@@ -28,6 +28,7 @@ find dist -regex ".*_dirty\.zip" | while read zip_dirty; do
echo "===> Move files inside ${zip_file_name}"
mv ${ZIP_CONTENT_PATH}/nri-${INTEGRATION}.exe "${AGENT_DIR_IN_ZIP_PATH}/bin"
mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-win-definition.yml "${AGENT_DIR_IN_ZIP_PATH}"
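+# the archive now ships the Windows sample as ${INTEGRATION}-win-config.yml.sample; rename it so the generic move below picks it up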
+mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-win-config.yml.sample ${ZIP_CONTENT_PATH}/${INTEGRATION}-config.yml.sample
mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-config.yml.sample "${CONF_IN_ZIP_PATH}"

echo "===> Creating zip ${ZIP_CLEAN}"
398 changes: 201 additions & 197 deletions kafka-config.yml.sample

Large diffs are not rendered by default.

207 changes: 207 additions & 0 deletions kafka-win-config.yml.sample
@@ -0,0 +1,207 @@
integrations:

# Example kafka-inventory. This gives an example of collecting inventory with the integration
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
INVENTORY: "true"
CLUSTER_NAME: "testcluster2"
ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
ZOOKEEPER_AUTH_SECRET: "username:password"
# Below are the fields used to fine-tune/toggle topic inventory collection.
# In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex"
TOPIC_MODE: 'all'
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka


# Example kafka-consumer-offsets. This gives an example configuration for collecting consumer offsets for the cluster
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "false"
INVENTORY: "false"
CONSUMER_OFFSET: "true"
CLUSTER_NAME: "testcluster3"
AUTODISCOVER_STRATEGY: "bootstrap"
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT
# A regex pattern that matches the consumer groups to collect metrics from
CONSUMER_GROUP_REGEX: '.*'
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka


# Example kafka-metrics-zookeeper-discovery. This gives an example of autodiscovery of brokers with zookeeper
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
# Override the Kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features.
KAFKA_VERSION: "1.0.0"
# How to find brokers. Either "bootstrap" or "zookeeper"
AUTODISCOVER_STRATEGY: "zookeeper"
# A list of zookeeper hosts to discover brokers from.
# Only required and used if `autodiscover_strategy` is "zookeeper"
#
# The "zookeeper_hosts" field is a JSON array, each entry in the array connection information for a Zookeeper node.
# Each entry should have the following fields:
# - host: The IP or hostname of a Zookeeper node; if the New Relic agent is installed on a Zookeeper node, "localhost" is an acceptable value
# - port: The port Zookeeper is listening on for incoming requests. If omitted, a default port of 2181 will be used.
ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
# If using "user" authentication, the credentials must be specified as a string of the form "<user>:<password>"
# Example: 'zookeeperuser:zookeeperpass'
ZOOKEEPER_AUTH_SECRET: "username:password"
# If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified.
# The alternative root must have a leading slash.
ZOOKEEPER_PATH: "/kafka-root"
# It is common to use the same JMX configuration across a Kafka cluster.
# The default username and password are the credentials that will be used to make
# a JMX connection to each broker found by Zookeeper. These values will also
# be used when connecting to a consumer and/or producer if the "username" or "password"
# field is omitted.
DEFAULT_JMX_USER: "username"
DEFAULT_JMX_PASSWORD: "password"
# This field is used to toggle the collection of broker and topic metrics. This is on by default and should only be set to "false"
# for the specific case where only producers/consumers are being monitored and "topic_mode" is set to "all".
# In any other case this field can be omitted.
COLLECT_BROKER_TOPIC_DATA: true
# Below are the fields used to fine-tune/toggle topic metric collection.
# In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex". If the field is set to "all",
# a Zookeeper connection is required: at least the "zookeeper_hosts" field must be set, as topics are looked up via Zookeeper.
#
# It is recommended to use the "list" option to monitor a specific set of topics. If using "list" mode the "topic_list"
# field should be filled out. The "topic_list" is a JSON array of topic names to be monitored.
# Example of topic_list: '["topic1", "topic2"]'
#
# If monitoring topics via the "all" or "list" option for "topic_mode", the topic size can be collected from zookeeper by setting
# "collect_topic_size" to true. This operation is intensive and can take a while to collect for a larger number of topics.
# It is recommended to only enable this feature if using a small "topic_list".
# If the field is omitted it will default to false.
TOPIC_MODE: "regex"
# TOPIC_LIST: '["topic1", "topic2", "topic3"]'
TOPIC_REGEX: 'topic\d+'
# collect_topic_size collects the on-disk size for the topics collected. This can be very time intensive for large clusters,
# so it is disabled by default
COLLECT_TOPIC_SIZE: false
# topic_bucket is used to split topic metric collection across multiple instances. This is useful when the number of topics you want to collect
# is too large for a single collection, and the topics are not easily partitionable with regex. It works by hashing the topic name, then using it to split
# the topics across a number of buckets. The first number is the index of the current instance, the second is the total number of instances the
# topics are split across. For example, if you want the topics matched by `topic_regex: 'mytopic.*'` to be split across three instances, one
# instance will be configured with `topic_bucket: 1/3`, one with `2/3`, and one with `3/3`.
TOPIC_BUCKET: '1/3'
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka


# Example kafka-metrics-bootstrap-discovery. This gives an example of autodiscovery of brokers with a bootstrap broker
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
AUTODISCOVER_STRATEGY: "bootstrap"
# Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
# will be discovered using that connection.
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supports PLAINTEXT and SSL
BOOTSTRAP_BROKER_JMX_PORT: 9999
# JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset
BOOTSTRAP_BROKER_JMX_USER: admin
BOOTSTRAP_BROKER_JMX_PASSWORD: password
# Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics
# for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things
# like deployment to Kubernetes, where a single integration instance is desired per broker.
LOCAL_ONLY_COLLECTION: false
# See above for more information on topic collection
COLLECT_BROKER_TOPIC_DATA: true
TOPIC_MODE: "all"
COLLECT_TOPIC_SIZE: false
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka

# Example kafka-producer-consumer-metrics. This gives an example for collecting JMX metrics from consumers and producers
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
CLUSTER_NAME: "testcluster3"
# In order to collect Java producer and consumer metrics the "producers" and "consumers" fields should be filled out.
# Both fields are JSON arrays with each entry being a separate Java producer or consumer, in its respective field.
# Each entry should have the following fields:
# - name: This is the actual name of the producer/consumer as it appears in Kafka
# - host: The IP or hostname of the producer/consumer. If omitted, will use the value of the "default_jmx_host" field
# - port: The port on which JMX is set up on the producer/consumer. If omitted, will use the value of the "default_jmx_port" field
# - username: The username used to connect to JMX. If omitted, will use the value of the "default_jmx_user" field
# - password: The password used to connect to JMX. If omitted, will use the value of the "default_jmx_password" field
# Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me', "password": "secret"}
PRODUCERS: '[{"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
CONSUMERS: '[{"name": "myConsumer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
# If several producers/consumers are on the same host an agent can be installed on that host and the
# "default_jmx_host" and "default_jmx_port" field can be set once and used for all producers/consumers that
# do not have the "host" or "port" field repsectively.
# These fields can be removed if each producer/consumer has it's own "host" and/or "port" field filled out.
DEFAULT_JMX_HOST: "localhost"
DEFAULT_JMX_PORT: "9999"
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka

# Example kafka-kerberos-auth
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
AUTODISCOVER_STRATEGY: "bootstrap"
# Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
# will be discovered using that connection.
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supports PLAINTEXT and SSL
BOOTSTRAP_BROKER_JMX_PORT: 9999
# JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset
BOOTSTRAP_BROKER_JMX_USER: admin
BOOTSTRAP_BROKER_JMX_PASSWORD: password
# Kerberos authentication arguments
SASL_MECHANISM: GSSAPI
SASL_GSSAPI_REALM: SOMECORP.COM
SASL_GSSAPI_SERVICE_NAME: Kafka
SASL_GSSAPI_USERNAME: kafka
SASL_GSSAPI_KEY_TAB_PATH: /etc/newrelic-infra/kafka.keytab
SASL_GSSAPI_KERBEROS_CONFIG_PATH: /etc/krb5.conf
# Disables FAST negotiation, which causes issues with Active Directory
# SASL_GSSAPI_DISABLE_FAST_NEGOTIATION: false
# Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics
# for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things
# like deployment to Kubernetes, where a single integration instance is desired per broker.
LOCAL_ONLY_COLLECTION: false
# See above for more information on topic collection
COLLECT_BROKER_TOPIC_DATA: true
TOPIC_MODE: "all"
COLLECT_TOPIC_SIZE: false
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
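
Usage note: the fix_archives.sh change above renames this sample to kafka-config.yml.sample inside the Windows zip. To activate it on a host, copy it without the .sample suffix into the agent's integrations.d directory (by default C:\Program Files\New Relic\newrelic-infra\integrations.d\).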
kafka-definition.yml → legacy/kafka-definition.yml: file renamed without changes.
kafka-win-definition.yml → legacy/kafka-win-definition.yml: file renamed without changes.
