diff --git a/CHANGELOG.md b/CHANGELOG.md index d31a5317..0abf16ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## 2.17.0 (2021-06-27) +### Changed +- Moved the default config.sample to the [V4](https://docs.newrelic.com/docs/create-integrations/infrastructure-integrations-sdk/specifications/host-integrations-newer-configuration-format/) format and added a dependency on infra-agent version 1.20.0 + +Please note that the old [V3](https://docs.newrelic.com/docs/create-integrations/infrastructure-integrations-sdk/specifications/host-integrations-standard-configuration-format/) configuration format is deprecated but still supported. + ## 2.16.2 (2021-06-08) ### Changed - Support for ARM diff --git a/Dockerfile b/Dockerfile index b4b902c0..54c693ee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,6 @@ FROM newrelic/infrastructure:latest ENV NRIA_IS_FORWARD_ONLY true ENV NRIA_K8S_INTEGRATION true COPY --from=builder-kafka /go/src/github.com/newrelic/nri-kafka/bin/nri-kafka /nri-sidecar/newrelic-infra/newrelic-integrations/bin/nri-kafka -COPY --from=builder-kafka /go/src/github.com/newrelic/nri-kafka/kafka-definition.yml /nri-sidecar/newrelic-infra/newrelic-integrations/definition.yaml COPY --from=builder-jmx /nrjmx/bin /usr/bin/ RUN apk update && apk add openjdk8-jre USER 1000 diff --git a/build/.goreleaser.yml b/build/.goreleaser.yml index a80955d5..7564b793 100644 --- a/build/.goreleaser.yml +++ b/build/.goreleaser.yml @@ -41,7 +41,7 @@ nfpms: - nri-nix dependencies: - - "newrelic-infra" + - newrelic-infra (>= 1.20.0) - "nrjmx" bindir: "/var/db/newrelic-infra/newrelic-integrations/bin" @@ -55,7 +55,7 @@ nfpms: dst: "/usr/share/doc/nri-kafka/README.md" - src: "LICENSE" dst: "/usr/share/doc/nri-kafka/LICENSE" - - src: "kafka-definition.yml" + - src: "legacy/kafka-definition.yml" dst: "/var/db/newrelic-infra/newrelic-integrations/kafka-definition.yml" type: config @@ -77,7 +77,9 @@ archives: name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Version }}_{{ .Arch }}_dirty" files: - kafka-config.yml.sample - - kafka-definition.yml + - src: 'legacy/kafka-definition.yml' + dst: . + strip_parent: true format: tar.gz - id: nri-win @@ -85,8 +87,10 @@ archives: builds: - nri-win name_template: "{{ .ProjectName }}-{{ .Arch }}.{{ .Version }}_dirty" files: - - kafka-config.yml.sample - - kafka-win-definition.yml + - kafka-win-config.yml.sample + - src: 'legacy/kafka-win-definition.yml' + dst: .
+ strip_parent: true format: zip # we use custom publisher for fixing archives and signing them diff --git a/build/release.mk b/build/release.mk index 09cf5645..7f34354d 100644 --- a/build/release.mk +++ b/build/release.mk @@ -1,5 +1,5 @@ BUILD_DIR := ./bin/ -GORELEASER_VERSION := v0.169.0 +GORELEASER_VERSION := v0.174.1 GORELEASER_BIN ?= bin/goreleaser bin: diff --git a/build/windows/fix_archives.sh b/build/windows/fix_archives.sh index 222f27ec..91e3c8f8 100644 --- a/build/windows/fix_archives.sh +++ b/build/windows/fix_archives.sh @@ -28,6 +28,7 @@ find dist -regex ".*_dirty\.zip" | while read zip_dirty; do echo "===> Move files inside ${zip_file_name}" mv ${ZIP_CONTENT_PATH}/nri-${INTEGRATION}.exe "${AGENT_DIR_IN_ZIP_PATH}/bin" mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-win-definition.yml "${AGENT_DIR_IN_ZIP_PATH}" + mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-win-config.yml.sample ${ZIP_CONTENT_PATH}/${INTEGRATION}-config.yml.sample mv ${ZIP_CONTENT_PATH}/${INTEGRATION}-config.yml.sample "${CONF_IN_ZIP_PATH}" echo "===> Creating zip ${ZIP_CLEAN}" diff --git a/kafka-config.yml.sample b/kafka-config.yml.sample index 66bf77e8..b005fe60 100644 --- a/kafka-config.yml.sample +++ b/kafka-config.yml.sample @@ -1,197 +1,201 @@ ---- -integration_name: com.newrelic.kafka - -instances: - # This instance gives an example of autodiscovery of brokers with zookeeper - - name: kafka-metrics-zookeeper-discovery - command: metrics - arguments: - # A cluster name is required to uniquely identify this collection result in Insights - cluster_name: "testcluster1" - - # Override the kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features. - kafka_version: "1.0.0" - - - # How to find brokers. Either "bootstrap" or "zookeeper" - autodiscover_strategy: "zookeeper" - - # A list of zookeeper hosts to discover brokers from. - # Only required and used if `autodiscover_mechanism` is "zookeeper" - # - # The "zookeeper_hosts" field is a JSON array, each entry in the array connection information for a Zookeeper node. - # Each entry should have the following fields: - # - host: The IP or Hostname of a Zookeeper node, if the New Relic agent is installed on a Zookeeper node "localhost" is an acceptable value - # - port: The port Zookeeper is listening on for incoming requests. If omitted, a default port of 2181 will be used. - zookeeper_hosts: '[{"host": "localhost", "port": 2181}]' - - # If using "user" authentication, the credentials must be specified as a string of the form ":" - # Example: 'zookeeperuser:zookeeperpass' - zookeeper_auth_secret: "username:password" - - # If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified. - # The alternative root must have a leading slash. - zookeeper_path: "/kafka-root" - - # It is common to use the same JMX configuration across a Kafka cluster - # The default username and password are the credentials that will be used to make - # a JMX connection to each broker found by Zookeeper. Theses values will also - # be used when connecting to a consumer and/or producer if the "username" or "password" - # field are omitted. - default_jmx_user: "username" - default_jmx_password: "password" - - # This field is used to toggle the collection of broker and topic metrics. This is on by default and should only be set to "false" - # for the specific case where only producers/consumers are being monitored and "topic_mode" is set to "All". 
- # Any other case this field can be omitted. - collect_broker_topic_data: true - - # Below are the fields used to fine tune/toggle topic metric collection. - # In order to collect topics the "topic_mode" field must be set to "all" or "list". If the field is set to "all" - # a Zookeeper connection is required, at least the "zookeeper_hosts" field is required, as topics are looked up via Zookeeper. - # - # It is recommended to use the "List" option to monitor a specific set of topics. If using "List" mode the "topic_list" - # field should be filled out. The "topic_list" is a JSON array of topic names to be monitored. - # Example of topic_list: '["topic1", "topic2"]' - # - # If monitoring topics via the "all" or "list" option for "topic_mode", the topic size can be collected from zookeeper by setting - # "collect_topic_size" to true. This operation is intensive and can take a while to collect for a larger number of topics. - # It is recommended to only enable this feature if using a small "topic_list". - # If the field is omitted it will default to false. - topic_mode: "regex" - # topic_list: `["topic1", "topic2", "topic3"]` - topic_regex: 'topic\d+' - - # collect_topic_size collects the on-disk size for the topics collected. This can be very time intensive for large clusters, - # so it is disabled by default - collect_topic_size: false - - # topic_bucket is used to split topic metric collection across multiple instances. This is useful when the number of topics you want to collect - # is too large for a single collection, and are not easily partitionable with regex. It works by hashing the topic name, then using it to split - # the topics across a number of buckets. The first number is the index of the current instance, the second is the total number of instances the - # topics are split across. For example, if you want the topics matched by `topic_regex: 'mytopic.*'` to be split across three instances, one - # instance will be configured with `topic_bucket: 1/3`, one with `2/3`, and one with `3/3` - topic_bucket: '1/3' - - # Additionally, custom labels can be added to further identify your data - labels: - env: production - role: kafka - - # This instance gives an example of autodiscovery of brokers with a bootstrap broker - - name: kafka-metrics-bootstrap-discovery - command: metrics - arguments: - # A cluster name is required to uniquely identify this collection result in Insights - cluster_name: "testcluster1" - - autodiscover_strategy: "bootstrap" - - # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster - # will be discovered using that connection. - bootstrap_broker_host: "localhost" - bootstrap_broker_kafka_port: 9092 - bootstrap_broker_kafka_protocol: PLAINTEXT # Currently support PLAINTEXT and SSL - bootstrap_broker_jmx_port: 9999 - # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset - bootstrap_broker_jmx_user: admin - bootstrap_broker_jmx_password: password - - # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics - # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things - # like deployment to kubernetes, where a single integration instance is desired per broker. 
- local_only_collection: false - - # See above for more information on topic collection - collect_broker_topic_data: true - topic_mode: "all" - collect_topic_size: false - - # This instance gives an example of collecting inventory with the integration - - name: kafka-inventory - command: inventory - arguments: - cluster_name: "testcluster2" - zookeeper_hosts: '[{"host": "localhost", "port": 2181}]' - zookeeper_auth_secret: "username:password" - - # Below are the fields used to fine tune/toggle topic inventory collection. - # In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex" - topic_mode: 'all' - - # Example configuration for collecting consumer offsets for the cluster - - name: kafka-consumer-offsets - command: consumer_offset - arguments: - cluster_name: "testcluster3" - - autodiscover_strategy: "bootstrap" - bootstrap_broker_host: "localhost" - bootstrap_broker_kafka_port: 9092 - bootstrap_broker_kafka_protocol: PLAINTEXT - - # A regex pattern that matches the consumer groups to collect metrics from - consumer_group_regex: '.*' - - # Example configuration for collecting JMX metrics form consumers and producers - - name: kafka-producer-consumer-metrics - command: metrics - arguments: - cluster_name: "testcluster3" - - # In order to collect Java producer and consumer metrics the "producers" and "consumers" fields should be filled out. - # Both fields are JSON arrays with each entry being a separate JAVA producer or consumer, in it's respective field. - # Each entry should have the following fields: - # - name: This is the actual name of the producer/consumer as it appears in Kafka - # - host: The IP or Hostname of the producer/consumser. If omitted, will use the value of the "default_jmx_host" field - # - port: The port in which JMX is setup for on the producer/consumer. If omitted will, use the value of the "default_jmx_port" field - # - username: The username used to connect to JMX. If omitted, will use the value of the "default_jmx_user" field - # - password: The password used to connect to JMX. If omitted, will use the value of the "default_jmx_password" field - # Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me', "password": "secret"} - producers: '[{"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' - consumers: '[{"name": "myConsumer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' - - # If several producers/consumers are on the same host an agent can be installed on that host and the - # "default_jmx_host" and "default_jmx_port" field can be set once and used for all producers/consumers that - # do not have the "host" or "port" field repsectively. - # These fields can be removed if each producer/consumer has it's own "host" and/or "port" field filled out. - default_jmx_host: "localhost" - default_jmx_port: "9999" - - - name: kafka-kerberos-auth - command: metrics - arguments: - # A cluster name is required to uniquely identify this collection result in Insights - cluster_name: "testcluster1" - - autodiscover_strategy: "bootstrap" - - # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster - # will be discovered using that connection. 
- bootstrap_broker_host: "localhost" - bootstrap_broker_kafka_port: 9092 - bootstrap_broker_kafka_protocol: PLAINTEXT # Currently support PLAINTEXT and SSL - bootstrap_broker_jmx_port: 9999 - # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset - bootstrap_broker_jmx_user: admin - bootstrap_broker_jmx_password: password - - # Kerberos authentication arguments - sasl_mechanism: GSSAPI - sasl_gssapi_realm: SOMECORP.COM - sasl_gssapi_service_name: Kafka - sasl_gssapi_username: kafka - sasl_gssapi_key_tab_path: /etc/newrelic-infra/kafka.keytab - sasl_gssapi_kerberos_config_path: /etc/krb5.conf - # disables FAST negotiation that causes issues with Active Directory - # sasl_gssapi_disable_fast_negotiation: false - - # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics - # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things - # like deployment to kubernetes, where a single integration instance is desired per broker. - local_only_collection: false - - # See above for more information on topic collection - collect_broker_topic_data: true - topic_mode: "all" - collect_topic_size: false \ No newline at end of file +integrations: + +# Example kafka-inventory. This gives an example of collecting inventory with the integration +- name: nri-kafka + env: + INVENTORY: "true" + CLUSTER_NAME: "testcluster2" + ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]' + ZOOKEEPER_AUTH_SECRET: "username:password" + # Below are the fields used to fine tune/toggle topic inventory collection. + # In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex" + TOPIC_MODE: 'all' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-consumer-offsets. This gives an example configuration for collecting consumer offsets for the cluster +- name: nri-kafka + env: + METRICS: "false" + INVENTORY: "false" + CONSUMER_OFFSET: "true" + CLUSTER_NAME: "testcluster3" + AUTODISCOVER_STRATEGY: "bootstrap" + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT + # A regex pattern that matches the consumer groups to collect metrics from + CONSUMER_GROUP_REGEX: '.*' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-metrics-zookeeper-discovery. This gives an example of autodiscovery of brokers with zookeeper +- name: nri-kafka + env: + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + # Override the kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features. + KAFKA_VERSION: "1.0.0" + # How to find brokers. Either "bootstrap" or "zookeeper" + AUTODISCOVER_STRATEGY: "zookeeper" + # A list of zookeeper hosts to discover brokers from. + # Only required and used if `autodiscover_strategy` is "zookeeper" + # + # The "zookeeper_hosts" field is a JSON array; each entry in the array contains connection information for a Zookeeper node. + # Each entry should have the following fields: + # - host: The IP or Hostname of a Zookeeper node; if the New Relic agent is installed on a Zookeeper node, "localhost" is an acceptable value + # - port: The port Zookeeper is listening on for incoming requests.
If omitted, a default port of 2181 will be used. + ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]' + # If using "user" authentication, the credentials must be specified as a string of the form "username:password" + # Example: 'zookeeperuser:zookeeperpass' + ZOOKEEPER_AUTH_SECRET: "username:password" + # If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified. + # The alternative root must have a leading slash. + ZOOKEEPER_PATH: "/kafka-root" + # It is common to use the same JMX configuration across a Kafka cluster + # The default username and password are the credentials that will be used to make + # a JMX connection to each broker found by Zookeeper. These values will also + # be used when connecting to a consumer and/or producer if the "username" or "password" + # fields are omitted. + DEFAULT_JMX_USER: "username" + DEFAULT_JMX_PASSWORD: "password" + # This field is used to toggle the collection of broker and topic metrics. This is on by default and should only be set to "false" + # for the specific case where only producers/consumers are being monitored and "topic_mode" is set to "all". + # In any other case this field can be omitted. + COLLECT_BROKER_TOPIC_DATA: true + # Below are the fields used to fine tune/toggle topic metric collection. + # In order to collect topics the "topic_mode" field must be set to "all" or "list". If the field is set to "all", + # a Zookeeper connection is required and at least the "zookeeper_hosts" field must be set, as topics are looked up via Zookeeper. + # + # It is recommended to use the "list" option to monitor a specific set of topics. If using "list" mode the "topic_list" + # field should be filled out. The "topic_list" is a JSON array of topic names to be monitored. + # Example of topic_list: '["topic1", "topic2"]' + # + # If monitoring topics via the "all" or "list" option for "topic_mode", the topic size can be collected from zookeeper by setting + # "collect_topic_size" to true. This operation is intensive and can take a while to collect for a larger number of topics. + # It is recommended to only enable this feature if using a small "topic_list". + # If the field is omitted it will default to false. + TOPIC_MODE: "regex" + # topic_list: '["topic1", "topic2", "topic3"]' + TOPIC_REGEX: 'topic\d+' + # collect_topic_size collects the on-disk size for the topics collected. This can be very time intensive for large clusters, + # so it is disabled by default + COLLECT_TOPIC_SIZE: false + # topic_bucket is used to split topic metric collection across multiple instances. This is useful when the number of topics you want to collect + # is too large for a single collection and the topics are not easily partitionable with a regex. It works by hashing the topic name, then using it to split + # the topics across a number of buckets. The first number is the index of the current instance, the second is the total number of instances the + # topics are split across. For example, if you want the topics matched by `topic_regex: 'mytopic.*'` to be split across three instances, one + # instance will be configured with `topic_bucket: 1/3`, one with `2/3`, and one with `3/3` + TOPIC_BUCKET: '1/3' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-metrics-bootstrap-discovery.
This gives an example of autodiscovery of brokers with a bootstrap broker +- name: nri-kafka + env: + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + AUTODISCOVER_STRATEGY: "bootstrap" + # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster + # will be discovered using that connection. + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported: PLAINTEXT and SSL + BOOTSTRAP_BROKER_JMX_PORT: 9999 + # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset + BOOTSTRAP_BROKER_JMX_USER: admin + BOOTSTRAP_BROKER_JMX_PASSWORD: password + # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics + # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things + # like deployment to kubernetes, where a single integration instance is desired per broker. + LOCAL_ONLY_COLLECTION: false + # See above for more information on topic collection + COLLECT_BROKER_TOPIC_DATA: true + TOPIC_MODE: "all" + COLLECT_TOPIC_SIZE: false + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + +# Example kafka-producer-consumer-metrics. This gives an example of collecting JMX metrics from consumers and producers +- name: nri-kafka + env: + METRICS: "true" + CLUSTER_NAME: "testcluster3" + # In order to collect Java producer and consumer metrics the "producers" and "consumers" fields should be filled out. + # Both fields are JSON arrays, with each entry being a separate Java producer or consumer in its respective field. + # Each entry should have the following fields: + # - name: This is the actual name of the producer/consumer as it appears in Kafka + # - host: The IP or Hostname of the producer/consumer. If omitted, will use the value of the "default_jmx_host" field + # - port: The port JMX is set up on for the producer/consumer. If omitted, will use the value of the "default_jmx_port" field + # - username: The username used to connect to JMX. If omitted, will use the value of the "default_jmx_user" field + # - password: The password used to connect to JMX. If omitted, will use the value of the "default_jmx_password" field + # Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"} + PRODUCERS: '[{"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' + CONSUMERS: '[{"name": "myConsumer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' + # If several producers/consumers are on the same host, an agent can be installed on that host and the + # "default_jmx_host" and "default_jmx_port" fields can be set once and used for all producers/consumers that + # do not have the "host" or "port" field, respectively. + # These fields can be removed if each producer/consumer has its own "host" and/or "port" field filled out.
+ DEFAULT_JMX_HOST: "localhost" + DEFAULT_JMX_PORT: "9999" + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + +# Example kafka-kerberos-auth +- name: nri-kafka + env: + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + AUTODISCOVER_STRATEGY: "bootstrap" + # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster + # will be discovered using that connection. + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported: PLAINTEXT and SSL + BOOTSTRAP_BROKER_JMX_PORT: 9999 + # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset + BOOTSTRAP_BROKER_JMX_USER: admin + BOOTSTRAP_BROKER_JMX_PASSWORD: password + # Kerberos authentication arguments + SASL_MECHANISM: GSSAPI + SASL_GSSAPI_REALM: SOMECORP.COM + SASL_GSSAPI_SERVICE_NAME: Kafka + SASL_GSSAPI_USERNAME: kafka + SASL_GSSAPI_KEY_TAB_PATH: /etc/newrelic-infra/kafka.keytab + SASL_GSSAPI_KERBEROS_CONFIG_PATH: /etc/krb5.conf + # disables FAST negotiation that causes issues with Active Directory + # sasl_gssapi_disable_fast_negotiation: false + # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics + # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things + # like deployment to kubernetes, where a single integration instance is desired per broker. + LOCAL_ONLY_COLLECTION: false + # See above for more information on topic collection + COLLECT_BROKER_TOPIC_DATA: true + TOPIC_MODE: "all" + COLLECT_TOPIC_SIZE: false + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka diff --git a/kafka-win-config.yml.sample b/kafka-win-config.yml.sample new file mode 100644 index 00000000..49a65a36 --- /dev/null +++ b/kafka-win-config.yml.sample @@ -0,0 +1,207 @@ +integrations: + +# Example kafka-inventory. This gives an example of collecting inventory with the integration +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + INVENTORY: "true" + CLUSTER_NAME: "testcluster2" + ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]' + ZOOKEEPER_AUTH_SECRET: "username:password" + # Below are the fields used to fine tune/toggle topic inventory collection. + # In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex" + TOPIC_MODE: 'all' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-consumer-offsets. This gives an example configuration for collecting consumer offsets for the cluster +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + METRICS: "false" + INVENTORY: "false" + CONSUMER_OFFSET: "true" + CLUSTER_NAME: "testcluster3" + AUTODISCOVER_STRATEGY: "bootstrap" + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT + # A regex pattern that matches the consumer groups to collect metrics from + CONSUMER_GROUP_REGEX: '.*' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-metrics-zookeeper-discovery.
This gives an example of autodiscovery of brokers with zookeeper +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + # Override the kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features. + KAFKA_VERSION: "1.0.0" + # How to find brokers. Either "bootstrap" or "zookeeper" + AUTODISCOVER_STRATEGY: "zookeeper" + # A list of zookeeper hosts to discover brokers from. + # Only required and used if `autodiscover_strategy` is "zookeeper" + # + # The "zookeeper_hosts" field is a JSON array; each entry in the array contains connection information for a Zookeeper node. + # Each entry should have the following fields: + # - host: The IP or Hostname of a Zookeeper node; if the New Relic agent is installed on a Zookeeper node, "localhost" is an acceptable value + # - port: The port Zookeeper is listening on for incoming requests. If omitted, a default port of 2181 will be used. + ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]' + # If using "user" authentication, the credentials must be specified as a string of the form "username:password" + # Example: 'zookeeperuser:zookeeperpass' + ZOOKEEPER_AUTH_SECRET: "username:password" + # If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified. + # The alternative root must have a leading slash. + ZOOKEEPER_PATH: "/kafka-root" + # It is common to use the same JMX configuration across a Kafka cluster + # The default username and password are the credentials that will be used to make + # a JMX connection to each broker found by Zookeeper. These values will also + # be used when connecting to a consumer and/or producer if the "username" or "password" + # fields are omitted. + DEFAULT_JMX_USER: "username" + DEFAULT_JMX_PASSWORD: "password" + # This field is used to toggle the collection of broker and topic metrics. This is on by default and should only be set to "false" + # for the specific case where only producers/consumers are being monitored and "topic_mode" is set to "all". + # In any other case this field can be omitted. + COLLECT_BROKER_TOPIC_DATA: true + # Below are the fields used to fine tune/toggle topic metric collection. + # In order to collect topics the "topic_mode" field must be set to "all" or "list". If the field is set to "all", + # a Zookeeper connection is required and at least the "zookeeper_hosts" field must be set, as topics are looked up via Zookeeper. + # + # It is recommended to use the "list" option to monitor a specific set of topics. If using "list" mode the "topic_list" + # field should be filled out. The "topic_list" is a JSON array of topic names to be monitored. + # Example of topic_list: '["topic1", "topic2"]' + # + # If monitoring topics via the "all" or "list" option for "topic_mode", the topic size can be collected from zookeeper by setting + # "collect_topic_size" to true. This operation is intensive and can take a while to collect for a larger number of topics. + # It is recommended to only enable this feature if using a small "topic_list". + # If the field is omitted it will default to false. + TOPIC_MODE: "regex" + # topic_list: '["topic1", "topic2", "topic3"]' + TOPIC_REGEX: 'topic\d+' + # collect_topic_size collects the on-disk size for the topics collected.
This can be very time intensive for large clusters, + # so it is disabled by default + COLLECT_TOPIC_SIZE: false + # topic_bucket is used to split topic metric collection across multiple instances. This is useful when the number of topics you want to collect + # is too large for a single collection and the topics are not easily partitionable with a regex. It works by hashing the topic name, then using it to split + # the topics across a number of buckets. The first number is the index of the current instance, the second is the total number of instances the + # topics are split across. For example, if you want the topics matched by `topic_regex: 'mytopic.*'` to be split across three instances, one + # instance will be configured with `topic_bucket: 1/3`, one with `2/3`, and one with `3/3` + TOPIC_BUCKET: '1/3' + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + + +# Example kafka-metrics-bootstrap-discovery. This gives an example of autodiscovery of brokers with a bootstrap broker +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + AUTODISCOVER_STRATEGY: "bootstrap" + # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster + # will be discovered using that connection. + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported: PLAINTEXT and SSL + BOOTSTRAP_BROKER_JMX_PORT: 9999 + # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset + BOOTSTRAP_BROKER_JMX_USER: admin + BOOTSTRAP_BROKER_JMX_PASSWORD: password + # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics + # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things + # like deployment to kubernetes, where a single integration instance is desired per broker. + LOCAL_ONLY_COLLECTION: false + # See above for more information on topic collection + COLLECT_BROKER_TOPIC_DATA: true + TOPIC_MODE: "all" + COLLECT_TOPIC_SIZE: false + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + +# Example kafka-producer-consumer-metrics. This gives an example of collecting JMX metrics from consumers and producers +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + METRICS: "true" + CLUSTER_NAME: "testcluster3" + # In order to collect Java producer and consumer metrics the "producers" and "consumers" fields should be filled out. + # Both fields are JSON arrays, with each entry being a separate Java producer or consumer in its respective field. + # Each entry should have the following fields: + # - name: This is the actual name of the producer/consumer as it appears in Kafka + # - host: The IP or Hostname of the producer/consumer. If omitted, will use the value of the "default_jmx_host" field + # - port: The port JMX is set up on for the producer/consumer. If omitted, will use the value of the "default_jmx_port" field + # - username: The username used to connect to JMX. If omitted, will use the value of the "default_jmx_user" field + # - password: The password used to connect to JMX.
If omitted, will use the value of the "default_jmx_password" field + # Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"} + PRODUCERS: '[{"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' + CONSUMERS: '[{"name": "myConsumer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]' + # If several producers/consumers are on the same host, an agent can be installed on that host and the + # "default_jmx_host" and "default_jmx_port" fields can be set once and used for all producers/consumers that + # do not have the "host" or "port" field, respectively. + # These fields can be removed if each producer/consumer has its own "host" and/or "port" field filled out. + DEFAULT_JMX_HOST: "localhost" + DEFAULT_JMX_PORT: "9999" + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka + +# Example kafka-kerberos-auth +- name: nri-kafka + env: + NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat" + METRICS: "true" + # A cluster name is required to uniquely identify this collection result in Insights + CLUSTER_NAME: "testcluster1" + AUTODISCOVER_STRATEGY: "bootstrap" + # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster + # will be discovered using that connection. + BOOTSTRAP_BROKER_HOST: "localhost" + BOOTSTRAP_BROKER_KAFKA_PORT: 9092 + BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported: PLAINTEXT and SSL + BOOTSTRAP_BROKER_JMX_PORT: 9999 + # JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset + BOOTSTRAP_BROKER_JMX_USER: admin + BOOTSTRAP_BROKER_JMX_PASSWORD: password + # Kerberos authentication arguments + SASL_MECHANISM: GSSAPI + SASL_GSSAPI_REALM: SOMECORP.COM + SASL_GSSAPI_SERVICE_NAME: Kafka + SASL_GSSAPI_USERNAME: kafka + SASL_GSSAPI_KEY_TAB_PATH: /etc/newrelic-infra/kafka.keytab + SASL_GSSAPI_KERBEROS_CONFIG_PATH: /etc/krb5.conf + # disables FAST negotiation that causes issues with Active Directory + # sasl_gssapi_disable_fast_negotiation: false + # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics + # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things + # like deployment to kubernetes, where a single integration instance is desired per broker. + LOCAL_ONLY_COLLECTION: false + # See above for more information on topic collection + COLLECT_BROKER_TOPIC_DATA: true + TOPIC_MODE: "all" + COLLECT_TOPIC_SIZE: false + interval: 15s + labels: + env: production + role: kafka + inventory_source: config/kafka diff --git a/kafka-definition.yml b/legacy/kafka-definition.yml similarity index 100% rename from kafka-definition.yml rename to legacy/kafka-definition.yml diff --git a/kafka-win-definition.yml b/legacy/kafka-win-definition.yml similarity index 100% rename from kafka-win-definition.yml rename to legacy/kafka-win-definition.yml
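The new samples use the agent's V4 configuration format, which is why the package dependency above is bumped to newrelic-infra >= 1.20.0. As a rough illustration of how a deployed file derived from the sample might look — assuming the agent's standard integrations directory (/etc/newrelic-infra/integrations.d/ on Linux) and placeholder values for the cluster name and broker address — a minimal V4 config could be:

integrations:
  - name: nri-kafka
    env:
      METRICS: "true"
      CLUSTER_NAME: "my-cluster"                  # placeholder; identifies this cluster in New Relic
      AUTODISCOVER_STRATEGY: "bootstrap"
      BOOTSTRAP_BROKER_HOST: "localhost"          # placeholder; any reachable broker
      BOOTSTRAP_BROKER_KAFKA_PORT: 9092
      BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT
      BOOTSTRAP_BROKER_JMX_PORT: 9999
    interval: 15s
    labels:
      env: production
      role: kafka
    inventory_source: config/kafka

The V3 definition files are only moved to legacy/, not removed, and remain packaged, so existing V3 setups keep working while they are migrated.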