diff --git a/.ansible-lint b/.ansible-lint
index 6bc960c..787817f 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -4,4 +4,3 @@ exclude_paths:
 
 skip_list:
   - '106' # Role name does not match ``^[a-z][a-z0-9_]+$`` pattern
-
\ No newline at end of file
diff --git a/.github/workflows/ansible-lint.yaml b/.github/workflows/ansible-lint.yaml
deleted file mode 100644
index 57ab3ca..0000000
--- a/.github/workflows/ansible-lint.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-name: Ansible Lint
-
-#
-# Documentation:
-# https://github.com/ansible/ansible-lint-action
-#
-on:
-  push:
-    branches: [master]
-  pull_request:
-    branches: [master]
-
-jobs:
-  build:
-    name: Ansible Lint
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout Code
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Lint Ansible Role
-        uses: ansible/ansible-lint-action@master
-        with:
-          targets: ''
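Note: the standalone GitHub Actions lint workflow above and the Travis CI pipeline below are both removed; linting now runs through the Molecule provisioner (see the `molecule/default/molecule.yml` hunk later in this diff). For local parity — a sketch, assuming `yamllint` and `ansible-lint` are installed:

```sh
# Same commands the Molecule provisioner's lint block runs
yamllint -c ./.yamllint.yaml .
ansible-lint -c ./.ansible-lint
```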
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index e4f4e10..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-
-sudo: required
-
-services:
-  - docker
-
-before_install:
-  # Update the host with latest versions
-  - sudo apt-get update -qq
-
-install:
-  # Install Ansible on host
-  - pip install ansible --user
-
-  # Install docker-py
-  - pip install --upgrade docker-py --user
-
-  # Add ansible.cfg to pick up roles path.
-  - printf '[defaults]\nroles_path = ../' > ansible.cfg
-
-  # Pull a CentOS image with systemd installed for the Docker containers
-  - docker pull centos/systemd
-
-script:
-  # Install role dependencies.
-  - ansible-galaxy install -r tests/requirements.yml
-
-  # Check syntax of Ansible role
-  - ansible-playbook tests/test.yaml -i tests/inventory --syntax-check
-
-  # Run Ansible role
-  - ansible-playbook tests/test.yaml -i tests/inventory --verbose
-
-  # Run the playbook and role again to ensure that it is idempotent and nothing has changed
-  - >
-    ansible-playbook tests/test.yaml -i tests/inventory --verbose
-    | grep -q 'changed=0.*failed=0'
-    && (echo 'Idempotence test: pass' && exit 0)
-    || (echo 'Idempotence test: fail' && exit 1)
-
-  # Check that the zookeeper nodes are running
-  - >
-    docker exec zookeeper-1 systemctl status zookeeper 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "ZooKeeper service running - pass" && exit 0)
-    || (echo "ZooKeeper service running - fail" && exit 1)
-  - >
-    docker exec zookeeper-2 systemctl status zookeeper 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "ZooKeeper service running - pass" && exit 0)
-    || (echo "ZooKeeper service running - fail" && exit 1)
-  - >
-    docker exec zookeeper-3 systemctl status zookeeper 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "ZooKeeper service running - pass" && exit 0)
-    || (echo "ZooKeeper service running - fail" && exit 1)
-
-  # Check that the kafka nodes are running
-  - >
-    docker exec kafka-1 systemctl status kafka 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "Kafka service running - pass" && exit 0)
-    || (echo "Kafka service running - fail" && exit 1)
-  - >
-    docker exec kafka-2 systemctl status kafka 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "Kafka service running - pass" && exit 0)
-    || (echo "Kafka service running - fail" && exit 1)
-  - >
-    docker exec kafka-3 systemctl status kafka 2>&1
-    | awk 'FNR == 3 {print}' | grep "active (running)"
-    && (echo "Kafka service running - pass" && exit 0)
-    || (echo "Kafka service running - fail" && exit 1)
-
-  # Check that a Znode can be successfully created, this is to ensure that ZooKeeper is up and functioning
-  - >
-    docker exec zookeeper-1 /usr/share/zookeeper/bin/zkCli.sh create /TestZnode1 "test-node-1" 2>&1
-    | awk -F\" '/Created/ {print $1}' | grep "Created"
-    && (echo "Znode ceate test - pass" && exit 0)
-    || (echo "Znode create test - fail" && exit 1)
-
-  # Create a Kafka topic named "test" using node 1 and confirm that it was created successfully
-  - docker exec kafka-1 /opt/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-1:2181 --replication-factor 1 --partitions 1 --topic test
-  - >
-    docker exec kafka-1 /opt/kafka/bin/kafka-topics.sh --list --zookeeper zookeeper-1:2181 2>&1
-    | awk 'FNR == 1 {print}' | grep "test"
-    && (echo "Topic created - pass" && exit 0)
-    || (echo "Topic created - fail" && exit 1)
-
-  # Send a test message to this topic using node 2
-  - docker exec kafka-2 sh -c 'echo "test message" | /opt/kafka/bin/kafka-console-producer.sh --broker-list kafka-2:9092 --topic test'
-
-  # Consume the message from the topic using node 3 to confirm it was sent successfully
-  - >
-    docker exec kafka-3 /opt/kafka/bin/kafka-console-consumer.sh --topic test --from-beginning --max-messages 1 --bootstrap-server kafka-3:9092 2>&1
-    | awk 'FNR == 1 {print}' | grep "test message"
-    && (echo "Message consumed - pass" && exit 0)
-    || (echo "Message consumed - fail" && exit 1)
-
-after_script:
-  # Stop and remove the Docker containers
-  - docker stop zookeeper-1 && docker rm zookeeper-1
-  - docker stop zookeeper-2 && docker rm zookeeper-2
-  - docker stop zookeeper-3 && docker rm zookeeper-3
-  - docker stop kafka-1 && docker rm kafka-1
-  - docker stop kafka-2 && docker rm kafka-2
-  - docker stop kafka-3 && docker rm kafka-3
-
-  # Remove the Docker network
-  - docker network rm kafka
-
-
-notifications:
-  webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/README.md b/README.md
index 6c92974..cefb7fa 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 [![Build Status]](https://travis-ci.org/sleighzy/ansible-kafka) ![Lint Code Base]
 ![Ansible Lint] ![Molecule]
 
-Ansible role to install and configure [Apache Kafka] 2.8.0
+Ansible role to install and configure [Apache Kafka] 2.8.1
 
 [Apache Kafka] is a distributed event streaming platform using publish-subscribe
 topics. Applications and streaming components can produce and consume messages
@@ -45,7 +45,7 @@ See for more information.
 | Variable                                        | Default                               |
 | ----------------------------------------------- | ------------------------------------- |
 | kafka_download_base_url                         |                                       |
-| kafka_version                                   | 2.8.0                                 |
+| kafka_version                                   | 2.8.1                                 |
 | kafka_scala_version                             | 2.13                                  |
 | kafka_create_user_group                         | true                                  |
 | kafka_user                                      | kafka                                 |
@@ -85,6 +85,9 @@ See for more information.
 | kafka_bootstrap_servers                         | localhost:9092                        |
 | kafka_consumer_group_id                         | kafka-consumer-group                  |
 
+See [log4j.yml](./defaults/main/002-log4j.yml) for the full list of
+available log4j-related variables.
+
 ## Starting and Stopping Kafka services using systemd
 
 - The Kafka service can be started via: `systemctl start kafka`
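Note: a minimal playbook consuming these README variables might look like the following sketch; the `kafka-nodes` host group name is illustrative:

```yaml
- hosts: kafka-nodes
  roles:
    - sleighzy.kafka
  vars:
    kafka_version: 2.8.1
    kafka_scala_version: 2.13
```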
diff --git a/defaults/main.yaml b/defaults/main/001-kafka.yml
similarity index 99%
rename from defaults/main.yaml
rename to defaults/main/001-kafka.yml
index 84dbf36..45a302c 100644
--- a/defaults/main.yaml
+++ b/defaults/main/001-kafka.yml
@@ -2,7 +2,7 @@
 # The Apache Kafka version to be downloaded and installed
 # kafka_download_base_url should be set to https://archive.apache.org/dist/kafka/ for older versions than the current
 kafka_download_base_url: http://www-eu.apache.org/dist/kafka
-kafka_version: 2.8.0
+kafka_version: 2.8.1
 kafka_scala_version: 2.13
 
 # The kafka user and group to create files/dirs with and for running the kafka service
diff --git a/defaults/main/002-log4j.yml b/defaults/main/002-log4j.yml
new file mode 100644
index 0000000..150ec80
--- /dev/null
+++ b/defaults/main/002-log4j.yml
@@ -0,0 +1,74 @@
+---
+log4j_rootlogger: "INFO, stdout, kafkaAppender"
+
+log4j_appender_stdout: "org.apache.log4j.ConsoleAppender"
+log4j_appender_stdout_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_stdout_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_kafkaappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_kafkaappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_kafkaappender_file: "${kafka.logs.dir}/server.log"
+log4j_appender_kafkaappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_kafkaappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_statechangeappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_statechangeappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_statechangeappender_file: "${kafka.logs.dir}/state-change.log"
+log4j_appender_statechangeappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_statechangeappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_requestappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_requestappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_requestappender_file: "${kafka.logs.dir}/kafka-request.log"
+log4j_appender_requestappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_requestappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_cleanerappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_cleanerappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_cleanerappender_file: "${kafka.logs.dir}/log-cleaner.log"
+log4j_appender_cleanerappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_cleanerappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_controllerappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_controllerappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_controllerappender_file: "${kafka.logs.dir}/controller.log"
+log4j_appender_controllerappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_controllerappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+log4j_appender_authorizerappender: "org.apache.log4j.DailyRollingFileAppender"
+log4j_appender_authorizerappender_datepattern: "'.'yyyy-MM-dd-HH"
+log4j_appender_authorizerappender_file: "${kafka.logs.dir}/kafka-authorizer.log"
+log4j_appender_authorizerappender_layout: "org.apache.log4j.PatternLayout"
+log4j_appender_authorizerappender_layout_conversionpattern: "[%d] %p %m (%c)%n"
+
+# Change the line below to adjust ZK client logging
+log4j_logger_org_apache_zookeeper: "INFO"
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j_logger_kafka: "INFO"
+log4j_logger_org_apache_kafka: "INFO"
+
+# Change to DEBUG or TRACE to enable request logging
+log4j_logger_kafka_request_logger: "WARN, requestAppender"
+log4j_additivity_kafka_request_logger: false
+
+# Uncomment the lines below and change log4j_logger_kafka_network_requestchannel to TRACE for additional output
+# related to the handling of requests
+#log4j_logger_kafka_network_processor: TRACE, requestAppender
+#log4j_logger_kafka_server_kafkaapis: TRACE, requestAppender
+#log4j_additivity_kafka_server_kafkaapis: false
+log4j_logger_kafka_network_requestchannel: "WARN, requestAppender"
+log4j_additivity_kafka_network_requestchannel: false
+
+log4j_logger_kafka_controller: "TRACE, controllerAppender"
+log4j_additivity_kafka_controller: false
+
+log4j_logger_kafka_log_logcleaner: "INFO, cleanerAppender"
+log4j_additivity_kafka_log_logcleaner: false
+
+log4j_logger_state_change_logger: "INFO, stateChangeAppender"
+log4j_additivity_state_change_logger: false
+
+# Access denials are logged at INFO level; change to DEBUG to also log allowed accesses
+log4j_logger_kafka_authorizer_logger: "INFO, authorizerAppender"
+log4j_additivity_kafka_authorizer_logger: false
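Note: these defaults mirror the stock Kafka log4j.properties. Enabling request logging from inventory variables might look like this sketch (the group_vars file path is illustrative):

```yaml
# group_vars/kafka.yml -- illustrative override file
log4j_logger_kafka_request_logger: "DEBUG, requestAppender"
# These optional loggers are only rendered into log4j.properties when defined:
log4j_logger_kafka_network_processor: "TRACE, requestAppender"
log4j_logger_kafka_server_kafkaapis: "TRACE, requestAppender"
log4j_additivity_kafka_server_kafkaapis: false
```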
diff --git a/meta/main.yml b/meta/main.yml
index cff7993..af78c1f 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -1,6 +1,8 @@
 ---
 galaxy_info:
   author: Simon Leigh
+  namespace: sleighzy
+  role_name: kafka
   description: Apache Kafka installation for RHEL/CentOS and Debian/Ubuntu
   license: MIT
   min_ansible_version: 2.10.4
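Note: with `namespace` and `role_name` now set explicitly in `galaxy_info`, the role resolves under its fully qualified Galaxy name; installation is unchanged:

```sh
ansible-galaxy install sleighzy.kafka
```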
diff --git a/molecule/default/Dockerfile.j2 b/molecule/default/Dockerfile.j2
deleted file mode 100644
index 0de39e6..0000000
--- a/molecule/default/Dockerfile.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-# Molecule managed
-
-{% if item.registry is defined %}
-FROM {{ item.registry.url }}/{{ item.image }}
-{% else %}
-FROM {{ item.image }}
-{% endif %}
-
-{% if item.env is defined %}
-{% for var, value in item.env.items() %}
-{% if value %}
-ENV {{ var }} {{ value }}
-{% endif %}
-{% endfor %}
-{% endif %}
-
-RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iproute2 && apt-get clean; \
-    elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash iproute && dnf clean all; \
-    elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash iproute && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
-    elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml iproute2 && zypper clean -a; \
-    elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
-    elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates iproute2 && xbps-remove -O; fi
diff --git a/molecule/default/INSTALL.rst b/molecule/default/INSTALL.rst
deleted file mode 100644
index dff1da3..0000000
--- a/molecule/default/INSTALL.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-*******
-Docker driver installation guide
-*******
-
-Requirements
-============
-
-* Docker Engine
-
-Install
-=======
-
-Please refer to the `Virtual environment`_ documentation for installation best
-practices. If not using a virtual environment, please consider passing the
-widely recommended `'--user' flag`_ when invoking ``pip``.
-
-.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
-.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
-
-.. code-block:: bash
-
-    $ python3 -m pip install --user "molecule[docker,lint]"
diff --git a/molecule/default/molecule.yml b/molecule/default/molecule.yml
index 467c150..c4313b7 100644
--- a/molecule/default/molecule.yml
+++ b/molecule/default/molecule.yml
@@ -90,7 +90,7 @@ provisioner:
   lint: |
     set -e
     yamllint -c ./.yamllint.yaml .
-    ansible-lint -c ./.ansible-lint .
+    ansible-lint -c ./.ansible-lint
 verifier:
   name: ansible
 scenario:
diff --git a/molecule/default/requirements.yml b/molecule/default/requirements.yml
index 58da51e..7ad9f68 100644
--- a/molecule/default/requirements.yml
+++ b/molecule/default/requirements.yml
@@ -1,4 +1,5 @@
 roles:
   - sleighzy.zookeeper
+
 collections:
-  - community.docker
\ No newline at end of file
+  - community.docker
diff --git a/molecule/default/verify.yml b/molecule/default/verify.yml
index f5b3798..8895952 100644
--- a/molecule/default/verify.yml
+++ b/molecule/default/verify.yml
@@ -16,12 +16,12 @@
           - "'kafka' in getent_passwd"
           - "'kafka' in getent_group"
 
-    - name: Register '/opt/kafka_2.13-2.8.0' installation directory status
+    - name: Register '/opt/kafka_2.13-2.8.1' installation directory status
       stat:
-        path: '/opt/kafka_2.13-2.8.0'
+        path: '/opt/kafka_2.13-2.8.1'
      register: install_dir
 
-    - name: Assert that '/opt/kafka_2.13-2.8.0' directory is created
+    - name: Assert that '/opt/kafka_2.13-2.8.1' directory is created
       assert:
         that:
           - install_dir.stat.exists
@@ -39,7 +39,7 @@
       assert:
         that:
          - kafka_dir.stat.exists
          - kafka_dir.stat.islnk
-          - kafka_dir.stat.lnk_target == '/opt/kafka_2.13-2.8.0'
+          - kafka_dir.stat.lnk_target == '/opt/kafka_2.13-2.8.1'
     - name: Register '/var/log/kafka' directory status
       stat:
diff --git a/tasks/main.yaml b/tasks/main.yaml
index 4396d04..f991b28 100755
--- a/tasks/main.yaml
+++ b/tasks/main.yaml
@@ -172,6 +172,16 @@
   tags:
     - kafka_config
 
+- name: Template Kafka log4j properties file
+  template:
+    src: log4j.properties.j2
+    dest: '{{ kafka_dir }}/config/log4j.properties'
+    group: '{{ kafka_group }}'
+    owner: '{{ kafka_user }}'
+    mode: 0644
+  tags:
+    - kafka_config
+
 - name: Create symlink to kafka server properties file
   file:
     src: '{{ kafka_dir }}/config/server.properties'
@@ -222,6 +232,16 @@
   tags:
     - kafka_config
 
+- name: Create symlink to kafka log4j properties file
+  file:
+    src: '{{ kafka_dir }}/config/log4j.properties'
+    dest: /etc/kafka/log4j.properties
+    state: link
+    group: '{{ kafka_group }}'
+    owner: '{{ kafka_user }}'
+  tags:
+    - kafka_config
+
 - name: Template Kafka init.d service file
   template:
     src: kafka.initd.j2
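Note: both new tasks carry the existing `kafka_config` tag, so the log4j configuration can be re-templated without a full role run. A sketch, assuming a playbook named `site.yml` that applies this role:

```sh
ansible-playbook site.yml --tags kafka_config
```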
diff --git a/templates/log4j.properties.j2 b/templates/log4j.properties.j2
new file mode 100644
index 0000000..6a985f2
--- /dev/null
+++ b/templates/log4j.properties.j2
@@ -0,0 +1,99 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unspecified loggers and loggers with additivity=true output to server.log and stdout
+# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
+log4j.rootLogger={{ log4j_rootlogger }}
+
+log4j.appender.stdout={{ log4j_appender_stdout }}
+log4j.appender.stdout.layout={{ log4j_appender_stdout_layout }}
+log4j.appender.stdout.layout.ConversionPattern={{ log4j_appender_stdout_layout_conversionpattern }}
+
+log4j.appender.kafkaAppender={{ log4j_appender_kafkaappender }}
+log4j.appender.kafkaAppender.DatePattern={{ log4j_appender_kafkaappender_datepattern }}
+log4j.appender.kafkaAppender.File={{ log4j_appender_kafkaappender_file }}
+log4j.appender.kafkaAppender.layout={{ log4j_appender_kafkaappender_layout }}
+log4j.appender.kafkaAppender.layout.ConversionPattern={{ log4j_appender_kafkaappender_layout_conversionpattern }}
+
+log4j.appender.stateChangeAppender={{ log4j_appender_statechangeappender }}
+log4j.appender.stateChangeAppender.DatePattern={{ log4j_appender_statechangeappender_datepattern }}
+log4j.appender.stateChangeAppender.File={{ log4j_appender_statechangeappender_file }}
+log4j.appender.stateChangeAppender.layout={{ log4j_appender_statechangeappender_layout }}
+log4j.appender.stateChangeAppender.layout.ConversionPattern={{ log4j_appender_statechangeappender_layout_conversionpattern }}
+
+log4j.appender.requestAppender={{ log4j_appender_requestappender }}
+log4j.appender.requestAppender.DatePattern={{ log4j_appender_requestappender_datepattern }}
+log4j.appender.requestAppender.File={{ log4j_appender_requestappender_file }}
+log4j.appender.requestAppender.layout={{ log4j_appender_requestappender_layout }}
+log4j.appender.requestAppender.layout.ConversionPattern={{ log4j_appender_requestappender_layout_conversionpattern }}
+
+log4j.appender.cleanerAppender={{ log4j_appender_cleanerappender }}
+log4j.appender.cleanerAppender.DatePattern={{ log4j_appender_cleanerappender_datepattern }}
+log4j.appender.cleanerAppender.File={{ log4j_appender_cleanerappender_file }}
+log4j.appender.cleanerAppender.layout={{ log4j_appender_cleanerappender_layout }}
+log4j.appender.cleanerAppender.layout.ConversionPattern={{ log4j_appender_cleanerappender_layout_conversionpattern }}
+
+log4j.appender.controllerAppender={{ log4j_appender_controllerappender }}
+log4j.appender.controllerAppender.DatePattern={{ log4j_appender_controllerappender_datepattern }}
+log4j.appender.controllerAppender.File={{ log4j_appender_controllerappender_file }}
+log4j.appender.controllerAppender.layout={{ log4j_appender_controllerappender_layout }}
+log4j.appender.controllerAppender.layout.ConversionPattern={{ log4j_appender_controllerappender_layout_conversionpattern }}
+
+log4j.appender.authorizerAppender={{ log4j_appender_authorizerappender }}
+log4j.appender.authorizerAppender.DatePattern={{ log4j_appender_authorizerappender_datepattern }}
+log4j.appender.authorizerAppender.File={{ log4j_appender_authorizerappender_file }}
+log4j.appender.authorizerAppender.layout={{ log4j_appender_authorizerappender_layout }}
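Note: for reference, rendering the kafkaAppender block above with the defaults from `002-log4j.yml` reproduces the stock Kafka settings:

```properties
log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
```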
+log4j.appender.authorizerAppender.layout.ConversionPattern={{ log4j_appender_authorizerappender_layout_conversionpattern }}
+
+# Change the line below to adjust ZK client logging
+log4j.logger.org.apache.zookeeper={{ log4j_logger_org_apache_zookeeper }}
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j.logger.kafka={{ log4j_logger_kafka }}
+log4j.logger.org.apache.kafka={{ log4j_logger_org_apache_kafka }}
+
+# Change to DEBUG or TRACE to enable request logging
+log4j.logger.kafka.request.logger={{ log4j_logger_kafka_request_logger }}
+log4j.additivity.kafka.request.logger={{ log4j_additivity_kafka_request_logger }}
+
+# Define the variables below, and change log4j.logger.kafka.network.RequestChannel$ to TRACE, for additional
+# output related to the handling of requests
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+{% if log4j_logger_kafka_network_processor is defined %}
+log4j.logger.kafka.network.Processor={{ log4j_logger_kafka_network_processor }}
+{% endif %}
+{% if log4j_logger_kafka_server_kafkaapis is defined %}
+log4j.logger.kafka.server.KafkaApis={{ log4j_logger_kafka_server_kafkaapis }}
+{% endif %}
+{% if log4j_additivity_kafka_server_kafkaapis is defined %}
+log4j.additivity.kafka.server.KafkaApis={{ log4j_additivity_kafka_server_kafkaapis }}
+{% endif %}
+log4j.logger.kafka.network.RequestChannel$={{ log4j_logger_kafka_network_requestchannel }}
+log4j.additivity.kafka.network.RequestChannel$={{ log4j_additivity_kafka_network_requestchannel }}
+
+log4j.logger.kafka.controller={{ log4j_logger_kafka_controller }}
+log4j.additivity.kafka.controller={{ log4j_additivity_kafka_controller }}
+
+log4j.logger.kafka.log.LogCleaner={{ log4j_logger_kafka_log_logcleaner }}
+log4j.additivity.kafka.log.LogCleaner={{ log4j_additivity_kafka_log_logcleaner }}
+
+log4j.logger.state.change.logger={{ log4j_logger_state_change_logger }}
+log4j.additivity.state.change.logger={{ log4j_additivity_state_change_logger }}
+
+# Access denials are logged at INFO level; change to DEBUG to also log allowed accesses
+log4j.logger.kafka.authorizer.logger={{ log4j_logger_kafka_authorizer_logger }}
+log4j.additivity.kafka.authorizer.logger={{ log4j_additivity_kafka_authorizer_logger }}
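Note: a quick way to spot-check the rendered file and its `/etc/kafka` symlink on a managed host — a sketch, assuming an inventory group named `kafka-nodes`:

```sh
ansible kafka-nodes -m command -a 'grep rootLogger /etc/kafka/log4j.properties'
```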
diff --git a/tests/inventory b/tests/inventory
deleted file mode 100644
index d699d28..0000000
--- a/tests/inventory
+++ /dev/null
@@ -1,20 +0,0 @@
-zookeeper-1 ansible_host=zookeeper-1 ansible_connection=docker
-zookeeper-2 ansible_host=zookeeper-2 ansible_connection=docker
-zookeeper-3 ansible_host=zookeeper-3 ansible_connection=docker
-kafka-1 ansible_host=kafka-1 ansible_connection=docker
-kafka-2 ansible_host=kafka-2 ansible_connection=docker
-kafka-3 ansible_host=kafka-3 ansible_connection=docker
-
-[zookeeper-nodes]
-zookeeper-1 zookeeper_id=1
-zookeeper-2 zookeeper_id=2
-zookeeper-3 zookeeper_id=3
-
-[kafka-nodes]
-kafka-1 kafka_broker_id=1 kafka_listener_hostname=kafka-1
-kafka-2 kafka_broker_id=2 kafka_listener_hostname=kafka-2
-kafka-3 kafka_broker_id=3 kafka_listener_hostname=kafka-3
-
-[kafka-nodes:vars]
-kafka_zookeeper_connect='zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-kafka_bootstrap_servers='kafka-1:9092,kafka-2:9092,kafka-3:9092'
diff --git a/tests/requirements.yml b/tests/requirements.yml
deleted file mode 100644
index 05906fc..0000000
--- a/tests/requirements.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-- src: sleighzy.zookeeper
-  name: zookeeper
diff --git a/tests/roles/ansible-kafka b/tests/roles/ansible-kafka
deleted file mode 120000
index b1c5c43..0000000
--- a/tests/roles/ansible-kafka
+++ /dev/null
@@ -1 +0,0 @@
-../../../ansible-kafka
\ No newline at end of file
diff --git a/tests/test.yaml b/tests/test.yaml
deleted file mode 100644
index 7ad3401..0000000
--- a/tests/test.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
----
-- hosts: localhost
-
-  tasks:
-    # Create a Docker network that the containers will connect to. This will enable the
-    # containers to be able to see and access each other.
-    # This requires Ansible 2.2 for this docker_network module.
-    - name: Create Docker network
-      docker_network:
-        name: kafka
-        ipam_config:
-          - subnet: '172.25.0.0/16'
-
-    # The centos/systemd image used to create these containers is required so
-    # that systemd is available. This is used for the systemctl commands to
-    # install and run the zookeeper services for this role. The privileged container
-    # and "/sys/fs/cgroup" volume mount is also required for systemd support.
-    # The container needs to be started with the "/usr/lib/systemd/systemd" so that
-    # this service is initialized.
-    - name: Create ZooKeeper Docker containers
-      docker_container:
-        name: '{{ item.1 }}'
-        hostname: '{{ item.1 }}'
-        image: centos/systemd
-        state: started
-        privileged: yes
-        volumes:
-          - /sys/fs/cgroup:/sys/fs/cgroup:ro
-        networks:
-          - name: kafka
-            ipv4_address: 172.25.10.{{ item.0 + 1 }}
-        purge_networks: yes
-        exposed_ports:
-          - 2181
-          - 2888
-          - 3888
-        etc_hosts:
-          zookeeper-1: 172.25.10.1
-          zookeeper-2: 172.25.10.2
-          zookeeper-3: 172.25.10.3
-          kafka-1: 172.25.20.1
-          kafka-2: 172.25.20.2
-          kafka-3: 172.25.20.3
-        command: /usr/lib/systemd/systemd
-      with_indexed_items: "{{ groups['zookeeper-nodes'] }}"
-
-    # The centos/systemd image used to create these containers is required so
-    # that systemd is available. This is used for the systemctl commands to
-    # install and run the kafka services for this role. The privileged container
-    # and "/sys/fs/cgroup" volume mount is also requird for systemd support.
-    # Port 9092 is exposed as the Kafka broker port.
-    # The container needs to be started with the "/usr/lib/systemd/systemd" so that
-    # this service is initialized.
-    - name: Create Kafka Docker containers
-      docker_container:
-        name: '{{ item.1 }}'
-        hostname: '{{ item.1 }}'
-        image: centos/systemd
-        state: started
-        privileged: yes
-        volumes:
-          - /sys/fs/cgroup:/sys/fs/cgroup:ro
-        networks:
-          - name: kafka
-            ipv4_address: 172.25.20.{{ item.0 + 1 }}
-        purge_networks: yes
-        exposed_ports:
-          - 9092
-        etc_hosts:
-          zookeeper-1: 172.25.10.1
-          zookeeper-2: 172.25.10.2
-          zookeeper-3: 172.25.10.3
-          kafka-1: 172.25.20.1
-          kafka-2: 172.25.20.2
-          kafka-3: 172.25.20.3
-        command: /usr/lib/systemd/systemd
-      with_indexed_items: "{{ groups['kafka-nodes'] }}"
-
-- hosts: all
-  tasks:
-    - name: Install Java 8 (OpenJDK)
-      yum:
-        name: java-1.8.0-openjdk
-        state: installed
-
-- hosts: zookeeper-nodes
-  roles:
-    - zookeeper
-
-- hosts: kafka-nodes
-  roles:
-    - ansible-kafka
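Note: with the hand-rolled tests/ harness removed, the Molecule scenario is the single test entry point; it converges the role, runs verify.yml, and includes an idempotence check:

```sh
molecule test
```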