From 61f99b9e5596ae77c39c7a683cfb536d2ad170ef Mon Sep 17 00:00:00 2001
From: Wojtek Oledzki
Date: Tue, 4 Dec 2018 23:54:04 +0000
Subject: [PATCH] [GITHUB-40] Add support for custom environment variables

If, for instance, you want to use the NewRelic Java agent, you can use:

```YAML
- role: sansible.kafka
  sansible_kafka_environment_vars:
    - "NEWRELIC_OPTS=\"-javaagent:/home/{{ sansible_kafka_user }}/newrelic/newrelic.jar\""
    - "export KAFKA_OPTS=\"${KAFKA_OPTS} ${NEWRELIC_OPTS}\""
```
---
 .version                                     |  2 +-
 README.md                                    | 92 +++++---------------
 defaults/main.yml                            |  3 +-
 molecule/default/playbook.yml                |  4 +
 molecule/default/tests/test_configuration.py | 29 ++++++
 molecule/default/tests/test_default.py       | 13 ---
 templates/environment.j2                     |  2 +
 7 files changed, 61 insertions(+), 84 deletions(-)
 create mode 100644 molecule/default/tests/test_configuration.py

diff --git a/.version b/.version
index d9fb9c7..8616689 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-v3.0
+v3.1
diff --git a/README.md b/README.md
index f57138e..9c462bd 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,6 @@ Develop: [![Build Status](https://travis-ci.org/sansible/kafka.svg?branch=develo

 * [Installation and Dependencies](#installation-and-dependencies)
 * [Tags](#tags)
-* [Maintenance scripts](#maintenance-scripts)
 * [Examples](#examples)

 This roles installs Apache Kafka server.
@@ -32,38 +31,11 @@ and run `ansible-galaxy install -p ./roles -r roles.yml`

 ### AWS Setup

-This role has AWS support built in, it supports two methods for
-deployment/discovery.
-
-#### AWS Cluster Autodiscovery
-
-This method is designed for use with a single ASG controlling a cluster of
-Kafka instances, the idea being that instances can come and go without issue.
-
-The [AWS Autodiscover script](/files/aws_cluster_autodiscover) allows machines
-to pick an ID and hostname/Route53 entry from a predefined list, AWS tags are
-used to mark machines that have claimed an ID/host.
-
-This script allows for a static set of hostnames with consistent IDs to be
-maintained across a dynamic set of instances in an ASG.
-
-```YAML
-- role: sansible.kafka
-  sansible_kafka_aws_cluster_autodiscover_enabled: yes
-  sansible_kafka_aws_cluster_autodiscover_hosts:
-    - 01.kafka.io.internal
-    - 02.kafka.io.internal
-    - 03.kafka.io.internal
-  sansible_kafka_aws_cluster_autodiscover_lookup_filter: "Name=tag:Environment,Values=dev Name=tag:Role,Values=kafka"
-  sansible_kafka_aws_cluster_autodiscover_r53_zone_id: xxxxxxxx
-  # A ZK cluster behind an ELB
-  sansible_kafka_zookeeper_hosts:
-    - zookeeper.app.internal
-```
+This role has AWS support built in for deployment/discovery.

 #### AWS Tag Discovery

-Designed for instances that are stacially defined either as direct EC2
+Designed for instances that are statically defined either as direct EC2
 instances or via a single ASG per instance.

 The broker.id is derived from a tag attached to the instance, you can turn on
@@ -87,44 +59,6 @@ This role uses two tags: **build** and **configure**

 * `configure` - Configure and ensures that the Kafka service is running.

-## Maintenance scripts
-
-These scripts are used in conjunction with the
-[AWS Cluster Autodiscovery](aws-cluster-autodiscovery) deployment method.
-
-* kafka_maintenance_at_start
-
-  Intention behind this script is to introduce a new node to the cluster and
-  evenly redistribute data. It's included in Configure stage of Ansible role.
-  The new node contacts Zookeeper (ZK) and requests all brokers IDs currently
-  holding data.
Once information is received json file is generated and - information provided to ZK. - - -* kafka_maintenance_at_stop - - Intention behind this script is to allow node to remove itself from cluster - during shutdown and evenly redistribute data to remaining nodes. Script is - triggered by stop_kafka included in relevant runlevels. - Node contacts Zookeeper (ZK) and requests all brokers IDs currently holding - data. Once information is received json file is generated and information - provided to ZK. - -* remove_dns_record - - After kafka_maintenance_at_stop is executed during shutdown (stop_kafka) node - removes itself from Route53 (AWS). - -* TODO: - Becaue kafka_maintenance_start/stop are almost identical they can be merged. - Depends on use argument could be provided. - Example: - kafka_maintenance at_start - - To remove node from Route53 (AWS) Ansible module can be also used. - This will require tests. - - ## Examples ```YAML @@ -142,10 +76,30 @@ These scripts are used in conjunction with the roles: - name: sansible.kafka - sansible_kafka_aws_cluster_autodiscover_hosts: + sansible_kafka_zookeeper_hosts: - my.zookeeper.host ``` +```YAML +- name: Install Kafka with NewRelic integration + hosts: sandbox + + pre_tasks: + - name: Update apt + become: yes + apt: + cache_valid_time: 1800 + update_cache: yes + tags: + - build + + roles: + - name: sansible.kafka + sansible_kafka_environment_vars: + - "NEWRELIC_OPTS=\"-javaagent:/home/{{ sansible_kafka_user }}/newrelic/newrelic.jar\"" + - "export KAFKA_OPTS=\"${KAFKA_OPTS} ${NEWRELIC_OPTS}\"" +``` + If you just want to test Kafka service build both Zookeeper and Kafka on the same machine. diff --git a/defaults/main.yml b/defaults/main.yml index 7818f2a..0ac1ee0 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -4,6 +4,7 @@ sansible_kafka_apache_mirror: https://archive.apache.org/dist/ sansible_kafka_aws_cluster_assigned_id_enabled: no sansible_kafka_aws_cluster_assigned_id_tag_name: instanceindex sansible_kafka_conf_dir: /home/kafka/etc +sansible_kafka_environment_vars: [] sansible_kafka_group: kafka sansible_kafka_heap_opts: "-Xmx{{ (ansible_memtotal_mb / 2) | int }}m -Xms{{ (ansible_memtotal_mb / 2) | int }}m" sansible_kafka_interface_advertise: ~ @@ -18,7 +19,7 @@ sansible_kafka_port: 9092 sansible_kafka_server_properties: {} sansible_kafka_tarball_location: /home/kafka/tmp sansible_kafka_user: kafka -sansible_kafka_version_kafka: 2.0.0 +sansible_kafka_version_kafka: 2.1.0 sansible_kafka_version_scala: 2.11 sansible_kafka_wait_for_kafka_port: 120 sansible_kafka_zookeeper_connection_timeout_ms: 1000000 diff --git a/molecule/default/playbook.yml b/molecule/default/playbook.yml index 72f34a8..9c3df6c 100644 --- a/molecule/default/playbook.yml +++ b/molecule/default/playbook.yml @@ -4,6 +4,10 @@ hosts: all # Take a look at hosts vars in molecule.yml as well + vars: + sansible_kafka_environment_vars: + - "NEWRELIC_OPTS=\"-javaagent:/home/{{ sansible_kafka_user }}/newrelic/newrelic.jar\"" + - "export TEST_KAFKA_OPTS=\"${KAFKA_OPTS} ${NEWRELIC_OPTS}\"" roles: - role: sansible.zookeeper diff --git a/molecule/default/tests/test_configuration.py b/molecule/default/tests/test_configuration.py new file mode 100644 index 0000000..9436e7c --- /dev/null +++ b/molecule/default/tests/test_configuration.py @@ -0,0 +1,29 @@ +import os +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_server_properties(host): + 
server_properties = host.file( + '/home/kafka/etc/server.properties' + ).content_string + + assert 'listeners=PLAINTEXT://127.0.0.1:9092' \ + in server_properties + assert 'broker.id=11' \ + in server_properties + assert 'zookeeper.connect=' \ + in server_properties + + +def test_environment_properties(host): + server_environments = host.file( + '/home/kafka/etc/environment' + ).content_string + + assert 'NEWRELIC_OPTS="-javaagent:/home/kafka/newrelic/newrelic.jar"' \ + in server_environments + assert 'export TEST_KAFKA_OPTS="${KAFKA_OPTS} ${NEWRELIC_OPTS}"' \ + in server_environments diff --git a/molecule/default/tests/test_default.py b/molecule/default/tests/test_default.py index 8f10a70..9631730 100644 --- a/molecule/default/tests/test_default.py +++ b/molecule/default/tests/test_default.py @@ -14,16 +14,3 @@ def test_listening(host): assert host.socket('tcp://0.0.0.0:2181').is_listening assert host.socket('tcp://127.0.0.1:9092').is_listening assert host.socket('tcp://0.0.0.0:9999').is_listening - - -def test_server_properties(host): - server_properties = host.file( - '/home/kafka/etc/server.properties' - ).content_string - - assert 'listeners=PLAINTEXT://127.0.0.1:9092' \ - in server_properties - assert 'broker.id=11' \ - in server_properties - assert 'zookeeper.connect=' \ - in server_properties diff --git a/templates/environment.j2 b/templates/environment.j2 index 269c03d..53a2f11 100644 --- a/templates/environment.j2 +++ b/templates/environment.j2 @@ -6,3 +6,5 @@ GROUP=$NAME KAFKA_HEAP_OPTS="{{ sansible_kafka_heap_opts }}" JMX_PORT="{{ sansible_kafka_jmx_port }}" MAX_OPEN_FILES="{{ sansible_kafka_max_open_files }}" + +{{ sansible_kafka_environment_vars | join("\n") }}
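
The template change simply appends each `sansible_kafka_environment_vars` entry verbatim to the rendered environment file. As a minimal sketch, with the molecule vars above (and illustrative values for the role's existing heap/JMX/max-open-files settings, which are not part of this patch), `/home/kafka/etc/environment` would end with:

```sh
# Existing settings rendered by templates/environment.j2
# (these three values are illustrative defaults, not taken from this patch)
KAFKA_HEAP_OPTS="-Xmx512m -Xms512m"
JMX_PORT="9999"
MAX_OPEN_FILES="128000"

# Entries from sansible_kafka_environment_vars, joined one per line
NEWRELIC_OPTS="-javaagent:/home/kafka/newrelic/newrelic.jar"
export TEST_KAFKA_OPTS="${KAFKA_OPTS} ${NEWRELIC_OPTS}"
```

Because the list is rendered verbatim and in order, entries that reference earlier ones (such as `${NEWRELIC_OPTS}` above) must come after the line that defines them.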