Diffstat (limited to 'bigdata-scripts_test3')
-rw-r--r--  bigdata-scripts_test3/deploy.yml                                              21
-rw-r--r--  bigdata-scripts_test3/roles/jdk1.8.0_73/files/jdk-8u73-linux-x64.tar.gz       bin 0 -> 181310701 bytes
-rw-r--r--  bigdata-scripts_test3/roles/jdk1.8.0_73/files/set_java_enviroment             5
-rw-r--r--  bigdata-scripts_test3/roles/jdk1.8.0_73/tasks/main.yml                        31
-rw-r--r--  bigdata-scripts_test3/roles/kafka-manager/files/kafka-manager-1.3.3.18.zip    bin 0 -> 79563367 bytes
-rw-r--r--  bigdata-scripts_test3/roles/kafka-manager/tasks/main.yml                      35
-rw-r--r--  bigdata-scripts_test3/roles/kafka-manager/templates/application.conf.j2       47
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka-manager/templates/create-topic.sh.j2        21
-rw-r--r--  bigdata-scripts_test3/roles/kafka-manager/templates/start.sh.j2               9
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh                          30
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz                  bin 0 -> 9036 bytes
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz                  bin 0 -> 49475271 bytes
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh                        14
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/log4j.properties                      95
-rw-r--r--  bigdata-scripts_test3/roles/kafka/tasks/main.yml                              74
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2          44
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2                   11
-rw-r--r--  bigdata-scripts_test3/roles/kafka/templates/server.properties.j2              153
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2           6
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/files/dae-zookeeper.sh                  22
-rw-r--r--  bigdata-scripts_test3/roles/zookeeper/files/log4j.properties                  63
-rw-r--r--  bigdata-scripts_test3/roles/zookeeper/files/restart_sum.log                   1
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/files/zkEnv.sh                          116
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/files/zkServer.sh                       225
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/files/zklogdelete.sh                    20
-rw-r--r--  bigdata-scripts_test3/roles/zookeeper/files/zookeeper-3.4.9.tar.gz            bin 0 -> 22724574 bytes
-rw-r--r--  bigdata-scripts_test3/roles/zookeeper/tasks/main.yml                          91
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/templates/keepzkalive.j2                11
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/templates/set_zk_profile.sh.j2          6
-rwxr-xr-x  bigdata-scripts_test3/roles/zookeeper/templates/zoo.cfg.j2                    31
-rw-r--r--  bigdata-scripts_test3/test_host/group_vars/all.yml                            23
-rw-r--r--  bigdata-scripts_test3/test_host/hosts                                         13
32 files changed, 1218 insertions, 0 deletions
diff --git a/bigdata-scripts_test3/deploy.yml b/bigdata-scripts_test3/deploy.yml
new file mode 100644
index 0000000..0260e3e
--- /dev/null
+++ b/bigdata-scripts_test3/deploy.yml
@@ -0,0 +1,21 @@
+- hosts: server1
+  roles:
+    - jdk1.8.0_73
+    - zookeeper
+    - kafka
+
+- hosts: server2
+  roles:
+    - jdk1.8.0_73
+    - zookeeper
+    - kafka
+
+- hosts: server3
+  roles:
+    - jdk1.8.0_73
+    - zookeeper
+    - kafka
+
+- hosts: server1
+  roles:
+    - kafka-manager
diff --git a/bigdata-scripts_test3/roles/jdk1.8.0_73/files/jdk-8u73-linux-x64.tar.gz b/bigdata-scripts_test3/roles/jdk1.8.0_73/files/jdk-8u73-linux-x64.tar.gz
new file mode 100644
index 0000000..eb7b639
--- /dev/null
+++ b/bigdata-scripts_test3/roles/jdk1.8.0_73/files/jdk-8u73-linux-x64.tar.gz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/jdk1.8.0_73/files/set_java_enviroment b/bigdata-scripts_test3/roles/jdk1.8.0_73/files/set_java_enviroment
new file mode 100644
index 0000000..34c61f8
--- /dev/null
+++ b/bigdata-scripts_test3/roles/jdk1.8.0_73/files/set_java_enviroment
@@ -0,0 +1,5 @@
+# set java environment
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+export JRE_HOME=$JAVA_HOME/jre
+export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
+export PATH=$PATH:$JAVA_HOME/bin
diff --git a/bigdata-scripts_test3/roles/jdk1.8.0_73/tasks/main.yml b/bigdata-scripts_test3/roles/jdk1.8.0_73/tasks/main.yml
new file mode 100644
index 0000000..a3a6497
--- /dev/null
+++ b/bigdata-scripts_test3/roles/jdk1.8.0_73/tasks/main.yml
@@ -0,0 +1,31 @@
+- name: "copy JDK install files to destination server"
+ copy:
+ src: "{{ role_path }}/files/"
+ dest: /tmp
+ mode: 0755
+
+- name: "judge old java"
+ shell: rpm -qa | grep openjdk
+ register: return
+ ignore_errors: true
+
+- name: "remove old java"
+ shell: rpm -qa | grep openjdk | xargs rpm -e --nodeps
+ when: return.rc == 0
+
+- name: "install JDK1.8.0_73"
+ unarchive:
+ src: "/tmp/jdk-8u73-linux-x64.tar.gz"
+ dest: /usr/lib/jvm/
+ mode: 0755
+
+- name: "judge java enviroment"
+ shell: grep "/usr/lib/jvm/jdk1.8.0_73" /etc/profile
+ register: return
+
+- name: "set java enviroment"
+ shell: cat /tmp/set_java_enviroment >> /etc/profile
+ when: return.rc != 0
+
+- name: "source /etc/profile"
+ shell: source /etc/profile
diff --git a/bigdata-scripts_test3/roles/kafka-manager/files/kafka-manager-1.3.3.18.zip b/bigdata-scripts_test3/roles/kafka-manager/files/kafka-manager-1.3.3.18.zip
new file mode 100644
index 0000000..fa14d14
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka-manager/files/kafka-manager-1.3.3.18.zip
Binary files differ
diff --git a/bigdata-scripts_test3/roles/kafka-manager/tasks/main.yml b/bigdata-scripts_test3/roles/kafka-manager/tasks/main.yml
new file mode 100644
index 0000000..aab641a
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka-manager/tasks/main.yml
@@ -0,0 +1,35 @@
+- name: "copy kafka_manager install package to destination server"
+ copy:
+ src: "{{ role_path }}/files/{{ kafka_manager.package_name }}"
+ dest: /tmp
+
+- name: "install kafka_manager"
+ unarchive:
+ src: "/tmp/{{ kafka_manager.package_name }}"
+ dest: '{{ kafka.base_dir }}'
+ copy: no
+ mode: 0755
+
+- name: "template application.conf"
+ template:
+ src: "{{ role_path }}/templates/application.conf.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka_manager.version }}/conf/application.conf
+ mode: 0755
+
+- name: "template application.conf"
+ template:
+ src: "{{ role_path }}/templates/start.sh.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka_manager.version }}/start.sh'
+ mode: 0755
+
+- name: "start kafka_manager"
+ shell: /bin/bash {{ kafka.base_dir }}/{{ kafka_manager.version }}/start.sh
+
+- name: "template create-topic.sh"
+ template:
+ src: "{{ role_path }}/templates/create-topic.sh.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka.version }}/bin/create-topic.sh'
+ mode: 0755
+
+- name: "create topic"
+ shell: source /etc/profile;/bin/bash {{ kafka.base_dir }}/{{ kafka.version }}/bin/create-topic.sh
diff --git a/bigdata-scripts_test3/roles/kafka-manager/templates/application.conf.j2 b/bigdata-scripts_test3/roles/kafka-manager/templates/application.conf.j2
new file mode 100644
index 0000000..ff0779a
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka-manager/templates/application.conf.j2
@@ -0,0 +1,47 @@
+
+# Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
+# See accompanying LICENSE file.
+
+# This is the main configuration file for the application.
+# ~~~~~
+
+# Secret key
+# ~~~~~
+# The secret key is used to secure cryptographic functions.
+# If you deploy your application to several instances be sure to use the same key!
+play.crypto.secret="^<csmm5Fx4d=r2HEX8pelM3iBkFVv?k[mc;IZE<_Qoq8EkX_/7@Zt6dP05Pzea3U"
+play.crypto.secret=${?APPLICATION_SECRET}
+
+# The application languages
+# ~~~~~
+play.i18n.langs=["en"]
+
+play.http.requestHandler = "play.http.DefaultHttpRequestHandler"
+play.http.context = "/"
+play.application.loader=loader.KafkaManagerLoader
+
+kafka-manager.zkhosts="{{ zookeeper.iplist[0] }}:2181"
+kafka-manager.zkhosts=${?ZK_HOSTS}
+pinned-dispatcher.type="PinnedDispatcher"
+pinned-dispatcher.executor="thread-pool-executor"
+application.features=["KMClusterManagerFeature","KMTopicManagerFeature","KMPreferredReplicaElectionFeature","KMReassignPartitionsFeature"]
+
+akka {
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+ loglevel = "INFO"
+}
+
+akka.logger-startup-timeout = 60s
+
+basicAuthentication.enabled=false
+basicAuthentication.enabled=${?KAFKA_MANAGER_AUTH_ENABLED}
+basicAuthentication.username="admin"
+basicAuthentication.username=${?KAFKA_MANAGER_USERNAME}
+basicAuthentication.password="password"
+basicAuthentication.password=${?KAFKA_MANAGER_PASSWORD}
+basicAuthentication.realm="Kafka-Manager"
+basicAuthentication.excluded=["/api/health"] # ping the health of your instance without authentication
+
+kafka-manager.consumer.properties.file=${?CONSUMER_PROPERTIES_FILE}
+
+http.port=9998
diff --git a/bigdata-scripts_test3/roles/kafka-manager/templates/create-topic.sh.j2 b/bigdata-scripts_test3/roles/kafka-manager/templates/create-topic.sh.j2
new file mode 100755
index 0000000..7af90cd
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka-manager/templates/create-topic.sh.j2
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic CONNECTION-RECORD-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic PROXY-EVENT-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic RADIUS-RECORD-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic SECURITY-EVENT-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic CONNECTION-RECORD-COMPLETED-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic PROXY-EVENT-COMPLETED-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic RADIUS-RECORD-COMPLETED-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic SECURITY-EVENT-COMPLETED-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TRAFFIC-METRICS-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic SYS-DATA-STORAGE-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic SYS-STORAGE-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic PXY-EXCH-INTERMEDIA-CERT
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TOP-EXTERNAL-HOST-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TOP-INTERNAL-HOST-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TOP-USER-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TOP-URLS-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic TOP-WEBSITE-DOMAIN-LOG
+./kafka-topics.sh --create --zookeeper {{ zookeeper.iplist[0] }}:2181/kafka --replication-factor 1 --partitions 3 --topic RADIUS-ONFF-LOG
diff --git a/bigdata-scripts_test3/roles/kafka-manager/templates/start.sh.j2 b/bigdata-scripts_test3/roles/kafka-manager/templates/start.sh.j2
new file mode 100644
index 0000000..7e162e6
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka-manager/templates/start.sh.j2
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+base_dir='{{ kafka.base_dir }}'
+
+if [ -f "$base_dir/kafka-manager-1.3.3.18/RUNNING_PID" ]; then
+ rm -rf $base_dir/kafka-manager-1.3.3.18/RUNNING_PID
+fi
+
+nohup $base_dir/kafka-manager-1.3.3.18/bin/kafka-manager -Dconfig.file=$base_dir/kafka-manager-1.3.3.18/conf/application.conf > /dev/null 2>&1 &
diff --git a/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh b/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh
new file mode 100755
index 0000000..56f4df2
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Set BASE_DIR to the installation path
+# The JMX port in the start command below can be changed as needed
+#JMX_PORT=9191 nohup $BASE_DIR/bin/kafka-server-start.sh $BASE_DIR/config/server.properties > /dev/null 2>&1 &
+
+PRO_NAME=Kafka
+BASE_DIR=$1
+VERSION="kafka_2.11-1.0.0"
+
+#string=`cat $BASE_DIR/$VERSION/config/server.properties | grep broker.id`
+#array=(${string//=/ })
+#echo ${array[1]}
+#ssh [email protected] "source /etc/profile ; zkCli.sh ls /kafka/brokers/ids | grep -v 2181 | grep 4 | wc -l"
+source /etc/profile
+while true ; do
+ NUM=`jps | grep -w ${PRO_NAME} | grep -v grep |wc -l`
+
+ if [ "${NUM}" -lt "1" ];then
+ JMX_PORT=9191 nohup $BASE_DIR/$VERSION/bin/kafka-server-start.sh $BASE_DIR/$VERSION/config/server.properties > /dev/null 2>&1 &
+ OLD_NUM=`cat $BASE_DIR/$VERSION/logs/restart_sum.log`
+ RESTART_NUM=`expr $OLD_NUM + 1`
+ echo $RESTART_NUM > $BASE_DIR/$VERSION/logs/restart_sum.log
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - kafka服务启动/异常重启 - 重启次数 -> $RESTART_NUM" >> $BASE_DIR/$VERSION/restart.log
+ #大于1,杀掉所有进程,重启
+ elif [ "${NUM}" -gt "1" ];then
+ killall -9 ${PRO_NAME}
+ fi
+ sleep 60
+done
diff --git a/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz b/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz
new file mode 100644
index 0000000..a81a58b
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz b/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz
new file mode 100644
index 0000000..09991f7
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh b/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh
new file mode 100755
index 0000000..e6aacbe
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Keep only the last three days of logs; to keep more days, change "-3 days" below
+# Add this script to the system crontab (/etc/crontab)
+# The script relies on environment variables, so they must be configured (e.g. via /etc/profile)
+#. /etc/profile
+
+day=$(date +"%Y-%m-%d" -d "-3 days")
+
+kafka=`jps | grep Kafka | wc -l`
+if [[ $kafka = "1" ]];then
+ rm -rf $KAFKA_HOME/logs/*.$day*
+fi
+
diff --git a/bigdata-scripts_test3/roles/kafka/files/log4j.properties b/bigdata-scripts_test3/roles/kafka/files/log4j.properties
new file mode 100644
index 0000000..f8d573b
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/log4j.properties
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unspecified loggers and loggers with additivity=true output to server.log and stdout
+# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
+log4j.rootLogger=INFO, stdout, kafkaAppender
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
+log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Change the two lines below to adjust ZK client logging
+log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
+log4j.logger.org.apache.zookeeper=INFO
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j.logger.kafka=INFO
+log4j.logger.org.apache.kafka=INFO
+
+# Change to DEBUG or TRACE to enable request logging
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
+# related to the handling of requests
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+#log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.logger.kafka.network.RequestChannel$=INFO, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+#log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.logger.kafka.controller=INFO, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+#log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.logger.state.change.logger=INFO, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
+log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
+log4j.additivity.kafka.authorizer.logger=false
+
diff --git a/bigdata-scripts_test3/roles/kafka/tasks/main.yml b/bigdata-scripts_test3/roles/kafka/tasks/main.yml
new file mode 100644
index 0000000..b393b68
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/tasks/main.yml
@@ -0,0 +1,74 @@
+- name: "copy kafka install package to destination server"
+ copy:
+ src: "{{ role_path }}/files/{{ kafka.package_name }}"
+ dest: /tmp
+
+- name: "create kafka base_dir"
+ file:
+ path: '{{ kafka.base_dir }}'
+ state: directory
+
+- name: "install kafka"
+ unarchive:
+ src: "/tmp/{{ kafka.package_name }}"
+ dest: '{{ kafka.base_dir }}'
+ copy: no
+ mode: 0755
+
+- name: "create logs dir"
+ file:
+ path: '{{ item.path }}'
+ state: '{{ item.state }}'
+ with_items:
+ - { path: '{{ kafka.base_dir }}/{{ kafka.version }}/logs', state: directory }
+ - { path: '{{ kafka.base_dir }}/{{ kafka.version }}/kafka-logs', state: directory }
+
+- name: "copy any scripts and log4j.properties"
+ copy:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ with_items:
+ - { src: '{{ role_path }}/files/dae-kafka.sh', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/bin/' }
+ - { src: '{{ role_path }}/files/kflogdelete.sh', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/log/' }
+ - { src: '{{ role_path }}/files/log4j.properties', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/config/' }
+ mode: 0755
+
+- name: "echo 0 > restart_sum.log"
+ shell: echo 0 > '{{ kafka.base_dir }}/{{ kafka.version }}/logs/restart_sum.log'
+
+- name: "template server.properties"
+ template:
+ src: "{{ role_path }}/templates/server.properties.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka.version }}/config/server.properties'
+
+- name: "template kafka-server-start.sh"
+ template:
+ src: "{{ role_path }}/templates/kafka-server-start.sh.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka.version }}/bin/kafka-server-start.sh'
+
+- name: "template keepkafalive"
+ template:
+ src: "{{ role_path }}/templates/keepkafalive.j2"
+ dest: /etc/init.d/keepkafalive.j2
+ mode: 0755
+
+- name: "start keepkafalive"
+ service:
+ name: keepkafalive
+ state: restarted
+ enabled: true
+
+- name: "template set_kafka_profile.sh"
+ template:
+ src: "{{ role_path }}/templates/set_kafka_profile.sh.j2"
+ dest: /tmp/set_kafka_profile.sh
+ mode: 0755
+
+- name: "judge kafka enviroment"
+ shell: grep "#kafka" /etc/profile
+ register: return
+ ignore_errors: true
+
+- name: "set_kafka_profile"
+ shell: /bin/bash /tmp/set_kafka_profile.sh
+ when: return.rc != 0
diff --git a/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2 b/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2
new file mode 100755
index 0000000..9608df9
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
+ exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx{{ kafka.mem_max }} -Xms{{ kafka.mem_min }}"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
diff --git a/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2 b/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2
new file mode 100755
index 0000000..6ed1ff8
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# keepkafalive   Starts the dae-kafka.sh watchdog that keeps the Kafka broker running.
+#
+# chkconfig: 123456 30 70
+# description: keepkafalive
+source /etc/profile
+PRO_NAME=keepkafkaalive
+
+killall -9 dae-kafka.sh
+{{ kafka.base_dir }}/{{ kafka.version }}/bin/dae-kafka.sh {{ kafka.base_dir }} > /dev/null 2>&1 &
diff --git a/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2 b/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2
new file mode 100644
index 0000000..7419936
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2
@@ -0,0 +1,153 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={{ broker_id }}
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. It will get the value returned from
+# java.net.InetAddress.getCanonicalHostName() if not configured.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+
+# Hostname and port the broker will advertise to producers and consumers. If not set,
+# it uses the value for "listeners" if configured. Otherwise, it will use the value
+# returned from java.net.InetAddress.getCanonicalHostName().
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=10485760
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=10485760
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs={{ kafka.base_dir }}/{{ kafka.version }}/kafka-logs/
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
+offsets.topic.replication.factor={{ kafka.replica }}
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours={{ kafka.log_retentionHours }}
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+log.retention.bytes={{ kafka.log_retentionBytes }}
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms={{ kafka.log_retentionCheckInterval_ms }}
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={{ zookeeper.iplist[0] }}:2181,{{ zookeeper.iplist[1] }}:2181,{{ zookeeper.iplist[2] }}:2181/kafka
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=30000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
+
+
+#######################################################################################
+listeners=PLAINTEXT://{{ vars["ansible_"+kafka.ethname]['ipv4']['address'] }}:9092
+advertised.listeners=PLAINTEXT://{{ vars["ansible_"+kafka.ethname]['ipv4']['address'] }}:9092
+# kafka port
+port=9092
+# Allow topics to be deleted directly
+delete.topic.enable=true
+# Whether topics may be created automatically
+auto.create.topics.enable=false
+# Enable the periodic log deletion policy
+log.cleanup.policy=delete
+# Maximum size of a message body, in bytes
+message.max.bytes=10485760
+# Maximum amount of data a replica fetches per request, in bytes
+replica.fetch.max.bytes=20485760
+
diff --git a/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2 b/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2
new file mode 100755
index 0000000..3ae55a0
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2
@@ -0,0 +1,6 @@
+#!/bin/bash
+#
+echo -e "\n#kafka" >> /etc/profile
+echo -e "export KAFKA_HOME={{ kafka.base_dir }}/{{ kafka.version }}" >> /etc/profile
+echo -e "export PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile
+source /etc/profile
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/dae-zookeeper.sh b/bigdata-scripts_test3/roles/zookeeper/files/dae-zookeeper.sh
new file mode 100755
index 0000000..24d3527
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/dae-zookeeper.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Process name to watch
+PRO_NAME=QuorumPeerMain
+# Installation path
+BASE_DIR=$1
+VERSION="zookeeper-3.4.9"
+source /etc/profile
+while true ; do
+ NUM=`$BASE_DIR/$VERSION/bin/zkServer.sh status | egrep 'leader|follower' | wc -l`
+ if [ "${NUM}" -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/zkServer.sh start
+ OLD_NUM=`cat $BASE_DIR/$VERSION/logs/restart_sum.log`
+ RESTART_NUM=`expr $OLD_NUM + 1`
+ echo $RESTART_NUM > $BASE_DIR/$VERSION/logs/restart_sum.log
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - zookeeper服务启动/异常重启 - 重启次数 -> $RESTART_NUM" >> $BASE_DIR/$VERSION/restart.log
+ #大于1,杀掉所有进程,重启
+ elif [ "${NUM}" -gt "1" ];then
+ killall-9 ${PRO_NAME}
+ fi
+ sleep 60
+done
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/log4j.properties b/bigdata-scripts_test3/roles/zookeeper/files/log4j.properties
new file mode 100644
index 0000000..6f535ec
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/log4j.properties
@@ -0,0 +1,63 @@
+# Define some default values that can be overridden by system properties
+#zookeeper.root.logger=INFO, CONSOLE
+zookeeper.root.logger=INFO, ROLLINGFILE
+zookeeper.console.threshold=INFO
+zookeeper.log.dir=.
+zookeeper.log.file=zookeeper.log
+zookeeper.log.threshold=DEBUG
+zookeeper.tracelog.dir=.
+zookeeper.tracelog.file=zookeeper_trace.log
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+#log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
+log4j.appender.ROLLINGFILE.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/restart_sum.log b/bigdata-scripts_test3/roles/zookeeper/files/restart_sum.log
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/restart_sum.log
@@ -0,0 +1 @@
+0
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/zkEnv.sh b/bigdata-scripts_test3/roles/zookeeper/files/zkEnv.sh
new file mode 100755
index 0000000..d6918aa
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/zkEnv.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+
+ZOOBINDIR="${ZOOBINDIR:-/usr/bin}"
+ZOOKEEPER_PREFIX="${ZOOBINDIR}/.."
+
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+ if [ -e "${ZOOKEEPER_PREFIX}/conf" ]; then
+ ZOOCFGDIR="$ZOOBINDIR/../conf"
+ else
+ ZOOCFGDIR="$ZOOBINDIR/../etc/zookeeper"
+ fi
+fi
+
+if [ -f "${ZOOCFGDIR}/zookeeper-env.sh" ]; then
+ . "${ZOOCFGDIR}/zookeeper-env.sh"
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+ ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -f "$ZOOCFGDIR/java.env" ]
+then
+ . "$ZOOCFGDIR/java.env"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+ ZOO_LOG_DIR="${ZOOKEEPER_PREFIX}/logs/system"
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+ #ZOO_LOG4J_PROP="INFO,CONSOLE"
+ ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
+fi
+
+if [ "$JAVA_HOME" != "" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+else
+ JAVA=java
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the binary package
+#(use array for LIBPATH to account for spaces within wildcard expansion)
+if [ -e "${ZOOKEEPER_PREFIX}"/share/zookeeper/zookeeper-*.jar ]; then
+ LIBPATH=("${ZOOKEEPER_PREFIX}"/share/zookeeper/*.jar)
+else
+ #release tarball format
+ for i in "$ZOOBINDIR"/../zookeeper-*.jar
+ do
+ CLASSPATH="$i:$CLASSPATH"
+ done
+ LIBPATH=("${ZOOBINDIR}"/../lib/*.jar)
+fi
+
+for i in "${LIBPATH[@]}"
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+ CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+ CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/zkServer.sh b/bigdata-scripts_test3/roles/zookeeper/files/zkServer.sh
new file mode 100755
index 0000000..396aedd
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/zkServer.sh
@@ -0,0 +1,225 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+
+
+# use POSIX interface, symlink is followed automatically
+ZOOBIN="${BASH_SOURCE-$0}"
+ZOOBIN="$(dirname "${ZOOBIN}")"
+ZOOBINDIR="$(cd "${ZOOBIN}"; pwd)"
+
+if [ -e "$ZOOBIN/../libexec/zkEnv.sh" ]; then
+ . "$ZOOBINDIR/../libexec/zkEnv.sh"
+else
+ . "$ZOOBINDIR/zkEnv.sh"
+fi
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+ JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ] || [ "$JMXDISABLE" = 'false' ]
+then
+ echo "ZooKeeper JMX enabled by default" >&2
+ if [ "x$JMXPORT" = "x" ]
+ then
+ # for some reason these two options are necessary on jdk6 on Ubuntu
+ # accord to the docs they are not necessary, but otw jconsole cannot
+ # do a local attach
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ else
+ if [ "x$JMXAUTH" = "x" ]
+ then
+ JMXAUTH=false
+ fi
+ if [ "x$JMXSSL" = "x" ]
+ then
+ JMXSSL=false
+ fi
+ if [ "x$JMXLOG4J" = "x" ]
+ then
+ JMXLOG4J=true
+ fi
+ echo "ZooKeeper remote JMX Port set to $JMXPORT" >&2
+ echo "ZooKeeper remote JMX authenticate set to $JMXAUTH" >&2
+ echo "ZooKeeper remote JMX ssl set to $JMXSSL" >&2
+ echo "ZooKeeper remote JMX log4j set to $JMXLOG4J" >&2
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ fi
+else
+ echo "JMX disabled by user request" >&2
+ ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+if [ "x$SERVER_JVMFLAGS" != "x" ]
+then
+ JVMFLAGS="$SERVER_JVMFLAGS $JVMFLAGS"
+fi
+
+if [ "x$2" != "x" ]
+then
+ ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+# if we give a more complicated path to the config, don't screw around in $ZOOCFGDIR
+if [ "x$(dirname "$ZOOCFG")" != "x$ZOOCFGDIR" ]
+then
+ ZOOCFG="$2"
+fi
+
+if $cygwin
+then
+ ZOOCFG=`cygpath -wp "$ZOOCFG"`
+ # cygwin has a "kill" in the shell itself, gets confused
+ KILL=/bin/kill
+else
+ KILL=kill
+fi
+
+echo "Using config: $ZOOCFG" >&2
+
+case "$OSTYPE" in
+*solaris*)
+ GREP=/usr/xpg4/bin/grep
+ ;;
+*)
+ GREP=grep
+ ;;
+esac
+if [ -z "$ZOOPIDFILE" ]; then
+ ZOO_DATADIR="$($GREP "^[[:space:]]*dataDir" "$ZOOCFG" | sed -e 's/.*=//')"
+ if [ ! -d "$ZOO_DATADIR" ]; then
+ mkdir -p "$ZOO_DATADIR"
+ fi
+ ZOOPIDFILE="$ZOO_DATADIR/zookeeper_server.pid"
+else
+ # ensure it exists, otw stop will fail
+ mkdir -p "$(dirname "$ZOOPIDFILE")"
+fi
+
+if [ ! -w "$ZOO_LOG_DIR" ] ; then
+mkdir -p "$ZOO_LOG_DIR"
+fi
+
+_ZOO_DAEMON_OUT="$ZOO_LOG_DIR/zookeeper.log"
+
+case $1 in
+start)
+ echo -n "Starting zookeeper ... "
+ if [ -f "$ZOOPIDFILE" ]; then
+ if kill -0 `cat "$ZOOPIDFILE"` > /dev/null 2>&1; then
+ echo $command already running as process `cat "$ZOOPIDFILE"`.
+ exit 0
+ fi
+ fi
+ nohup "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" > "$_ZOO_DAEMON_OUT" 2>&1 < /dev/null &
+ if [ $? -eq 0 ]
+ then
+ case "$OSTYPE" in
+ *solaris*)
+ /bin/echo "${!}\\c" > "$ZOOPIDFILE"
+ ;;
+ *)
+ /bin/echo -n $! > "$ZOOPIDFILE"
+ ;;
+ esac
+ if [ $? -eq 0 ];
+ then
+ sleep 1
+ echo STARTED
+ else
+ echo FAILED TO WRITE PID
+ exit 1
+ fi
+ else
+ echo SERVER DID NOT START
+ exit 1
+ fi
+ ;;
+start-foreground)
+ ZOO_CMD=(exec "$JAVA")
+ if [ "${ZOO_NOEXEC}" != "" ]; then
+ ZOO_CMD=("$JAVA")
+ fi
+ "${ZOO_CMD[@]}" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG"
+ ;;
+print-cmd)
+ echo "\"$JAVA\" -Dzookeeper.log.dir=\"${ZOO_LOG_DIR}\" -Dzookeeper.root.logger=\"${ZOO_LOG4J_PROP}\" -cp \"$CLASSPATH\" $JVMFLAGS $ZOOMAIN \"$ZOOCFG\" > \"$_ZOO_DAEMON_OUT\" 2>&1 < /dev/null"
+ ;;
+stop)
+ echo -n "Stopping zookeeper ... "
+ if [ ! -f "$ZOOPIDFILE" ]
+ then
+ echo "no zookeeper to stop (could not find file $ZOOPIDFILE)"
+ else
+ $KILL -9 $(cat "$ZOOPIDFILE")
+ rm "$ZOOPIDFILE"
+ echo STOPPED
+ fi
+ exit 0
+ ;;
+upgrade)
+ shift
+ echo "upgrading the servers to 3.*"
+ "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+ echo "Upgrading ... "
+ ;;
+restart)
+ shift
+ "$0" stop ${@}
+ sleep 3
+ "$0" start ${@}
+ ;;
+status)
+ # -q is necessary on some versions of linux where nc returns too quickly, and no stat result is output
+ clientPortAddress=`$GREP "^[[:space:]]*clientPortAddress[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ if ! [ $clientPortAddress ]
+ then
+ clientPortAddress="localhost"
+ fi
+ clientPort=`$GREP "^[[:space:]]*clientPort[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ STAT=`"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.client.FourLetterWordMain \
+ $clientPortAddress $clientPort srvr 2> /dev/null \
+ | $GREP Mode`
+ if [ "x$STAT" = "x" ]
+ then
+ echo "Error contacting service. It is probably not running."
+ exit 1
+ else
+ echo $STAT
+ exit 0
+ fi
+ ;;
+*)
+ echo "Usage: $0 {start|start-foreground|stop|restart|status|upgrade|print-cmd}" >&2
+
+esac
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/zklogdelete.sh b/bigdata-scripts_test3/roles/zookeeper/files/zklogdelete.sh
new file mode 100755
index 0000000..7551d41
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/zklogdelete.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Keep only the last three days of logs; to keep more days, change "-3 days" below
+# Add this script to the system crontab (/etc/crontab)
+# The script relies on environment variables, so they must be configured (e.g. via /etc/profile)
+#. /etc/profile
+day=$(date +"%Y-%m-%d" -d "-3 days")
+
+zk=`jps | grep QuorumPeerMain | wc -l`
+if [[ $zk = "1" ]];then
+#echo $ZOOKEEPER_HOME/logs/system/*.$day*
+ rm -rf $ZOOKEEPER_HOME/logs/system/*.$day*
+fi
+
+kafka=`jps | grep Kafka | wc -l`
+if [[ $kafka = "1" ]];then
+#echo $KAFKA_HOME/logs/*.$day*
+ rm -rf $KAFKA_HOME/logs/*.$day*
+fi
+
diff --git a/bigdata-scripts_test3/roles/zookeeper/files/zookeeper-3.4.9.tar.gz b/bigdata-scripts_test3/roles/zookeeper/files/zookeeper-3.4.9.tar.gz
new file mode 100644
index 0000000..77bed6e
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/files/zookeeper-3.4.9.tar.gz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/zookeeper/tasks/main.yml b/bigdata-scripts_test3/roles/zookeeper/tasks/main.yml
new file mode 100644
index 0000000..c338247
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/tasks/main.yml
@@ -0,0 +1,91 @@
+- name: "copy zookeeper install package to destination server"
+ copy:
+ src: "{{ role_path }}/files/{{ zookeeper.package_name }}"
+ dest: /tmp
+
+- name: "create zookeeper base_dir"
+ file:
+ path: '{{ zookeeper.base_dir }}'
+ state: touch
+
+- name: "install zookeeper"
+ unarchive:
+ src: "/tmp/{{ zookeeper.package_name }}"
+ dest: '{{ zookeeper.base_dir }}'
+ copy: no
+ mode: 0755
+
+- name: "copy any scripts"
+ copy:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ with_item:
+ - { src: '{{ role_path }}/files/dae-zookeeper.sh', dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/bin/' }
+ - { src: '{{ role_path }}/files/zkEnv.sh', dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/bin/' }
+ - { src: '{{ role_path }}/files/zklogdelete.sh', dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/bin/' }
+ - { src: '{{ role_path }}/files/zkServer.sh', dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/bin/' }
+
+- name: "copy log4j.properties"
+ copy:
+ src: "{{ role_path }}/files/log4j.properties"
+ dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/conf/'
+
+- name: "create zoo.cfg"
+ template:
+ src: "{{ role_path }}/templates/zoo.cfg.j2"
+ dest: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/conf/zoo.cfg'
+ mode: 0755
+
+- name: "create any dir"
+ file:
+ path: '{{ item.path }}'
+ state: '{{ item.state }}'
+ with_items:
+ - { path: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/data', state: directory }
+ - { path: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/logs', state: directory }
+ - { path: '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/data/myid', state: touch }
+
+- name: "echo 0 > restart_sum.log"
+ shell: echo 0 > '{{ zookeeper.base_dir }}/{{ zookeeper.version }}/logs/restart_sum.log'
+
+- name: "judge zoo.cfg id and ip"
+ shell: grep ":2888:3888" {{ zookeeper.base_dir }}/{{ zookeeper.version }}/conf/zoo.cfg
+ register: return
+ ignore_errors: true
+
+- name: "traverse ip/port"
+ shell: echo "server.{{ item.0 }}={{ item.1 }}:2888:3888" >> {{ zookeeper.base_dir }}/{{ zookeeper.version }}/conf/zoo.cfg
+ with_together:
+ - '{{ zookeeper.idlist }}'
+ - '{{ zookeeper.iplist }}'
+ when: return.rc != 0
+
+- name: "traverse zookeeper myid"
+ shell: echo "{{ zk_myid }}" > {{ zookeeper.base_dir }}/{{ zookeeper.version }}/data/myid
+
+- name: "template keepzkalive"
+ template:
+ src: "{{ role_path }}/templates/keepzkalive.j2"
+ dest: /etc/init.d/keepzkalive
+
+- name: "start keepzkalive"
+ service:
+ name: keepzkalive
+ state: restarted
+ enabled: true
+
+- name: "template set_zk_profile.sh"
+ template:
+ src: "{{ role_path }}/templates/set_zk_profile.sh.j2"
+ dest: /tmp/set_zk_profile.sh
+ mode: 0755
+
+- name: "judge zookeeper enviroment"
+ shell: grep "#zookeeper" /etc/profile
+ register: return
+ ignore_errors: true
+
+- name: "set_zk_profile"
+ shell: /bin/bash /tmp/set_zk_profile.sh
+ when: return.rc != 0
diff --git a/bigdata-scripts_test3/roles/zookeeper/templates/keepzkalive.j2 b/bigdata-scripts_test3/roles/zookeeper/templates/keepzkalive.j2
new file mode 100755
index 0000000..7cb89a6
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/templates/keepzkalive.j2
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# keepzkalive   Starts the dae-zookeeper.sh watchdog that keeps ZooKeeper running.
+#
+# chkconfig: 123456 20 80
+# description: keepzkalive
+source /etc/profile
+PRO_NAME=keepzkalive
+
+killall -9 dae-zookeeper.sh
+{{ zookeeper.base_dir }}/{{ zookeeper.version }}/bin/dae-zookeeper.sh {{ zookeeper.base_dir }} > /dev/null 2>&1 &
diff --git a/bigdata-scripts_test3/roles/zookeeper/templates/set_zk_profile.sh.j2 b/bigdata-scripts_test3/roles/zookeeper/templates/set_zk_profile.sh.j2
new file mode 100755
index 0000000..e5e950f
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/templates/set_zk_profile.sh.j2
@@ -0,0 +1,6 @@
+#!/bin/bash
+#
+echo -e ""##zookeeper"" >> /etc/profile
+echo -e "export ZOOKEEPER_HOME='{{ zookeeper.base_dir }}'/'{{ zookeeper.version }}'" >> /etc/profile
+echo -e "export PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile
+source /etc/profile
diff --git a/bigdata-scripts_test3/roles/zookeeper/templates/zoo.cfg.j2 b/bigdata-scripts_test3/roles/zookeeper/templates/zoo.cfg.j2
new file mode 100755
index 0000000..7db718c
--- /dev/null
+++ b/bigdata-scripts_test3/roles/zookeeper/templates/zoo.cfg.j2
@@ -0,0 +1,31 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir={{ zookeeper.base_dir }}/{{ zookeeper.version }}/data
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
+dataLogDir={{ zookeeper.base_dir }}/{{ zookeeper.version }}/log
+autopurge.purgeInterval=24
+
diff --git a/bigdata-scripts_test3/test_host/group_vars/all.yml b/bigdata-scripts_test3/test_host/group_vars/all.yml
new file mode 100644
index 0000000..c9e7024
--- /dev/null
+++ b/bigdata-scripts_test3/test_host/group_vars/all.yml
@@ -0,0 +1,23 @@
+zookeeper:
+  package_name: zookeeper-3.4.9.tar.gz
+  base_dir: /home/bigdata
+  version: zookeeper-3.4.9
+  idlist: [ '0','1','2' ]
+  iplist: [ '172.16.124.129','172.16.124.130','172.16.124.131' ]
+
+kafka:
+  ethname: ens33
+  package_name: kafka_2.11-1.0.0.tgz
+  base_dir: /home/bigdata
+  version: kafka_2.11-1.0.0
+  replica: 3
+  log_retentionBytes: 5368709120
+  log_retentionHours: 168
+  log_retentionCheckInterval_ms: 300000
+  mem_max: 1G
+  mem_min: 1G
+
+kafka_manager:
+  package_name: kafka-manager-1.3.3.18.zip
+  version: kafka-manager-1.3.3.18
+
diff --git a/bigdata-scripts_test3/test_host/hosts b/bigdata-scripts_test3/test_host/hosts
new file mode 100644
index 0000000..640a7c1
--- /dev/null
+++ b/bigdata-scripts_test3/test_host/hosts
@@ -0,0 +1,13 @@
+[all:vars]
+ansible_user=root
+
+[server1]
+172.16.124.129 zk_myid=0 broker_id=0
+
+[server2]
+172.16.124.130 zk_myid=1 broker_id=1
+
+[server3]
+172.16.124.131 zk_myid=2 broker_id=2
+
+