commit    03867d8afb6fc55855f080626c51bb5b3294d17b (HEAD -> master)
tree      15ff4ae33756a78b926c1479643aa495ceec3768 /bigdata-scripts_test3/roles/kafka
parent    93c88826d397135969d9288d437e164d0852c870
author    zhangzhihan <[email protected]>  2020-03-10 19:52:21 +0800
committer zhangzhihan <[email protected]>  2020-03-10 19:52:21 +0800

    ansible test3
Diffstat (limited to 'bigdata-scripts_test3/roles/kafka')
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh                   30
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz           bin 0 -> 9036 bytes
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz           bin 0 -> 49475271 bytes
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh                 14
-rw-r--r--  bigdata-scripts_test3/roles/kafka/files/log4j.properties               95
-rw-r--r--  bigdata-scripts_test3/roles/kafka/tasks/main.yml                       74
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2   44
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2            11
-rw-r--r--  bigdata-scripts_test3/roles/kafka/templates/server.properties.j2       153
-rwxr-xr-x  bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2    6
10 files changed, 427 insertions(+), 0 deletions(-)
diff --git a/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh b/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh
new file mode 100755
index 0000000..56f4df2
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/dae-kafka.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Set BASE_DIR to the Kafka installation path (passed as $1).
+# The JMX port in the start command can be changed as needed.
+#JMX_PORT=9191 nohup $BASE_DIR/bin/kafka-server-start.sh $BASE_DIR/config/server.properties > /dev/null 2>&1 &
+
+PRO_NAME=Kafka
+BASE_DIR=$1
+VERSION="kafka_2.11-1.0.0"
+
+#string=`cat $BASE_DIR/$VERSION/config/server.properties | grep broker.id`
+#array=(${string//=/ })
+#echo ${array[1]}
+#ssh [email protected] "source /etc/profile ; zkCli.sh ls /kafka/brokers/ids | grep -v 2181 | grep 4 | wc -l"
+source /etc/profile
+while true ; do
+    NUM=`jps | grep -w ${PRO_NAME} | wc -l`
+
+ if [ "${NUM}" -lt "1" ];then
+ JMX_PORT=9191 nohup $BASE_DIR/$VERSION/bin/kafka-server-start.sh $BASE_DIR/$VERSION/config/server.properties > /dev/null 2>&1 &
+ OLD_NUM=`cat $BASE_DIR/$VERSION/logs/restart_sum.log`
+ RESTART_NUM=`expr $OLD_NUM + 1`
+ echo $RESTART_NUM > $BASE_DIR/$VERSION/logs/restart_sum.log
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Kafka started or restarted after a failure - restart count -> $RESTART_NUM" >> $BASE_DIR/$VERSION/restart.log
+    # More than one Kafka process: kill them all and let the next loop iteration restart a single one
+ elif [ "${NUM}" -gt "1" ];then
+    jps | grep -w ${PRO_NAME} | awk '{print $1}' | xargs -r kill -9
+ fi
+ sleep 60
+done
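For reference, this watchdog is launched by the keepkafalive init script added later in this diff; started by hand it looks like the sketch below, where the /data install prefix is only an assumed example:

    # Assumed install prefix; in the role this comes from {{ kafka.base_dir }}.
    BASE_DIR=/data
    # Run the watchdog detached from the terminal; the prefix is passed as $1.
    nohup $BASE_DIR/kafka_2.11-1.0.0/bin/dae-kafka.sh $BASE_DIR > /dev/null 2>&1 &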
diff --git a/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz b/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz
new file mode 100644
index 0000000..a81a58b
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kafka-message.tar.gz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz b/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz
new file mode 100644
index 0000000..09991f7
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kafka_2.11-1.0.0.tgz
Binary files differ
diff --git a/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh b/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh
new file mode 100755
index 0000000..e6aacbe
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/kflogdelete.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Keep only the last three days of logs; to keep more days, change "-3 days" on line 8.
+# Load this script into the system crontab (/etc/crontab).
+# The script reads environment variables ($KAFKA_HOME), so the environment must be loaded first.
+#. /etc/profile
+
+day=$(date +"%Y-%m-%d" -d "-3 days")
+
+kafka=`jps | grep Kafka | wc -l`
+if [ "$kafka" = "1" ]; then
+ rm -rf $KAFKA_HOME/logs/*.$day*
+fi
+
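A matching /etc/crontab entry might look like the line below; the 01:00 schedule is an assumption, and /etc/profile is sourced explicitly because cron does not load it, so $KAFKA_HOME would otherwise be empty:

    # Assumed schedule: run the log cleanup daily at 01:00 as root.
    0 1 * * * root . /etc/profile; /bin/bash $KAFKA_HOME/logs/kflogdelete.sh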
diff --git a/bigdata-scripts_test3/roles/kafka/files/log4j.properties b/bigdata-scripts_test3/roles/kafka/files/log4j.properties
new file mode 100644
index 0000000..f8d573b
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/files/log4j.properties
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unspecified loggers and loggers with additivity=true output to server.log and stdout
+# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
+log4j.rootLogger=INFO, stdout, kafkaAppender
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
+log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Change the two lines below to adjust ZK client logging
+log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
+log4j.logger.org.apache.zookeeper=INFO
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j.logger.kafka=INFO
+log4j.logger.org.apache.kafka=INFO
+
+# Change to DEBUG or TRACE to enable request logging
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
+# related to the handling of requests
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+#log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.logger.kafka.network.RequestChannel$=INFO, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+#log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.logger.kafka.controller=INFO, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+#log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.logger.state.change.logger=INFO, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
+log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
+log4j.additivity.kafka.authorizer.logger=false
+
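Note that this file never defines ${kafka.logs.dir}; in a stock Kafka 1.0.0 distribution, bin/kafka-run-class.sh passes it as -Dkafka.logs.dir derived from $LOG_DIR. To redirect all of the appenders above, it should be enough to export LOG_DIR before starting the broker, for example:

    # Assumed target directory; kafka-run-class.sh maps $LOG_DIR to -Dkafka.logs.dir.
    export LOG_DIR=/data/kafka_2.11-1.0.0/logs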
diff --git a/bigdata-scripts_test3/roles/kafka/tasks/main.yml b/bigdata-scripts_test3/roles/kafka/tasks/main.yml
new file mode 100644
index 0000000..b393b68
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/tasks/main.yml
@@ -0,0 +1,74 @@
+- name: "copy kafka install package to destination server"
+ copy:
+ src: "{{ role_path }}/files/{{ kafka.package_name }}"
+ dest: /tmp
+
+- name: "create kafka base_dir"
+ file:
+ path: '{{ kafka.base_dir }}'
+ state: directory
+
+- name: "install kafka"
+ unarchive:
+ src: "/tmp/{{ kafka.package_name }}"
+ dest: '{{ kafka.base_dir }}'
+ copy: no
+ mode: 0755
+
+- name: "create logs dir"
+ file:
+ path: '{{ item.path }}'
+ state: '{{ item.state }}'
+ with_items:
+ - { path: '{{ kafka.base_dir }}/{{ kafka.version }}/logs', state: directory }
+ - { path: '{{ kafka.base_dir }}/{{ kafka.version }}/kafka-logs', state: directory }
+
+- name: "copy any scripts and log4j.properties"
+ copy:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ with_items:
+ - { src: '{{ role_path }}/files/dae-kafka.sh', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/bin/' }
+ - { src: '{{ role_path }}/files/kflogdelete.sh', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/log/' }
+ - { src: '{{ role_path }}/files/log4j.properties', dest: '{{ kafka.base_dir }}/{{ kafka.version }}/config/' }
+ mode: 0755
+
+- name: "echo 0 > restart_sum.log"
+ shell: echo 0 > '{{ kafka.base_dir }}/{{ kafka.version }}/logs/restart_sum.log'
+
+- name: "template server.properties"
+ template:
+ src: "{{ role_path }}/templates/server.properties.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka.version }}/config/server.properties'
+
+- name: "template kafka-server-start.sh"
+ template:
+ src: "{{ role_path }}/templates/kafka-server-start.sh.j2"
+ dest: '{{ kafka.base_dir }}/{{ kafka.version }}/bin/kafka-server-start.sh'
+
+- name: "template keepkafalive"
+ template:
+ src: "{{ role_path }}/templates/keepkafalive.j2"
+    dest: /etc/init.d/keepkafalive
+ mode: 0755
+
+- name: "start keepkafalive"
+ service:
+ name: keepkafalive
+ state: restarted
+ enabled: true
+
+- name: "template set_kafka_profile.sh"
+ template:
+ src: "{{ role_path }}/templates/set_kafka_profile.sh.j2"
+ dest: /tmp/set_kafka_profile.sh
+ mode: 0755
+
+- name: "judge kafka enviroment"
+ shell: grep "#kafka" /etc/profile
+ register: return
+ ignore_errors: true
+
+- name: "set_kafka_profile"
+ shell: /bin/bash /tmp/set_kafka_profile.sh
+ when: return.rc != 0
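For reference, the tasks and templates in this role pull from a variable tree roughly like the sketch below; the variable names are all taken from this diff, but every value is an invented example:

    # Hypothetical group_vars sketch; names from this diff, values are examples only.
    kafka:
      package_name: kafka_2.11-1.0.0.tgz   # files/ archive copied to /tmp
      version: kafka_2.11-1.0.0            # directory name inside the archive
      base_dir: /data                      # install prefix, also passed to dae-kafka.sh
      mem_max: 4G                          # -Xmx in KAFKA_HEAP_OPTS
      mem_min: 4G                          # -Xms in KAFKA_HEAP_OPTS
      replica: 3                           # offsets.topic.replication.factor
      log_retentionHours: 72
      log_retentionBytes: 107374182400
      log_retentionCheckInterval_ms: 300000
      ethname: eth0                        # NIC whose IPv4 address fills listeners
    zookeeper:
      iplist: [10.0.0.1, 10.0.0.2, 10.0.0.3]
    broker_id: 1                           # per-host, e.g. in host_vars (see server.properties.j2)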
diff --git a/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2 b/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2
new file mode 100755
index 0000000..9608df9
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/kafka-server-start.sh.j2
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
+ exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx{{ kafka.mem_max }} -Xms{{ kafka.mem_min }}"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
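Assuming kafka.mem_max and kafka.mem_min are both set to 4G (example values only), the templated heap line above renders to:

    # Rendered result for mem_max=4G, mem_min=4G.
    export KAFKA_HEAP_OPTS="-Xmx4G -Xms4G"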
diff --git a/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2 b/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2
new file mode 100755
index 0000000..6ed1ff8
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/keepkafalive.j2
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# keepkafalive    This keeps the dae-kafka.sh watchdog for the Kafka broker running.
+#
+# chkconfig: 123456 30 70
+# description: keepkafkaalive
+source /etc/profile
+PRO_NAME=keepkafkaalive
+
+killall -9 dae-kafka.sh
+{{ kafka.base_dir }}/{{ kafka.version }}/bin/dae-kafka.sh {{ kafka.base_dir }} > /dev/null 2>&1 &
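Once rendered to /etc/init.d/keepkafalive, the playbook's service task (restarted + enabled) is the equivalent of running the following by hand on a SysV/chkconfig host:

    # Register the watchdog for the configured runlevels and (re)start it.
    chkconfig --add keepkafalive
    service keepkafalive restart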
diff --git a/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2 b/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2
new file mode 100644
index 0000000..7419936
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/server.properties.j2
@@ -0,0 +1,153 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={{ broker_id }}
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. It will get the value returned from
+# java.net.InetAddress.getCanonicalHostName() if not configured.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+
+# Hostname and port the broker will advertise to producers and consumers. If not set,
+# it uses the value for "listeners" if configured. Otherwise, it will use the value
+# returned from java.net.InetAddress.getCanonicalHostName().
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=10485760
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=10485760
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs={{ kafka.base_dir }}/{{ kafka.version }}/kafka-logs/
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
+offsets.topic.replication.factor={{ kafka.replica }}
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours={{ kafka.log_retentionHours }}
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+log.retention.bytes={{ kafka.log_retentionBytes }}
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms={{ kafka.log_retentionCheckInterval_ms }}
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={{ zookeeper.iplist[0] }}:2181,{{ zookeeper.iplist[1] }}:2181,{{ zookeeper.iplist[2] }}:2181/kafka
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=30000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
+
+
+#######################################################################################
+listeners=PLAINTEXT://{{ vars["ansible_"+kafka.ethname]['ipv4']['address'] }}:9092
+advertised.listeners=PLAINTEXT://{{ vars["ansible_"+kafka.ethname]['ipv4']['address'] }}:9092
+#kafka port
+port=9092
+# Whether topics may be deleted directly
+delete.topic.enable=true
+# Whether topics may be created automatically
+auto.create.topics.enable=false
+# Log cleanup strategy: periodically delete expired segments
+log.cleanup.policy=delete
+# Maximum size of a message body, in bytes
+message.max.bytes=10485760
+# Maximum amount of data fetched per replica request, in bytes
+replica.fetch.max.bytes=20485760
+
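Assuming kafka.ethname is eth0 and that interface's IPv4 address is 10.0.0.11 (both example values), the two templated listener lines resolve through Ansible's per-interface facts to:

    listeners=PLAINTEXT://10.0.0.11:9092
    advertised.listeners=PLAINTEXT://10.0.0.11:9092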
diff --git a/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2 b/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2
new file mode 100755
index 0000000..3ae55a0
--- /dev/null
+++ b/bigdata-scripts_test3/roles/kafka/templates/set_kafka_profile.sh.j2
@@ -0,0 +1,6 @@
+#!/bin/bash
+#
+echo -e "\n#kafka" >> /etc/profile
+echo -e "export KAFKA_HOME={{ kafka.base_dir }}/{{ kafka.version }}" >> /etc/profile
+echo -e "export PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile
+source /etc/profile
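After this script runs once, /etc/profile ends with lines like the following, where the /data prefix is an assumed example (the trailing source /etc/profile only affects the script's own shell, so login shells pick the variables up on their next start):

    #kafka
    export KAFKA_HOME=/data/kafka_2.11-1.0.0
    export PATH=$KAFKA_HOME/bin:$PATH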