author     qidaijie <[email protected]>   2024-01-18 15:35:33 +0800
committer  qidaijie <[email protected]>   2024-01-18 15:35:34 +0800
commit     0cc392df5c7ddf13531c6cff0db803cdba8f36bb (patch)
tree       db7e397d9759e700bfd0b51146ebf7d243e25c76
parent     f0bd05d5650a1dcbab81c1c87b0fbfd9c1aca496 (diff)
Initial version of the Ansible deployment playbooks for each component
-rw-r--r--Apache Druid/26.0.0/druid/hosts11
-rw-r--r--Apache Druid/26.0.0/druid/install.yml6
-rw-r--r--Apache Druid/26.0.0/druid/role/defaults/main.yml44
-rw-r--r--Apache Druid/26.0.0/druid/role/files/conf.zipbin0 -> 117356 bytes
-rw-r--r--Apache Druid/26.0.0/druid/role/files/mysqlbin0 -> 15580175 bytes
-rw-r--r--Apache Druid/26.0.0/druid/role/handlers/main.yml38
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/deploy.yml156
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/main.yml19
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/standalone/deploy.yml93
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/standalone/uninstall.yml50
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/status-check.yml41
-rw-r--r--Apache Druid/26.0.0/druid/role/tasks/uninstall.yml64
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/broker_jvm.j29
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/broker_runtime.j241
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/common.runtime.properties.j2169
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/coordinator_jvm.j210
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/docker-compose.yml.j218
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/docker-compose_exporter.yml.j217
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/historical_jvm.j29
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/historical_runtime.j242
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/middleManager_jvm.j27
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/middleManager_runtime.properties.j243
-rw-r--r--Apache Druid/26.0.0/druid/role/templates/router_runtime.properties.j234
-rw-r--r--Apache Druid/26.0.0/druid/role/vars/main.yml23
-rw-r--r--Apache HBase/2.2.3/hbase/hosts8
-rw-r--r--Apache HBase/2.2.3/hbase/install.yml7
-rw-r--r--Apache HBase/2.2.3/hbase/role/defaults/main.yml22
-rw-r--r--Apache HBase/2.2.3/hbase/role/files/conf.zipbin0 -> 6719 bytes
-rw-r--r--Apache HBase/2.2.3/hbase/role/handlers/main.yml27
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/deploy-cluster.yml88
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/deploy-standalone.yml44
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/main.yml11
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/standalone/deploy.yml47
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/standalone/uninstall.yml31
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/status-check.yml36
-rw-r--r--Apache HBase/2.2.3/hbase/role/tasks/uninstall.yml45
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/backup-masters.j22
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/docker-compose.yml.j245
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/hbase-env.sh.j2143
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/hbase-site.xml.j2274
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/regionservers.j23
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/rsgroup.sh.j223
-rw-r--r--Apache HBase/2.2.3/hbase/role/templates/startsql.sh.j223
-rw-r--r--Apache HBase/2.2.3/hbase/role/vars/main.yml15
-rw-r--r--Apache Hadoop/2.7.1/hdfs/hosts5
-rw-r--r--Apache Hadoop/2.7.1/hdfs/install.yml7
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/defaults/main.yml23
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml223
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml9
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml53
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml38
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j267
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsjournal.sh.j242
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsmaster.sh.j253
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsslave.sh.j260
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsworker.sh.j247
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsjournal.j247
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsmaster.j242
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsslave.j242
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsworker.j247
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2105
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2142
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j246
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j233
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j271
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j24
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j286
-rw-r--r--Apache Hadoop/2.7.1/hdfs/role/vars/main.yml8
-rw-r--r--Apache Hadoop/2.7.1/yarn/hosts7
-rw-r--r--Apache Hadoop/2.7.1/yarn/install.yml7
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/defaults/main.yml56
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/tasks/deploy-cluster.yml194
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/tasks/deploy-standalone.yml136
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/tasks/main.yml12
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/tasks/status-check.yml57
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/tasks/uninstall.yml55
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/capacity-scheduler.xml.j2134
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/core-site.xml.j277
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnhistory.sh.j241
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnmaster.sh.j241
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnworker.sh.j241
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnhistory.j246
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnmaster.j240
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnworker.j246
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/flink/flink-conf.yaml.j2198
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/flink/flink.sh.j24
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/hadoop-env.sh.j2105
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/hdfs-site.xml.j2142
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/mapred-site.xml.j233
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/set_yarn_env.sh.j258
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/slaves.j24
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/standalone/core-site.xml.j265
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/standalone/hdfs-site.xml.j222
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/standalone/yarn-site.xml.j2183
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/unload_hadoop_yarn.sh.j279
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/yarn-env.sh.j2127
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/templates/yarn-site.xml.j2232
-rw-r--r--Apache Hadoop/2.7.1/yarn/role/vars/main.yml15
-rw-r--r--Apache Ignite/2.15.0/ignite/hosts5
-rw-r--r--Apache Ignite/2.15.0/ignite/install.yml7
-rw-r--r--Apache Ignite/2.15.0/ignite/role/defaults/main.yml12
-rw-r--r--Apache Ignite/2.15.0/ignite/role/handlers/main.yml25
-rw-r--r--Apache Ignite/2.15.0/ignite/role/tasks/deploy.yml48
-rw-r--r--Apache Ignite/2.15.0/ignite/role/tasks/main.yml10
-rw-r--r--Apache Ignite/2.15.0/ignite/role/tasks/status-check.yml17
-rw-r--r--Apache Ignite/2.15.0/ignite/role/tasks/uninstall.yml28
-rw-r--r--Apache Ignite/2.15.0/ignite/role/templates/default-config.xml.j2128
-rw-r--r--Apache Ignite/2.15.0/ignite/role/templates/docker-compose.yml.j226
-rw-r--r--Apache Ignite/2.15.0/ignite/role/vars/.main.yml.swpbin0 -> 12288 bytes
-rw-r--r--Apache Ignite/2.15.0/ignite/role/vars/main.yml11
-rw-r--r--Apache Kafka/3.4.1/kafka/hosts5
-rw-r--r--Apache Kafka/3.4.1/kafka/install.yml7
-rw-r--r--Apache Kafka/3.4.1/kafka/role/defaults/main.yml13
-rw-r--r--Apache Kafka/3.4.1/kafka/role/handlers/main.yml38
-rw-r--r--Apache Kafka/3.4.1/kafka/role/tasks/deploy.yml72
-rw-r--r--Apache Kafka/3.4.1/kafka/role/tasks/main.yml10
-rw-r--r--Apache Kafka/3.4.1/kafka/role/tasks/status-check.yml17
-rw-r--r--Apache Kafka/3.4.1/kafka/role/tasks/uninstall.yml39
-rw-r--r--Apache Kafka/3.4.1/kafka/role/templates/docker-compose.yml.j215
-rw-r--r--Apache Kafka/3.4.1/kafka/role/templates/docker-compose_exporter.yml.j221
-rw-r--r--Apache Kafka/3.4.1/kafka/role/templates/kafka-operation.sh.j260
-rw-r--r--Apache Kafka/3.4.1/kafka/role/templates/kafka.sh.j23
-rw-r--r--Apache Kafka/3.4.1/kafka/role/templates/server.properties.j2190
-rw-r--r--Apache Kafka/3.4.1/kafka/role/vars/main.yml23
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/hosts2
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/install.yml7
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/defaults/main.yml9
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/handlers/main.yml38
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/tasks/deploy.yml55
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/tasks/main.yml9
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/tasks/status-check.yml13
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/tasks/uninstall.yml27
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose.yml.j216
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose_exporter.yml.j217
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/templates/myid.j21
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/templates/zoo.cfg.j255
-rw-r--r--Apache Zookeeper/3.5.9/zookeeper/role/vars/main.yml11
-rw-r--r--ArangoDB/3.6.4/arangodb/hosts2
-rw-r--r--ArangoDB/3.6.4/arangodb/install.yml7
-rw-r--r--ArangoDB/3.6.4/arangodb/role/defaults/main.yml32
-rw-r--r--ArangoDB/3.6.4/arangodb/role/files/init.zipbin0 -> 612944 bytes
-rw-r--r--ArangoDB/3.6.4/arangodb/role/handlers/main.yml24
-rw-r--r--ArangoDB/3.6.4/arangodb/role/tasks/deploy.yml43
-rw-r--r--ArangoDB/3.6.4/arangodb/role/tasks/main.yml10
-rw-r--r--ArangoDB/3.6.4/arangodb/role/tasks/status-check.yml17
-rw-r--r--ArangoDB/3.6.4/arangodb/role/tasks/uninstall.yml16
-rw-r--r--ArangoDB/3.6.4/arangodb/role/templates/docker-compose.yml.j217
-rw-r--r--ArangoDB/3.6.4/arangodb/role/vars/main.yml8
-rw-r--r--CMAK/3.0.0.6/cmak/hosts5
-rw-r--r--CMAK/3.0.0.6/cmak/install.yml7
-rw-r--r--CMAK/3.0.0.6/cmak/role/defaults/main.yml6
-rw-r--r--CMAK/3.0.0.6/cmak/role/files/kafka_client_jaas.conf5
-rw-r--r--CMAK/3.0.0.6/cmak/role/handlers/main.yml24
-rw-r--r--CMAK/3.0.0.6/cmak/role/tasks/deploy.yml34
-rw-r--r--CMAK/3.0.0.6/cmak/role/tasks/main.yml9
-rw-r--r--CMAK/3.0.0.6/cmak/role/tasks/status-check.yml13
-rw-r--r--CMAK/3.0.0.6/cmak/role/tasks/unload.yml28
-rw-r--r--CMAK/3.0.0.6/cmak/role/templates/docker-compose.yml.j224
-rw-r--r--CMAK/3.0.0.6/cmak/role/vars/main.yml9
-rw-r--r--Chproxy/21.06.30/chproxy/hosts5
-rw-r--r--Chproxy/21.06.30/chproxy/install.yml7
-rw-r--r--Chproxy/21.06.30/chproxy/role/defaults/main.yml6
-rw-r--r--Chproxy/21.06.30/chproxy/role/handlers/main.yml24
-rw-r--r--Chproxy/21.06.30/chproxy/role/tasks/deploy.yml31
-rw-r--r--Chproxy/21.06.30/chproxy/role/tasks/main.yml10
-rw-r--r--Chproxy/21.06.30/chproxy/role/tasks/status-check.yml17
-rw-r--r--Chproxy/21.06.30/chproxy/role/tasks/uninstall.yml16
-rw-r--r--Chproxy/21.06.30/chproxy/role/templates/config.yml.j258
-rw-r--r--Chproxy/21.06.30/chproxy/role/templates/docker-compose.yml.j218
-rw-r--r--Chproxy/21.06.30/chproxy/role/vars/main.yml8
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/hosts5
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/install.yml7
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/defaults/main.yml12
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/handlers/main.yml38
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/deploy.yml132
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/status-check.yml14
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/uninstall.yml49
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/main.yml19
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/deploy.yml89
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/status-check.yml14
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/uninstall.yml50
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/ck_monitor.sh.j228
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/clickhouse-server.j2355
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/config.xml.j2403
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/docker-compose_exporter.yml.j220
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_data.xml.j247
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_query.xml.j296
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/ck_monitor.sh.j228
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/config.xml.j2403
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/docker-compose.yml.j215
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/metrika_standalone.xml.j287
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/unload_ck.sh.j243
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/templates/users.xml.j2214
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/vars/.main.yml.swpbin0 -> 12288 bytes
-rw-r--r--Clickhouse/21.8.13.1/clickhouse/role/vars/main.yml12
-rw-r--r--GrootStream/1.0/grootstream/hosts2
-rw-r--r--GrootStream/1.0/grootstream/install.yml7
-rw-r--r--GrootStream/1.0/grootstream/role/defaults/main.yml9
-rw-r--r--GrootStream/1.0/grootstream/role/tasks/deploy.yml32
-rw-r--r--GrootStream/1.0/grootstream/role/tasks/main.yml8
-rw-r--r--GrootStream/1.0/grootstream/role/tasks/uninstall.yml10
-rw-r--r--GrootStream/1.0/grootstream/role/templates/groot-stream.sh.j24
-rw-r--r--GrootStream/1.0/grootstream/role/templates/grootstream.yaml.j221
-rw-r--r--GrootStream/1.0/grootstream/role/vars/main.yml5
-rw-r--r--MariaDB/10.5.3/mariadb/hosts2
-rw-r--r--MariaDB/10.5.3/mariadb/install.yml7
-rw-r--r--MariaDB/10.5.3/mariadb/role/defaults/main.yml9
-rw-r--r--MariaDB/10.5.3/mariadb/role/files/pyMysql.zipbin0 -> 46172 bytes
-rw-r--r--MariaDB/10.5.3/mariadb/role/handlers/main.yml29
-rw-r--r--MariaDB/10.5.3/mariadb/role/tasks/deploy-cluster.yml153
-rw-r--r--MariaDB/10.5.3/mariadb/role/tasks/deploy-standalone.yml66
-rw-r--r--MariaDB/10.5.3/mariadb/role/tasks/main.yml11
-rw-r--r--MariaDB/10.5.3/mariadb/role/tasks/status-check.yml14
-rw-r--r--MariaDB/10.5.3/mariadb/role/tasks/uninstall.yml27
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/docker-compose.yml.j215
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/exporter_docker-compose.yml.j217
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/keepalived/check_mariadb.sh.j215
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/keepalived/keepalived-mariadb.conf.j248
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/keepalived/unload_balancer.sh.j218
-rw-r--r--MariaDB/10.5.3/mariadb/role/templates/my.cnf.j2198
-rw-r--r--MariaDB/10.5.3/mariadb/role/vars/main.yml9
-rw-r--r--Nacos/2.0.2/hosts5
-rw-r--r--Nacos/2.0.2/install.yml7
-rw-r--r--Nacos/2.0.2/role/defaults/main.yml18
-rw-r--r--Nacos/2.0.2/role/files/mysqlbin0 -> 11042816 bytes
-rw-r--r--Nacos/2.0.2/role/handlers/main.yml24
-rw-r--r--Nacos/2.0.2/role/tasks/deploy.yml50
-rw-r--r--Nacos/2.0.2/role/tasks/main.yml10
-rw-r--r--Nacos/2.0.2/role/tasks/status-check.yml15
-rw-r--r--Nacos/2.0.2/role/tasks/uninstall.yml21
-rw-r--r--Nacos/2.0.2/role/templates/application.properties.j2228
-rw-r--r--Nacos/2.0.2/role/templates/cluster.conf.j220
-rw-r--r--Nacos/2.0.2/role/templates/docker-compose.yml.j214
-rw-r--r--Nacos/2.0.2/role/templates/nacos-logback.xml.j2642
-rw-r--r--Nacos/2.0.2/role/templates/nacos-mysql.sql.j2220
-rw-r--r--Nacos/2.0.2/role/vars/main.yml13
-rw-r--r--Pushgateway/1.4.2/pushgateway/hosts3
-rw-r--r--Pushgateway/1.4.2/pushgateway/install.yml7
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/defaults/main.yml6
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/handlers/main.yml25
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/tasks/deploy.yml22
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/tasks/main.yml10
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/tasks/status-check.yml17
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/tasks/uninstall.yml27
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/templates/docker-compose.yml.j215
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/vars/.main.yml.swpbin0 -> 12288 bytes
-rw-r--r--Pushgateway/1.4.2/pushgateway/role/vars/main.yml11
-rw-r--r--Redis/6.2.5/redis/hosts2
-rw-r--r--Redis/6.2.5/redis/install.yml7
-rw-r--r--Redis/6.2.5/redis/role/defaults/main.yml6
-rw-r--r--Redis/6.2.5/redis/role/handlers/main.yml24
-rw-r--r--Redis/6.2.5/redis/role/tasks/deploy-cluster.yml48
-rw-r--r--Redis/6.2.5/redis/role/tasks/deploy-standalone.yml30
-rw-r--r--Redis/6.2.5/redis/role/tasks/main.yml11
-rw-r--r--Redis/6.2.5/redis/role/tasks/status-check.yml17
-rw-r--r--Redis/6.2.5/redis/role/tasks/uninstall.yml16
-rw-r--r--Redis/6.2.5/redis/role/templates/docker-compose.yml.j212
-rw-r--r--Redis/6.2.5/redis/role/templates/redis-master.conf.j22051
-rw-r--r--Redis/6.2.5/redis/role/templates/redis-slave.conf.j22052
-rw-r--r--Redis/6.2.5/redis/role/vars/main.yml19
-rw-r--r--full_config.yml143
-rw-r--r--full_hosts118
262 files changed, 15927 insertions, 0 deletions
diff --git a/Apache Druid/26.0.0/druid/hosts b/Apache Druid/26.0.0/druid/hosts
new file mode 100644
index 0000000..ff27d09
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/hosts
@@ -0,0 +1,11 @@
+[zookeeper]
+192.168.45.102
+
+[mariadb]
+192.168.45.102
+
+[hdfs]
+
+[druid]
+192.168.45.102
+
diff --git a/Apache Druid/26.0.0/druid/install.yml b/Apache Druid/26.0.0/druid/install.yml
new file mode 100644
index 0000000..b42d0d5
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/install.yml
@@ -0,0 +1,6 @@
+- hosts: druid
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
diff --git a/Apache Druid/26.0.0/druid/role/defaults/main.yml b/Apache Druid/26.0.0/druid/role/defaults/main.yml
new file mode 100644
index 0000000..c8cdaff
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/defaults/main.yml
@@ -0,0 +1,44 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+druid:
+ common:
+ druid.zk.service.host: '{% for dev_info in groups.zookeeper -%}{% if loop.last -%}{{dev_info}}:2181{%- else %}{{dev_info}}:2181,{%- endif %}{%- endfor %}'
+ druid.metadata.storage.connector.connectURI: 'jdbc:mysql://{{ vrrp_instance.default.virtual_ipaddress }}:3306/druid'
+ druid.metadata.storage.connector.password: '{{ mariadb_default_pin }}'
+ broker:
+ #Running memory of the Druid-Broker.
+ java_opts: -Xmx1024m -Xms1024m
+ #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
+ MaxDirectMemorySize: 512m
+ #This specifies a buffer size (less than 2GiB), for the storage of intermediate results
+ druid.processing.buffer.sizeBytes: 50000000
+ #The number of direct memory buffers available for merging query results.
+ druid.processing.numMergeBuffers: 4
+ #The number of processing threads to have available for parallel processing of segments.
+ druid.processing.numThreads: 5
+ coordinator:
+ #Running memory of the Druid-Coordinator.
+ java_opts: -Xmx1024m -Xms1024m
+ historical:
+ #Running memory of the Druid-Historical.
+ java_opts: -Xmx1024m -Xms1024m
+    #The maximum size, in bytes, of the local segment cache on disk (used as maxSize in druid.segmentCache.locations)
+ druid.segmentCache.locations: 300000000000
+ #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
+ MaxDirectMemorySize: 512m
+ #This specifies a buffer size (less than 2GiB), for the storage of intermediate results
+ druid.processing.buffer.sizeBytes: 50000000
+ #The number of direct memory buffers available for merging query results.
+ druid.processing.numMergeBuffers: 4
+ #The number of processing threads to have available for parallel processing of segments.
+ druid.processing.numThreads: 5
+ middlemanager:
+ #Running memory of the Druid-Middlemanager.
+ java_opts: -Xmx1024m -Xms1024m
+ druid.indexer.fork.property.druid.processing.numMergeBuffers: 2
+ druid.indexer.fork.property.druid.processing.buffer.sizeBytes: 20000000
+ druid.indexer.fork.property.druid.processing.numThreads: 1
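
Note on the defaults above: druid.zk.service.host is rendered by looping over the [zookeeper] inventory group to build a comma-separated host:2181 list (with the sample hosts file it renders to 192.168.45.102:2181). A minimal equivalent sketch using Jinja2 filters instead of the explicit loop, assuming every ZooKeeper node listens on port 2181, would be:

druid:
  common:
    # renders e.g. "zk1:2181,zk2:2181,zk3:2181" for a three-node [zookeeper] group
    druid.zk.service.host: "{{ groups.zookeeper | map('regex_replace', '$', ':2181') | join(',') }}"
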
diff --git a/Apache Druid/26.0.0/druid/role/files/conf.zip b/Apache Druid/26.0.0/druid/role/files/conf.zip
new file mode 100644
index 0000000..2eccc67
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/files/conf.zip
Binary files differ
diff --git a/Apache Druid/26.0.0/druid/role/files/mysql b/Apache Druid/26.0.0/druid/role/files/mysql
new file mode 100644
index 0000000..eb26146
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/files/mysql
Binary files differ
diff --git a/Apache Druid/26.0.0/druid/role/handlers/main.yml b/Apache Druid/26.0.0/druid/role/handlers/main.yml
new file mode 100644
index 0000000..351d22d
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/handlers/main.yml
@@ -0,0 +1,38 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+- name: Loading Exporter Image
+ docker_image:
+ name: 'druid_exporter'
+ tag: '1.0.0'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/druid_exporter-1.0.0.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Exporter Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
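
The handlers above only run when a task notifies them and reports a changed state, and they execute in the order they are defined in handlers/main.yml, not in notification order; the "meta: flush_handlers" at the end of deploy.yml forces them to run at that point rather than at the end of the play. A minimal sketch of the pattern (paths illustrative, not taken from this role):

- name: Copying image to the deploy directory
  copy:
    src: 'files/druid-26.0.0.tar'        # illustrative source path
    dest: '/data/olap/druid/'
  notify:
    - Loading Image
    - Start Container

- meta: flush_handlers    # run any notified handlers now rather than at the end of the play
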
diff --git a/Apache Druid/26.0.0/druid/role/tasks/deploy.yml b/Apache Druid/26.0.0/druid/role/tasks/deploy.yml
new file mode 100644
index 0000000..1376099
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/deploy.yml
@@ -0,0 +1,156 @@
+- block:
+ - name: To terminate execution
+ fail:
+      msg: "Druid cluster mode requires at least 3 nodes, please check configurations/hosts -> druid"
+ when: node_nums < (min_cluster_num)
+
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+ - name: To terminate execution
+ fail:
+      msg: "Port 2181 on the zookeeper node is not listening. The ZooKeeper service may be abnormal"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+ - name: Checking Hadoop DataNode status
+ shell: source /etc/profile && hadoop dfsadmin -report | grep "Live datanodes" | grep -E -o "[0-9]"
+ async: 10
+ register: datanode_out
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+ - name: Checking Hadoop NameNode status
+ shell: source /etc/profile && hadoop dfsadmin -report |grep 50010 | wc -l
+ async: 10
+ register: namenode_out
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+ - name: To terminate execution
+ fail:
+      msg: "Hadoop dependency check failed, please check whether the Hadoop cluster is healthy"
+ when: datanode_out.stdout <= '1' and namenode_out.stdout <= '1'
+
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'var' }
+ - { dir: 'log' }
+ - { dir: 'monitor' }
+
+- name: Copying config
+ unarchive:
+ src: 'files/conf.zip'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: '{{ role_path }}/../../../software-packages/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: copying druid config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/druid/cluster/{{ item.dest }}'
+ backup: false
+ with_items:
+ - { src: 'common.runtime.properties.j2', dest: '_common/common.runtime.properties' }
+ - { src: 'broker_runtime.j2', dest: 'query/broker/runtime.properties' }
+ - { src: 'broker_jvm.j2', dest: 'query/broker/jvm.config' }
+ - { src: 'historical_runtime.j2', dest: 'data/historical/runtime.properties' }
+ - { src: 'historical_jvm.j2', dest: 'data/historical/jvm.config' }
+ - { src: 'middleManager_jvm.j2', dest: 'data/middleManager/jvm.config' }
+ - { src: 'middleManager_runtime.properties.j2', dest: 'data/middleManager/runtime.properties' }
+ - { src: 'coordinator_jvm.j2', dest: 'master/coordinator-overlord/jvm.config' }
+ - { src: 'router_runtime.properties.j2', dest: 'query/router/runtime.properties' }
+
+- name: Fetching Hadoop config files to /tmp
+ ansible.builtin.fetch:
+ src: "{{ deploy_dir }}/hadoop-2.7.1/etc/hadoop/{{ item.filename }}"
+ dest: "/tmp/"
+ flat: yes
+ loop: "{{ hadoop_config_files }}"
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+- name: Copying Hadoop config files to other nodes
+ ansible.builtin.copy:
+ src: "/tmp/{{ item.filename }}"
+ dest: "{{ deploy_dir }}/{{ container_name }}/conf/druid/cluster/_common/"
+ loop: "{{ hadoop_config_files }}"
+
+- name: Create a new database with name {{ druid_database }}
+ shell: mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "create database {{ druid_database }} default character set utf8mb4 collate utf8mb4_general_ci;"
+ run_once: true
+ delegate_to: "{{ groups.mariadb[0] }}"
+
+- block:
+ - name: Setting startup_mode variable
+ set_fact: startup_mode="cluster-all-server"
+
+ - name: Copying Druid docker-compose
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+ when: node_nums <= (min_cluster_num)
+
+- block:
+ - name: Setting startup_mode variable
+ set_fact: startup_mode="cluster-query-server"
+
+ - name: Copying Druid docker-compose
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+ when: node_nums > (min_cluster_num) and inventory_hostname in groups['druid'][:2]
+
+- block:
+ - name: Setting startup_mode variable
+ set_fact: startup_mode="cluster-data-server"
+
+ - name: Copying Druid docker-compose
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+ when: node_nums > (min_cluster_num) and inventory_hostname not in groups['druid'][:2]
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: '{{ role_path }}/../../../software-packages/druid_exporter-1.0.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Config exporter config files
+ template:
+ src: 'docker-compose_exporter.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Start Exporter Container
+
+- meta: flush_handlers
+
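
The three blocks above choose the Druid MODE per host: when the druid group has no more than min_cluster_num nodes (in practice exactly three, given the earlier minimum-size check) every host runs cluster-all-server; with a larger group the first two inventory hosts become query servers and the remaining hosts data servers. A sketch that derives the same value in a single set_fact, assuming node_nums and min_cluster_num are defined as elsewhere in this role, could look like:

- name: Derive startup_mode from the druid group topology (illustrative consolidation)
  set_fact:
    startup_mode: >-
      {{ 'cluster-all-server'
         if node_nums | int <= min_cluster_num | int
         else ('cluster-query-server'
               if inventory_hostname in groups['druid'][:2]
               else 'cluster-data-server') }}
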
diff --git a/Apache Druid/26.0.0/druid/role/tasks/main.yml b/Apache Druid/26.0.0/druid/role/tasks/main.yml
new file mode 100644
index 0000000..67c67cc
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/main.yml
@@ -0,0 +1,19 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install" and (groups.druid|length) > 1
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall" and (groups.druid|length) > 1
+
+- block:
+ - include: standalone/uninstall.yml
+ - include: standalone/deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install" and (groups.druid|length) == 1
+
+- block:
+ - include: standalone/uninstall.yml
+ when: (operation) == "uninstall" and (groups.druid|length) == 1
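
main.yml dispatches on the operation variable (for example passed on the command line as -e "operation=install") and on the size of the druid group, switching between the cluster and standalone task files. The bare "include:" action used here is deprecated in current Ansible releases; an equivalent sketch of the first block using include_tasks, assuming a reasonably recent ansible-core, would be:

- block:
    - ansible.builtin.include_tasks: uninstall.yml
    - ansible.builtin.include_tasks: deploy.yml
    - ansible.builtin.include_tasks: status-check.yml
  when: operation == "install" and (groups.druid | length) > 1
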
diff --git a/Apache Druid/26.0.0/druid/role/tasks/standalone/deploy.yml b/Apache Druid/26.0.0/druid/role/tasks/standalone/deploy.yml
new file mode 100644
index 0000000..b044978
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/standalone/deploy.yml
@@ -0,0 +1,93 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.druid|length}}"
+
+- block:
+ - name: To terminate execution
+ fail:
+      msg: "Druid standalone mode supports at most 1 node, please check configurations/hosts -> druid"
+ when: node_nums != '1'
+
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+ - name: To terminate execution
+ fail:
+      msg: "Port 2181 on the zookeeper node is not listening. The ZooKeeper service may be abnormal"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'var' }
+ - { dir: 'log' }
+ - { dir: 'monitor' }
+
+- name: Copying config
+ unarchive:
+ src: 'files/conf.zip'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: copying druid config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/druid/single-server/medium/{{ item.dest }}'
+ backup: false
+ with_items:
+ - { src: 'common.runtime.properties.j2', dest: '_common/common.runtime.properties' }
+ - { src: 'broker_runtime.j2', dest: 'broker/runtime.properties' }
+ - { src: 'broker_jvm.j2', dest: 'broker/jvm.config' }
+ - { src: 'historical_runtime.j2', dest: 'historical/runtime.properties' }
+ - { src: 'historical_jvm.j2', dest: 'historical/jvm.config' }
+ - { src: 'middleManager_jvm.j2', dest: 'middleManager/jvm.config' }
+ - { src: 'middleManager_runtime.properties.j2', dest: 'middleManager/runtime.properties' }
+ - { src: 'coordinator_jvm.j2', dest: 'coordinator-overlord/jvm.config' }
+ - { src: 'router_runtime.properties.j2', dest: 'router/runtime.properties' }
+
+- name: Create a new database with name {{ druid_database }}
+ shell: mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "create database {{ druid_database }} default character set utf8mb4 collate utf8mb4_general_ci;"
+ run_once: true
+ delegate_to: "{{ groups.mariadb[0] }}"
+
+- name: Setting startup_mode variable
+ set_fact: startup_mode="single-server-medium"
+
+- name: Copying Druid docker-compose
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: 'files/druid_exporter-1.0.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Config exporter config files
+ template:
+ src: 'docker-compose_exporter.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Start Exporter Container
+
+- meta: flush_handlers
diff --git a/Apache Druid/26.0.0/druid/role/tasks/standalone/uninstall.yml b/Apache Druid/26.0.0/druid/role/tasks/standalone/uninstall.yml
new file mode 100644
index 0000000..1abe02a
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/standalone/uninstall.yml
@@ -0,0 +1,50 @@
+- name: copy mysql to /usr/bin/
+ copy:
+ src: 'files/mysql'
+ dest: '/usr/bin/'
+ force: true
+ mode: 0755
+
+- name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+- name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+- name: Dropping the {{ druid_database }} database if it exists
+ shell: mysql -s -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "DROP DATABASE IF EXISTS {{ druid_database }};"
+ run_once: true
+ delegate_to: "{{ groups.druid[0] }}"
+
+- name: Checking ZooKeeper has druid nodes
+ shell: "docker exec -it zookeeper zkCli.sh ls / | grep druid | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+- name: Delete druid nodes in ZooKeeper
+ shell: "docker exec -it zookeeper zkCli.sh rmr /druid"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+- name: Check if the Druid service already exists
+ shell: ps -ef |grep "org.apache.druid.cli.Main server" | grep -v grep | grep -v json | wc -l
+ register: check_out
+
+- name: To terminate execution
+ fail:
+ msg: "Uninstall failed, the Druid process is still running, please check!"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
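
The cleanup above removes the /druid znode with "zkCli.sh rmr", which still works in ZooKeeper 3.5.x but is deprecated there in favour of deleteall. A hedged alternative for the same step, keeping the container name and paths used in this role (the -it flags are omitted since Ansible's shell has no TTY), would be:

- name: Delete druid nodes in ZooKeeper (deleteall variant, assuming ZooKeeper 3.5+)
  shell: "docker exec zookeeper zkCli.sh deleteall /druid"
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  when: has_zknode.stdout | int >= 1
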
diff --git a/Apache Druid/26.0.0/druid/role/tasks/status-check.yml b/Apache Druid/26.0.0/druid/role/tasks/status-check.yml
new file mode 100644
index 0000000..9a5686a
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/status-check.yml
@@ -0,0 +1,41 @@
+- name: Waiting 60s for Druid to start
+ shell: sleep 60
+
+- block:
+ - name: Check if the Druid already exists
+ shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
+ register: process_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '5'
+ when: node_nums <= (min_cluster_num)
+
+- block:
+ - name: Check if the Druid already exists
+ shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
+ register: process_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '3'
+ when: node_nums > (min_cluster_num) and inventory_hostname in groups['druid'][:2]
+
+- block:
+ - name: Check if the Druid already exists
+ shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
+ register: process_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '2'
+ when: node_nums > (min_cluster_num) and inventory_hostname not in groups['druid'][:2]
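
status-check.yml sleeps 60 seconds and then counts "org.apache.druid.cli.Main server" processes: the checks expect 5 on an all-server node, 3 on a query node and 2 on a data node in this layout. A port-based readiness probe is another option; a sketch using wait_for, assuming the node runs the router on port 8088 as configured in router_runtime.properties.j2, could replace the fixed sleep:

- name: Wait for the Druid router port to come up (illustrative alternative to the fixed sleep)
  ansible.builtin.wait_for:
    port: 8088
    host: "{{ inventory_hostname }}"
    delay: 10
    timeout: 300
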
diff --git a/Apache Druid/26.0.0/druid/role/tasks/uninstall.yml b/Apache Druid/26.0.0/druid/role/tasks/uninstall.yml
new file mode 100644
index 0000000..8127669
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/tasks/uninstall.yml
@@ -0,0 +1,64 @@
+- name: copy mysql to /usr/bin/
+ copy:
+ src: 'files/mysql'
+ dest: '/usr/bin/'
+ force: true
+ mode: 0755
+
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+- block:
+  - name: Dropping the {{ druid_database }} database if it exists
+ shell: mysql -s -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "DROP DATABASE IF EXISTS {{ druid_database }};"
+ run_once: true
+ delegate_to: "{{ groups.druid[0] }}"
+
+ - name: Checking ZooKeeper has druid nodes
+ shell: "docker exec -it zookeeper zkCli.sh ls / | grep druid | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete druid nodes in ZooKeeper
+ shell: "docker exec -it zookeeper zkCli.sh rmr /druid"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+ - name: Checking HDFS has Druid folder
+ shell: source /etc/profile && hdfs dfs -ls / | grep druid | wc -l
+ register: folder_exists
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+ - name: Delete Druid data folder in HDFS
+ shell: source /etc/profile && hadoop fs -rm -r /druid
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+ when: folder_exists.stdout >= '1'
+
+ - name: Check if the Druid service already exists
+ shell: ps -ef |grep "org.apache.druid.cli.Main server" | grep -v grep | grep -v json | wc -l
+ register: check_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Uninstall failed, the Druid process is still running, please check!"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
diff --git a/Apache Druid/26.0.0/druid/role/templates/broker_jvm.j2 b/Apache Druid/26.0.0/druid/role/templates/broker_jvm.j2
new file mode 100644
index 0000000..4b4e75f
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/broker_jvm.j2
@@ -0,0 +1,9 @@
+-server
+{{ druid.broker.java_opts }}
+-XX:MaxDirectMemorySize={{ druid.broker.MaxDirectMemorySize }}
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=broker
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/broker_runtime.j2 b/Apache Druid/26.0.0/druid/role/templates/broker_runtime.j2
new file mode 100644
index 0000000..76bd121
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/broker_runtime.j2
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/broker
+druid.plaintextPort=8082
+
+# HTTP server settings
+druid.server.http.numThreads=60
+
+# HTTP client settings
+druid.broker.http.numConnections=50
+druid.broker.http.maxQueuedBytes=10000000
+
+# Processing threads and buffers
+druid.processing.buffer.sizeBytes={{ druid.broker['druid.processing.buffer.sizeBytes'] }}
+druid.processing.numMergeBuffers={{ druid.broker['druid.processing.numMergeBuffers'] }}
+druid.processing.numThreads={{ druid.broker['druid.processing.numThreads'] }}
+druid.processing.tmpDir=var/druid/processing
+
+# Query cache disabled -- push down caching and merging instead
+druid.broker.cache.useCache=false
+druid.broker.cache.populateCache=false
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
diff --git a/Apache Druid/26.0.0/druid/role/templates/common.runtime.properties.j2 b/Apache Druid/26.0.0/druid/role/templates/common.runtime.properties.j2
new file mode 100644
index 0000000..cd168de
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/common.runtime.properties.j2
@@ -0,0 +1,169 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Extensions specified in the load list will be loaded by Druid
+# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
+# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
+
+# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
+# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
+# More info: https://druid.apache.org/docs/latest/operations/including-extensions.html
+druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", "druid-multi-stage-query","mysql-metadata-storage","druid-hlld", "druid-hdrhistogram"]
+
+# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
+# and uncomment the line below to point to your directory.
+#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
+
+
+#
+# Hostname
+#
+druid.host={{ inventory_hostname }}
+
+#
+# Logging
+#
+
+# Log all runtime properties on startup. Disable to avoid logging properties on startup:
+druid.startup.logging.logProperties=true
+
+#
+# Zookeeper
+#
+
+druid.zk.service.host={{ druid.common['druid.zk.service.host'] }}
+
+druid.zk.paths.base=/druid
+
+#
+# Metadata storage
+#
+
+# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
+#druid.metadata.storage.type=derby
+#druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
+#druid.metadata.storage.connector.host=localhost
+#druid.metadata.storage.connector.port=1527
+
+# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
+druid.metadata.storage.type=mysql
+druid.metadata.storage.connector.connectURI={{ druid.common['druid.metadata.storage.connector.connectURI'] }}
+druid.metadata.storage.connector.user=root
+druid.metadata.storage.connector.password={{ druid.common['druid.metadata.storage.connector.password'] }}
+
+# For PostgreSQL:
+#druid.metadata.storage.type=postgresql
+#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
+#druid.metadata.storage.connector.user=...
+#druid.metadata.storage.connector.password=...
+
+#
+# Deep storage
+#
+
+# For local disk (only viable in a cluster if this is a network mount):
+{% if groups.druid | length == 1 %}
+druid.storage.type=local
+druid.storage.storageDirectory=var/druid/segments
+{% elif groups.druid | length >= 3 %}
+# For HDFS:
+druid.storage.type=hdfs
+druid.storage.storageDirectory=/druid/segments
+{% endif %}
+
+# For S3:
+#druid.storage.type=s3
+#druid.storage.bucket=your-bucket
+#druid.storage.baseKey=druid/segments
+#druid.s3.accessKey=...
+#druid.s3.secretKey=...
+
+#
+# Indexing service logs
+#
+
+# For local disk (only viable in a cluster if this is a network mount):
+{% if groups.druid | length == 1 %}
+druid.indexer.logs.type=file
+druid.indexer.logs.directory=var/druid/indexing-logs
+{% elif groups.druid | length >= 3 %}
+# For HDFS:
+druid.indexer.logs.type=hdfs
+druid.indexer.logs.directory=/druid/indexing-logs
+{% endif %}
+
+druid.indexer.logs.kill.enabled=true
+druid.indexer.logs.kill.durationToRetain=604800000
+druid.indexer.logs.kill.delay=21600000
+
+# For S3:
+#druid.indexer.logs.type=s3
+#druid.indexer.logs.s3Bucket=your-bucket
+#druid.indexer.logs.s3Prefix=druid/indexing-logs
+
+
+#
+# Service discovery
+#
+
+druid.selectors.indexing.serviceName=druid/overlord
+druid.selectors.coordinator.serviceName=druid/coordinator
+
+#
+# Monitoring
+#
+
+druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
+druid.emitter=http
+druid.emitter.logging.logLevel=info
+druid.emitter.http.recipientBaseUrl=http://{{ inventory_hostname }}:9903
+
+# Storage type of double columns
+# omitting this will lead to indexing double as float at the storage layer
+
+druid.indexing.doubleStorage=double
+
+#
+# Security
+#
+druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password", "password", "key", "token", "pwd"]
+
+
+#
+# SQL
+#
+druid.sql.enable=true
+
+#
+# Lookups
+#
+druid.lookup.enableLookupSyncOnStartup=false
+
+# Planning SQL query when there is aggregate distinct in the statement
+druid.sql.planner.useGroupingSetForExactDistinct=true
+
+# Expression processing config
+druid.expressions.useStrictBooleans=true
+
+# Http client
+druid.global.http.eagerInitialization=false
+
+#Set to false to store and query data in SQL compatible mode. When set to true (legacy mode), null values will be stored as '' for string columns and 0 for numeric columns.
+druid.generic.useDefaultValueForNull=false
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/coordinator_jvm.j2 b/Apache Druid/26.0.0/druid/role/templates/coordinator_jvm.j2
new file mode 100644
index 0000000..b1d3a63
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/coordinator_jvm.j2
@@ -0,0 +1,10 @@
+-server
+{{ druid.coordinator.java_opts }}
+-XX:+UseG1GC
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=coordinator
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+-Dderby.stream.error.file=var/druid/derby.log
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/docker-compose.yml.j2 b/Apache Druid/26.0.0/druid/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..3fe0fa7
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/docker-compose.yml.j2
@@ -0,0 +1,18 @@
+version: '3'
+
+services:
+ druid-master:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: {{ container_name }}
+ privileged: true
+ user: root
+ environment:
+ #cluster-data-server,cluster-query-server,cluster-all-server,single-server-small,single-server-medium,single-server-large,single-server-xlarge
+ MODE: {{ startup_mode }}
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/{{ component_version }}/conf"
+ - "{{ deploy_dir }}/{{ container_name }}/var:/{{ component_version }}/var"
+ - "{{ deploy_dir }}/{{ container_name }}/log:/{{ component_version }}/log"
+ network_mode: "host"
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/docker-compose_exporter.yml.j2 b/Apache Druid/26.0.0/druid/role/templates/docker-compose_exporter.yml.j2
new file mode 100644
index 0000000..012a0c6
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/docker-compose_exporter.yml.j2
@@ -0,0 +1,17 @@
+version: '3.3'
+
+services:
+ druid_exporter:
+ image: druid_exporter:1.0.0
+ container_name: druid_exporter
+ restart: always
+ ports:
+ - 9903:9903
+ environment:
+ JVM_MEM: "-Xmx1024m -Xms128m"
+ networks:
+ olap:
+ ipv4_address: 172.20.88.11
+networks:
+ olap:
+ external: true
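
The exporter compose file above joins an external Docker network named olap and pins the container to 172.20.88.11, so that network must exist before the compose project is started. A minimal sketch that creates it from Ansible, assuming the 172.20.88.0/24 subnet implied by that address, could be:

- name: Ensure the external olap network exists (hypothetical prerequisite task)
  community.docker.docker_network:
    name: olap
    ipam_config:
      - subnet: 172.20.88.0/24
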
diff --git a/Apache Druid/26.0.0/druid/role/templates/historical_jvm.j2 b/Apache Druid/26.0.0/druid/role/templates/historical_jvm.j2
new file mode 100644
index 0000000..e35bf2b
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/historical_jvm.j2
@@ -0,0 +1,9 @@
+-server
+{{ druid.historical.java_opts }}
+-XX:MaxDirectMemorySize={{ druid.historical.MaxDirectMemorySize }}
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=historical
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/historical_runtime.j2 b/Apache Druid/26.0.0/druid/role/templates/historical_runtime.j2
new file mode 100644
index 0000000..edf7cc3
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/historical_runtime.j2
@@ -0,0 +1,42 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/historical
+druid.plaintextPort=8083
+
+# HTTP server threads
+druid.server.http.numThreads=60
+
+# Processing threads and buffers
+druid.processing.buffer.sizeBytes={{ druid.historical['druid.processing.buffer.sizeBytes'] }}
+druid.processing.numMergeBuffers={{ druid.historical['druid.processing.numMergeBuffers'] }}
+druid.processing.numThreads={{ druid.historical['druid.processing.numThreads'] }}
+druid.processing.tmpDir=var/druid/processing
+
+# Segment storage
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":{{ druid.historical['druid.segmentCache.locations'] }}}]
+
+# Query cache
+druid.historical.cache.useCache=true
+druid.historical.cache.populateCache=true
+druid.cache.type=caffeine
+druid.cache.sizeInBytes=256000000
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
diff --git a/Apache Druid/26.0.0/druid/role/templates/middleManager_jvm.j2 b/Apache Druid/26.0.0/druid/role/templates/middleManager_jvm.j2
new file mode 100644
index 0000000..711678e
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/middleManager_jvm.j2
@@ -0,0 +1,7 @@
+-server
+{{ druid.middlemanager.java_opts }}
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=middleManager
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
diff --git a/Apache Druid/26.0.0/druid/role/templates/middleManager_runtime.properties.j2 b/Apache Druid/26.0.0/druid/role/templates/middleManager_runtime.properties.j2
new file mode 100644
index 0000000..583eacc
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/middleManager_runtime.properties.j2
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/middleManager
+druid.plaintextPort=8091
+
+# Number of tasks per middleManager
+druid.worker.capacity=200
+
+# Task launch parameters
+druid.worker.baseTaskDirs=[\"var/druid/task\"]
+druid.indexer.runner.javaOptsArray=["-server","-Xms1024m","-Xmx1024m","-XX:MaxDirectMemorySize=1024m","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager","-Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2-task.xml"]
+
+# HTTP server threads
+druid.server.http.numThreads=60
+
+# Processing threads and buffers on Peons
+druid.indexer.fork.property.druid.processing.numMergeBuffers={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.numMergeBuffers'] }}
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.buffer.sizeBytes'] }}
+druid.indexer.fork.property.druid.processing.numThreads={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.numThreads'] }}
+
+# Hadoop indexing
+druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
+
diff --git a/Apache Druid/26.0.0/druid/role/templates/router_runtime.properties.j2 b/Apache Druid/26.0.0/druid/role/templates/router_runtime.properties.j2
new file mode 100644
index 0000000..52deab5
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/templates/router_runtime.properties.j2
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/router
+druid.plaintextPort=8088
+
+# HTTP proxy
+druid.router.http.numConnections=50
+druid.router.http.readTimeout=PT5M
+druid.router.http.numMaxThreads=100
+druid.server.http.numThreads=100
+
+# Service discovery
+druid.router.defaultBrokerServiceName=druid/broker
+druid.router.coordinatorServiceName=druid/coordinator
+
+# Management proxy to coordinator / overlord: required for unified web console.
+druid.router.managementProxy.enabled=true
diff --git a/Apache Druid/26.0.0/druid/role/vars/main.yml b/Apache Druid/26.0.0/druid/role/vars/main.yml
new file mode 100644
index 0000000..ed0bd9f
--- /dev/null
+++ b/Apache Druid/26.0.0/druid/role/vars/main.yml
@@ -0,0 +1,23 @@
+#Image name
+image_name: druid
+
+#Image tag
+image_tag: 26.0.0
+
+#Container name
+container_name: druid
+
+#Component version
+component_version: apache-druid-26.0.0
+
+#Minimum cluster size
+min_cluster_num: '3'
+
+#MySQL database name
+druid_database: druid
+
+#HDFS config files required in cluster mode
+hadoop_config_files:
+ - { filename: 'hdfs-site.xml' }
+ - { filename: 'core-site.xml' }
+
diff --git a/Apache HBase/2.2.3/hbase/hosts b/Apache HBase/2.2.3/hbase/hosts
new file mode 100644
index 0000000..3210797
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/hosts
@@ -0,0 +1,8 @@
+[zookeeper]
+192.168.45.102
+
+[hdfs]
+
+
+[hbase]
+192.168.45.102
diff --git a/Apache HBase/2.2.3/hbase/install.yml b/Apache HBase/2.2.3/hbase/install.yml
new file mode 100644
index 0000000..aebfd45
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/install.yml
@@ -0,0 +1,7 @@
+- hosts: hbase
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Apache HBase/2.2.3/hbase/role/defaults/main.yml b/Apache HBase/2.2.3/hbase/role/defaults/main.yml
new file mode 100644
index 0000000..6021737
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/defaults/main.yml
@@ -0,0 +1,22 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+hbase:
+ common:
+ #The HBase resource isolation function is used to group tables for storage.
+ enable_rsgroup: true
+ hmaster:
+ #Running memory of the HBase HMaster.
+ java_opt: '-Xmx1024m -Xms1024m'
+ regionserver:
+ #Running memory of the HBase HRegionserver.
+ java_opt: '-Xmx1024m -Xms1024m -Xmn128m'
+    #This defines the number of threads the region server keeps open to serve requests to tables. It should generally be set to (number of cores - 1)
+ hbase.regionserver.handler.count: 40
+ #If any one of a column families' HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
+ hbase.hregion.max.filesize: 10737418240
+ #Indicates the memory used by all read caches. The value can be the actual memory value, expressed in MB
+ hbase.bucketcache.size: 100
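+
+#These role defaults have the lowest variable precedence, so they can be overridden per
+#deployment without editing this file, e.g. (a sketch): pass a scalar on the command line
+#  ansible-playbook -i hosts install.yml -e operation=install -e deploy_dir=/opt/olap
+#or redefine the whole 'hbase' dictionary in the inventory/group_vars.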
diff --git a/Apache HBase/2.2.3/hbase/role/files/conf.zip b/Apache HBase/2.2.3/hbase/role/files/conf.zip
new file mode 100644
index 0000000..c49066e
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/files/conf.zip
Binary files differ
diff --git a/Apache HBase/2.2.3/hbase/role/handlers/main.yml b/Apache HBase/2.2.3/hbase/role/handlers/main.yml
new file mode 100644
index 0000000..dea1a8f
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/handlers/main.yml
@@ -0,0 +1,27 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - ['HMaster']
+ - ['HRegionServer']
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/deploy-cluster.yml b/Apache HBase/2.2.3/hbase/role/tasks/deploy-cluster.yml
new file mode 100644
index 0000000..7101650
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/deploy-cluster.yml
@@ -0,0 +1,88 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.hbase|length}}"
+
+- name: To terminate execution
+ fail:
+    msg: "HBase cluster mode requires at least 3 nodes, please check configurations/hosts -> hbase"
+  when: node_nums | int < 3
+
+- name: Checking Hadoop DataNode status
+ shell: source /etc/profile && hadoop dfsadmin -report | grep "Live datanodes" | grep -E -o "[0-9]"
+ async: 10
+ register: datanode_out
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+- name: Checking Hadoop NameNode status
+ shell: source /etc/profile && hadoop dfsadmin -report |grep 50010 | wc -l
+ async: 10
+ register: namenode_out
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+- name: To terminate execution
+ fail:
+    msg: "Hadoop dependency check failed, please check whether the Hadoop cluster is healthy"
+ when: datanode_out.stdout <= '1' and namenode_out.stdout <= '1'
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'logs' }
+ - { dir: 'data' }
+ - { dir: 'conf' }
+ - { dir: 'init' }
+
+- name: Unarchiving phoenix and conf
+ unarchive:
+    src: 'files/{{ item.file_name }}'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: yes
+ with_items:
+ - { file_name: 'phoenix-hbase-2.2-5.1.2-bin.tar' }
+ - { file_name: 'conf.zip' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Fetching Hadoop config files to /tmp
+ ansible.builtin.fetch:
+ src: "{{ deploy_dir }}/hadoop-2.7.1/etc/hadoop/{{ item.filename }}"
+ dest: "/tmp/"
+ flat: yes
+ loop: "{{ hadoop_config_files }}"
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+- name: Copying Hadoop config files to other nodes
+ ansible.builtin.copy:
+ src: "/tmp/{{ item.filename }}"
+ dest: "{{ deploy_dir }}/{{ container_name }}/conf/"
+ loop: "{{ hadoop_config_files }}"
+
+- name: Copying HBase config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
+ - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
+ - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
+ - { src: 'backup-masters.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/backup-masters', mode: '0644' }
+ - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
+ - { src: 'rsgroup.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/init/rsgroup.sh', mode: '0755' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/deploy-standalone.yml b/Apache HBase/2.2.3/hbase/role/tasks/deploy-standalone.yml
new file mode 100644
index 0000000..d6ccec1
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/deploy-standalone.yml
@@ -0,0 +1,44 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'logs' }
+ - { dir: 'data' }
+ - { dir: 'conf' }
+ - { dir: 'init' }
+
+- name: Unarchiving phoenix and conf
+ unarchive:
+    src: 'files/{{ item.file_name }}'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: yes
+ with_items:
+ - { file_name: 'phoenix-hbase-2.2-5.1.2-bin.tar' }
+ - { file_name: 'conf.zip' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying HBase config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
+ - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
+ - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
+ - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/main.yml b/Apache HBase/2.2.3/hbase/role/tasks/main.yml
new file mode 100644
index 0000000..86bd24d
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/main.yml
@@ -0,0 +1,11 @@
+- block:
+ - include: uninstall.yml
+ - include: "{{ playbook_name }}"
+ vars:
+ playbook_name: "{{ 'deploy-cluster.yml' if groups.hbase | length > 1 else 'deploy-standalone.yml' }}"
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/standalone/deploy.yml b/Apache HBase/2.2.3/hbase/role/tasks/standalone/deploy.yml
new file mode 100644
index 0000000..1aa777d
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/standalone/deploy.yml
@@ -0,0 +1,47 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'logs' }
+ - { dir: 'data' }
+ - { dir: 'conf' }
+ - { dir: 'init' }
+
+- name: Copying conf to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/conf'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+
+- name: Unarchiving phoenix
+ unarchive:
+ src: '{{ role_path }}/../../../software-packages/phoenix-hbase-2.2-5.1.2-bin.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: yes
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: '{{ role_path }}/../../../software-packages/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying HBase config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
+ - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
+ - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
+ - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
+ - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/standalone/uninstall.yml b/Apache HBase/2.2.3/hbase/role/tasks/standalone/uninstall.yml
new file mode 100644
index 0000000..11948b4
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/standalone/uninstall.yml
@@ -0,0 +1,31 @@
+- block:
+ - name: Stopping and removing container
+ docker_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - ['HMaster']
+ - ['HRegionServer']
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Checking ZooKeeper has HBase nodes
+    shell: "docker exec zookeeper zkCli.sh ls / | grep hbase | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete HBase nodes in ZooKeeper
+    shell: "docker exec zookeeper zkCli.sh rmr /hbase"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/status-check.yml b/Apache HBase/2.2.3/hbase/role/tasks/status-check.yml
new file mode 100644
index 0000000..5d14102
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/status-check.yml
@@ -0,0 +1,36 @@
+- name: Waiting for HBase to start, 10s
+ shell: sleep 10
+
+- block:
+ - name: Check the HBase Master node status
+ shell: ps -ef | grep "org.apache.hadoop.hbase.master.HMaster" | grep -v grep |wc -l
+ register: check_master
+
+ - name: To terminate execution
+ fail:
+      msg: "HBase did not start properly on {{ inventory_hostname }}; please keep the logs for feedback, path: {{ deploy_dir }}/{{ container_name }}/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_master.stdout != '1'
+ when: inventory_hostname in groups['hbase'][0:3]
+
+- block:
+ - name: Check the HBase HRegionServer node status
+ shell: ps -ef | egrep "org.apache.hadoop.hbase.regionserver.HRegionServer" | grep -v grep |wc -l
+ register: check_region
+
+ - name: To terminate execution
+ fail:
+      msg: "HBase did not start properly on {{ inventory_hostname }}; please keep the logs for feedback, path: {{ deploy_dir }}/{{ container_name }}/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_region.stdout != '1'
+
+- name: Initializing phoenix
+ shell: cd {{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/ && ./startsql.sh
+
+- name: Enable RsGroup
+ shell: cd {{ deploy_dir }}/{{ container_name }}/init/ && ./rsgroup.sh | grep ERROR | egrep -v "already exists|Target RSGroup important is same as source|Source RSGroup important is same as target"
+ register: result
+ failed_when: "'ERROR' in result.stdout"
+ when: hbase.common.enable_rsgroup
diff --git a/Apache HBase/2.2.3/hbase/role/tasks/uninstall.yml b/Apache HBase/2.2.3/hbase/role/tasks/uninstall.yml
new file mode 100644
index 0000000..0362f45
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/tasks/uninstall.yml
@@ -0,0 +1,45 @@
+- block:
+ - name: Stopping and removing container
+ docker_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - ['HMaster']
+ - ['HRegionServer']
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Checking ZooKeeper has HBase nodes
+    shell: "docker exec zookeeper zkCli.sh ls / | grep hbase | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete HBase nodes in ZooKeeper
+    shell: "docker exec zookeeper zkCli.sh rmr /hbase"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+- block:
+ - name: Checking HDFS has hbase folder
+ shell: source /etc/profile && hdfs dfs -ls / | grep hbase | wc -l
+ register: folder_exists
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+
+ - name: Delete HBase data folder in HDFS
+ shell: source /etc/profile && hadoop fs -rm -r /hbase
+ run_once: true
+ delegate_to: "{{ groups.hdfs[0] }}"
+ when: folder_exists.stdout >= '1'
+ when: (groups.hbase) | length > 1
diff --git a/Apache HBase/2.2.3/hbase/role/templates/backup-masters.j2 b/Apache HBase/2.2.3/hbase/role/templates/backup-masters.j2
new file mode 100644
index 0000000..d1014e7
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/backup-masters.j2
@@ -0,0 +1,2 @@
+{{ groups.hbase[1] }}
+{{ groups.hbase[2] }}
diff --git a/Apache HBase/2.2.3/hbase/role/templates/docker-compose.yml.j2 b/Apache HBase/2.2.3/hbase/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..4707caf
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/docker-compose.yml.j2
@@ -0,0 +1,45 @@
+version: "3"
+
+services:
+{% if inventory_hostname in groups['hbase'][0:3] %}
+ hmaster:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: HMaster
+ hostname: {{ansible_hostname}}
+ environment:
+ MODE: master
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
+ network_mode: "host"
+
+ regionserver:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: HRegionServer
+ hostname: {{ansible_hostname}}
+ environment:
+ MODE: regionserver
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
+ network_mode: "host"
+ depends_on:
+ - hmaster
+{% else %}
+ regionserver:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: HRegionServer
+ hostname: {{ansible_hostname}}
+ environment:
+ MODE: regionserver
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
+ network_mode: "host"
+{% endif %}
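+
+# Note (comment only, ignored by docker-compose): the "Start Container" handler runs this
+# compose file through Ansible's docker_compose module, which is roughly equivalent to
+# executing "docker-compose up -d" in {{ deploy_dir }}/{{ container_name }}.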
diff --git a/Apache HBase/2.2.3/hbase/role/templates/hbase-env.sh.j2 b/Apache HBase/2.2.3/hbase/role/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..6c1ac19
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/hbase-env.sh.j2
@@ -0,0 +1,143 @@
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# This script sets variables multiple times over the course of starting an hbase process,
+# so try to keep things idempotent unless you want to take an even deeper look
+# into the startup scripts (bin/hbase, etc.)
+
+# The java implementation to use. Java 1.7+ required.
+export JAVA_HOME=/opt/jdk1.8.0_202
+
+# Extra Java CLASSPATH elements. Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use. Default is left to JVM default.
+#export HBASE_HEAPSIZE={heap}
+
+# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
+# offheap, set the value to "8G".
+#export HBASE_OFFHEAPSIZE=5G
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC "
+
+# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {{ hbase.regionserver.java_opt }} -Xss256k -XX:MetaspaceSize=512m -XX:MaxMetaspaceSize=512m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/opt/{{ component_version }}/logs/gc-regionserver-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/{{ component_version }}/logs/"
+
+export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE {{ hbase.hmaster.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=128m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/opt/{{ component_version }}/logs/gc-master-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/{{ component_version }}/logs/ -javaagent:/opt/{{ component_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9907:/opt/{{ component_version }}/monitor/hbase.yaml"
+
+export HBASE_REGIONSERVER_JMX_OPTS="$HBASE_JMX_BASE -javaagent:/opt/{{ component_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9908:/opt/{{ component_version }}/monitor/hbase.yaml"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
+
+# This enables basic gc logging to the .out file.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
+
+# This enables basic gc logging to the .out file.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
+# needed setting up off-heap block caching.
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
+# section in HBase Reference Guide for instructions.
+
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
+#HBASE_REGIONSERVER_MLOCK=true
+#HBASE_REGIONSERVER_UID="hbase"
+
+# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
+# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=/opt/{{ component_version }}/logs
+
+# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR=/opt/{{ component_version }}/pids
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
+# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
+# In case one needs to do log rolling on a date change, one should set the environment property
+# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
+# For example:
+#HBASE_ROOT_LOGGER=INFO,DRFA
+HBASE_ROOT_LOGGER=ERROR,DRFA
+# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
+# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
diff --git a/Apache HBase/2.2.3/hbase/role/templates/hbase-site.xml.j2 b/Apache HBase/2.2.3/hbase/role/templates/hbase-site.xml.j2
new file mode 100644
index 0000000..f657a67
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/hbase-site.xml.j2
@@ -0,0 +1,274 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+{% if groups.hbase | length > 1 %}
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://ns1/hbase</value>
+ </property>
+{% elif groups.hbase | length == 1 %}
+ <property>
+ <name>hbase.rootdir</name>
+ <value>/opt/hbase-2.2.3/data</value>
+ </property>
+{% endif %}
+
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ </property>
+
+{% if groups.hbase | length > 1 %}
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+  <value>{{ groups.zookeeper | join(',') }}</value>
+ </property>
+
+{% elif groups.hbase | length == 1 %}
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value>{{inventory_hostname}}</value>
+ </property>
+
+{% endif %}
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>2181</value>
+ </property>
+
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ </property>
+
+ <property>
+ <name>hbase.server.keyvalue.maxsize</name>
+ <value>5368709120</value>
+ </property>
+
+ <property>
+ <name>zookeeper.znode.parent</name>
+ <value>/hbase</value>
+ </property>
+
+ <property>
+ <name>hbase.rpc.timeout</name>
+ <value>300000</value>
+ </property>
+
+ <property>
+ <name>zookeeper.session.timeout</name>
+ <value>300000</value>
+ </property>
+
+  <!-- Files smaller than this value are merged during MOB compaction -->
+ <property>
+ <name>hbase.mob.compaction.mergeable.threshold</name>
+ <value>1342177280</value>
+ </property>
+
+ <property>
+ <name>hbase.mob.file.cache.size</name>
+ <value>1000</value>
+ </property>
+
+  <!-- Eviction period of the MOB file cache -->
+ <property>
+ <name>hbase.mob.cache.evict.period</name>
+ <value>3600</value>
+ </property>
+
+  <!-- Ratio of files retained in the MOB cache after eviction; eviction occurs when the cache exceeds hbase.mob.file.cache.size -->
+ <property>
+ <name>hbase.mob.cache.evict.remain.ratio</name>
+ <value>0.5f</value>
+ </property>
+
+  <!-- Enable MOB (requires HFile format version 3) -->
+ <property>
+ <name>hfile.format.version</name>
+ <value>3</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.memstore.flush.size</name>
+ <value>534217728</value>
+ </property>
+
+  <!-- Number of flush threads -->
+ <property>
+ <name>hbase.hstore.flusher.count</name>
+ <value>8</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size.lower.limit</name>
+ <value>0.95</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size</name>
+ <value>0.45</value>
+ </property>
+
+ <property>
+ <name>hfile.block.cache.size</name>
+ <value>0.3</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.memstore.block.multiplier</name>
+ <value>10</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.server.max.callqueue.length</name>
+ <value>1073741824</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>{{ hbase.regionserver['hbase.regionserver.handler.count'] }}</value>
+ <description>Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.</description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.maxClientCnxns</name>
+ <value>1000</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.max.request.size</name>
+ <value>1173741824</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.blockingWaitTime</name>
+ <value>30000</value>
+ </property>
+ <property>
+ <name>hbase.hstore.blockingStoreFiles</name>
+ <value>100</value>
+ </property>
+
+  <!-- Region split parameters -->
+ <property>
+  <name>hbase.hregion.max.filesize</name>
+  <value>{{ hbase.regionserver['hbase.hregion.max.filesize'] }}</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.regionSplitLimit</name>
+ <value>1000</value>
+ </property>
+
+<!-- phoenix -->
+ <property>
+  <name>phoenix.schema.isNamespaceMappingEnabled</name>
+  <value>true</value>
+ </property>
+ <property>
+  <name>phoenix.schema.mapSystemTablesToNamespace</name>
+  <value>true</value>
+ </property>
+
+<!-- RsGroup -->
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
+ </property>
+
+ <property>
+ <name>hbase.master.loadbalancer.class</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
+ </property>
+
+<!-- Balance regions per table automatically -->
+ <property>
+  <name>hbase.master.loadbalance.bytable</name>
+  <value>true</value>
+ </property>
+
+ <property>
+ <name>hbase.bucketcache.ioengine</name>
+ <value>offheap</value>
+ </property>
+
+ <property>
+ <name>hbase.bucketcache.size</name>
+ <value>{{ hbase.regionserver['hbase.bucketcache.size'] }}</value>
+ </property>
+
+  <!-- A compaction is triggered when the number of storefiles exceeds this value -->
+ <property>
+ <name>hbase.hstore.compactionThreshold</name>
+ <value>5</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.min</name>
+ <value>5</value>
+ </property>
+
+  <!-- Maximum number of storefiles selected for a single compaction -->
+ <property>
+ <name>hbase.hstore.compaction.max</name>
+ <value>20</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.min.size</name>
+ <value>134217728</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.compaction.max.size</name>
+ <value>10737418240</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.thread.compaction.small</name>
+ <value>5</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.thread.compaction.large</name>
+ <value>5</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.majorcompaction</name>
+ <value>604800000</value>
+ </property>
+
+</configuration>
+
diff --git a/Apache HBase/2.2.3/hbase/role/templates/regionservers.j2 b/Apache HBase/2.2.3/hbase/role/templates/regionservers.j2
new file mode 100644
index 0000000..974c253
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/regionservers.j2
@@ -0,0 +1,3 @@
+{% for dev_info in groups.hbase %}
+{{dev_info}}
+{% endfor %}
diff --git a/Apache HBase/2.2.3/hbase/role/templates/rsgroup.sh.j2 b/Apache HBase/2.2.3/hbase/role/templates/rsgroup.sh.j2
new file mode 100644
index 0000000..e6ef382
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/rsgroup.sh.j2
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+source /etc/profile
+
+docker exec -i hbase hbase shell <<EOF
+
+add_rsgroup 'important'
+
+move_servers_rsgroup 'important',['{{ hostvars[groups.hbase[0]]['ansible_hostname'] }}:16020']
+
+move_servers_rsgroup 'important',['{{ hostvars[groups.hbase[1]]['ansible_hostname'] }}:16020']
+
+flush 'tsg:report_result'
+
+move_tables_rsgroup 'important',['tsg:report_result']
+
+flush 'tsg_galaxy:job_result'
+
+move_tables_rsgroup 'important',['tsg_galaxy:job_result']
+
+
+EOF
+
diff --git a/Apache HBase/2.2.3/hbase/role/templates/startsql.sh.j2 b/Apache HBase/2.2.3/hbase/role/templates/startsql.sh.j2
new file mode 100644
index 0000000..445888f
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/templates/startsql.sh.j2
@@ -0,0 +1,23 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=$(cd $(dirname $0); pwd)
+
+ZK_SERVER={{ groups.zookeeper | join(',') }}
+
+
+cd $BASE_DIR
+
+exec python sqlline.py $ZK_SERVER <<EOF
+
+!quit
+
+EOF
diff --git a/Apache HBase/2.2.3/hbase/role/vars/main.yml b/Apache HBase/2.2.3/hbase/role/vars/main.yml
new file mode 100644
index 0000000..8c7d3d7
--- /dev/null
+++ b/Apache HBase/2.2.3/hbase/role/vars/main.yml
@@ -0,0 +1,15 @@
+#Image name
+image_name: hbase
+
+#Image tag
+image_tag: 2.2.3-alp-2
+
+#Container name
+container_name: hbase
+
+#Component version
+component_version: hbase-2.2.3
+
+hadoop_config_files:
+ - { filename: 'hdfs-site.xml' }
+ - { filename: 'core-site.xml' }
diff --git a/Apache Hadoop/2.7.1/hdfs/hosts b/Apache Hadoop/2.7.1/hdfs/hosts
new file mode 100644
index 0000000..886e72e
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/hosts
@@ -0,0 +1,5 @@
+[zookeeper]
+192.168.45.102
+
+[hdfs]
+192.168.45.102
diff --git a/Apache Hadoop/2.7.1/hdfs/install.yml b/Apache Hadoop/2.7.1/hdfs/install.yml
new file mode 100644
index 0000000..c37b0fa
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/install.yml
@@ -0,0 +1,7 @@
+- hosts: hdfs
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
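+
+# Example invocation (a sketch): ansible-playbook -i hosts install.yml -e operation=install
+# The 'operation' extra-var (install/uninstall) is consumed by role/tasks/main.yml.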
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/defaults/main.yml b/Apache Hadoop/2.7.1/hdfs/role/defaults/main.yml
new file mode 100644
index 0000000..58678e1
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/defaults/main.yml
@@ -0,0 +1,23 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+hadoop:
+ namenode:
+ #Running memory of the Hadoop Namenode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of Namenode RPC server threads that listen to requests from clients.
+ dfs.namenode.handler.count: 30
+ datanode:
+ #Running memory of the Hadoop Datanode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of server threads for the datanode.
+ dfs.datanode.handler.count: 40
+ journalnode:
+ #Running memory of the Hadoop JournalNode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ zkfc:
+ #Running memory of the Hadoop DFSZKFailoverController.
+ java_opt: '-Xmx1024m -Xms1024m'
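+
+#As role defaults these values have the lowest precedence; a scalar such as deploy_dir can be
+#overridden at run time, e.g. (a sketch): -e deploy_dir=/opt/olap, or the whole 'hadoop'
+#dictionary can be redefined in the inventory/group_vars.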
diff --git a/Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml b/Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml
new file mode 100644
index 0000000..fe637ca
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml
@@ -0,0 +1,223 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.hdfs|length}}"
+
+- name: To terminate execution
+ fail:
+    msg: "Fully distributed mode requires at least 3 nodes, please check configurations/hosts -> hdfs"
+  when: node_nums | int < 3
+
+- name: check Jdk version
+ shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
+ ignore_errors: false
+ register: jdk_out
+
+- name: To terminate execution
+ fail:
+ msg: "JDK is not installed in the target cluster, please check!"
+ when: jdk_out.stdout != '2'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: create hadoop package path:{{ deploy_dir }}
+ file:
+ state: directory
+ path: '{{ item.path }}'
+ with_items:
+ - { path: '{{ hdfs_data_dir }}' }
+ - { path: '{{ deploy_dir }}' }
+
+- name: master_ip to ansible variable
+ set_fact: master_ip={{groups.hdfs[0]}}
+
+- name: slave1_ip to ansible variable
+ set_fact: slave1_ip={{groups.hdfs[1]}}
+
+- name: slave2_ip to ansible variable
+ set_fact: slave2_ip={{groups.hdfs[2]}}
+
+#Unpack the tarball
+- name: unpack hadoop-2.7.1.tar.gz to {{ deploy_dir }}/
+ unarchive:
+ src: 'files/{{ hadoop_version }}.tar.gz'
+ dest: '{{ deploy_dir }}/'
+
+- name: Copying hadoop config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ backup: false
+ with_items:
+ - { src: 'core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
+ - { src: 'hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
+ - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
+ - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
+ - { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
+ - { src: 'set_hdfs_env.sh.j2', dest: 'bin/set_hdfs_env.sh', mode: '0755' }
+
+- name: Copying HDFS config to {{ master_ip }}
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: false
+ with_items:
+ - { src: 'daemonscript/dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
+ - { src: 'daemonscript/dae-hdfsmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh' }
+ - { src: 'daemonscript/keephdfsmaster.j2', dest: '/etc/init.d/keephdfsmaster' }
+ - { src: 'daemonscript/keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
+ - { src: 'ini_hdfs.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh' }
+ run_once: true
+ delegate_to: "{{ master_ip }}"
+
+- name: Copying HDFS config to {{ slave1_ip }}
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
+ - { src: 'dae-hdfsslave.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh' }
+ - { src: 'keephdfsslave.j2', dest: '/etc/init.d/keephdfsslave' }
+ - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
+ run_once: true
+ delegate_to: "{{ slave1_ip }}"
+
+- name: Copying HDFS config to {{ slave2_ip }}
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
+ - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
+ run_once: true
+ delegate_facts: true
+ delegate_to: "{{ slave2_ip }}"
+
+- name: Copying HDFS config to worker nodes
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-hdfsworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh' }
+ - { src: 'keephdfsworker.j2', dest: '/etc/init.d/keephdfsworker' }
+
+- name: set hadoop env
+  shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh {{ item.operation }}
+  with_items:
+    - { operation: 'chkconfig' }
+    - { operation: 'journal' }
+
+- name: Waiting for the JournalNode to start, sleep 10s
+ shell: sleep 10
+
+- block:
+ - name: checking JournalNode status
+ shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
+ register: status_out
+
+ - name: checking JournalNode
+ fail:
+      msg: "JournalNode did not start properly; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
+ when: status_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in [master_ip,slave1_ip,slave2_ip]
+
+- name: Initialize NameNode/ZKFC and start the master NameNode
+ block:
+ - name: initialization hadoop NameNode
+ shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh namenode | grep "yes" | grep -v grep | wc -l
+ register: ini_namenode_out
+
+ - name: checking namenode init status
+ fail:
+        msg: "NameNode initialization failed; please log in to [{{ master_ip }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
+ when: ini_namenode_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: initialization hadoop ZKFC
+ shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh zkfc | grep "yes" | grep -v grep | wc -l
+ register: ini_zkfc_out
+
+ - name: checking hadoop-zk init status
+ fail:
+        msg: "Hadoop ZKFC initialization failed; please log in to [{{ master_ip }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
+ when: ini_zkfc_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: start hadoop Master node
+ shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh master
+
+ - name: Waiting for the Master-namenode start,sleep 20s
+ shell: sleep 20
+
+ - name: checking {{ master_ip }} NameNode status
+ shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
+ register: master_namenode_status
+
+ - name: checking master NameNode
+ fail:
+        msg: "The master NameNode is not running; please log in to [{{ master_ip }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
+ when: master_namenode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ run_once: true
+ delegate_facts: true
+ delegate_to: "{{ master_ip }}"
+
+- name: Start slave NameNode
+ block:
+ - name: copying {{ master_ip }} NameNode files to Slave
+ shell: "yes | {{ deploy_dir }}/{{ hadoop_version }}/bin/hdfs namenode -bootstrapStandby"
+
+ - name: start hadoop Slave node
+ shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh slave
+
+ - name: Waiting for the Slave-namenode start,sleep 60s
+ shell: sleep 60
+
+ - name: checking {{ slave1_ip }} NameNode status
+ shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
+ register: slave1_namenode_status
+
+ - name: checking slavel NameNode
+ fail:
+        msg: "The slave NameNode is not running; please log in to [{{ slave1_ip }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
+ when: slave1_namenode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ run_once: true
+ delegate_facts: true
+ delegate_to: "{{ slave1_ip }}"
+
+- name: Start DataNode
+ block:
+ - name: start hadoop Worker nodes
+ shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh worker
+
+ - name: Waiting for the DataNode start,sleep 60s
+ shell: sleep 60
+
+ - name: checking DataNode status
+ shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking DataNode
+ fail:
+        msg: "DataNode is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: delete {{ deploy_dir }}/hadoop-2.7.1.tar.gz
+ file:
+ path: "{{ deploy_dir }}/{{ hadoop_version }}.tar.gz"
+ state: absent
diff --git a/Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml b/Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml
new file mode 100644
index 0000000..99e4655
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml
@@ -0,0 +1,9 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
diff --git a/Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml b/Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml
new file mode 100644
index 0000000..4d77936
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml
@@ -0,0 +1,53 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.hdfs|length}}"
+
+- name: Waiting for HDFS to start, sleep 30s
+ shell: sleep 30
+
+- block:
+ - name: checking JournalNode status
+ shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
+ register: status_out
+
+ - name: checking JournalNode
+ fail:
+      msg: "JournalNode did not start properly; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
+ when: status_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in groups['hdfs'][0:3]
+
+- block:
+ - name: checking DFSZKFailoverController status
+ shell: source /etc/profile && jps | grep DFSZKFailoverController | grep -v grep | wc -l
+ register: status_out
+
+ - name: checking DFSZKFailoverController
+ fail:
+      msg: "DFSZKFailoverController did not start properly; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*zkfc*"
+ when: status_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: checking NameNode status
+ shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
+ register: status_out
+
+ - name: checking NameNode
+ fail:
+      msg: "NameNode did not start properly; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
+ when: status_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in groups['hdfs'][0:2]
+
+- name: checking DataNode status
+ shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
+ register: status_out
+
+- name: checking DataNode
+ fail:
+    msg: "DataNode did not start properly; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
+ when: status_out.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
diff --git a/Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml b/Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml
new file mode 100644
index 0000000..3ddde36
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml
@@ -0,0 +1,38 @@
+- block:
+ - name: copy unload_hdfs.sh to {{ deploy_dir }}/
+ template:
+ src: 'unload_hdfs.sh.j2'
+ dest: '{{ deploy_dir }}/unload_hdfs.sh'
+ force: true
+ mode: 0755
+
+ - name: unload hadoop
+ shell: cd {{ deploy_dir }} && sh unload_hdfs.sh
+
+ - name: Ansible delete {{ deploy_dir }}/unload_hdfs.sh
+ file:
+ path: "{{ deploy_dir }}/unload_hdfs.sh"
+ state: absent
+
+ - name: Checking ZooKeeper has Hadoop nodes
+ shell: docker exec zookeeper zkCli.sh ls / | grep -w "hadoop-ha" | wc -l
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete Hadoop nodes in ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh rmr /hadoop-ha"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+ - name: Check if the Hadoop service already exists
+ shell: source /etc/profile && jps -l | egrep "org.apache.hadoop.hdfs.qjournal.server.JournalNode|org.apache.hadoop.hdfs.tools.DFSZKFailoverController|org.apache.hadoop.hdfs.server.datanode.DataNode|org.apache.hadoop.hdfs.server.namenode.NameNode" | wc -l
+ register: check_out
+
+ - name: To terminate execution
+ fail:
+      msg: "Uninstall failed; the component may not have been deployed by this installer, please uninstall it manually before continuing the installation"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j2
new file mode 100644
index 0000000..1c0486a
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j2
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:{{ hdfs_data_dir }}/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+    <value>{% for dev_info in groups.zookeeper %}{{ dev_info }}:2181{% if not loop.last %},{% endif %}{% endfor %}</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsjournal.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsjournal.sh.j2
new file mode 100644
index 0000000..596efda
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsjournal.sh.j2
@@ -0,0 +1,42 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service abnormal - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
+
+if [ $HAS_JN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
+ set_log jnRes_sum JournalNode
+fi
+
+sleep 60
+done
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsmaster.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsmaster.sh.j2
new file mode 100644
index 0000000..bcee032
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsmaster.sh.j2
@@ -0,0 +1,53 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service abnormal - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
+HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_NN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
+ set_log nnRes_sum NameNode
+fi
+
+if [ $HAS_ZKFC -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
+ set_log zkfcRes_sum DFSZKFailoverController
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsslave.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsslave.sh.j2
new file mode 100644
index 0000000..334d1bf
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsslave.sh.j2
@@ -0,0 +1,60 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service abnormal - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
+HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+#HAS_RM=`ps -ef | grep ResourceManager | grep -v grep | wc -l`
+
+if [ $HAS_NN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
+ set_log nnRes_sum NameNode
+fi
+
+if [ $HAS_ZKFC -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
+ set_log zkfcRes_sum DFSZKFailoverController
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+#if [ $HAS_RM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
+# set_log RMRes_sum ResourceManager
+#fi
+
+sleep 60
+done
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsworker.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsworker.sh.j2
new file mode 100644
index 0000000..a2f4c99
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/dae-hdfsworker.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service abnormal - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_DN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
+ set_log dnRes_sum DataNode
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsjournal.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsjournal.j2
new file mode 100644
index 0000000..d63d18c
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsjournal.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# keephdfsjournal This keeps the HDFS JournalNode daemon running.
+#
+# chkconfig:123456 40 60
+# description: keephdfsjournal
+source /etc/profile
+PRO_NAME=keephdfsjournal
+
+INS_DIR={{ deploy_dir }}
+#Version
+VERSION={{ hadoop_version }}
+
+case $1 in
+start)
+journal=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
+if [ $journal -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
+fi
+
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | awk '{print $2}'`
+if [ $HAS_KEEP_SHELL ];then
+echo "Daemon PID: $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop journalnode > /dev/null
+;;
+
+status)
+num=`ps -ef | grep JournalNode | grep -v grep | wc -l`
+if [ "$num" -eq "1" ];then
+echo "JournalNode process is running"
+else
+echo "JournalNode process is not running"
+fi
+
+;;
+
+* )
+echo "use keephdfsjournal [start|stop|status]"
+;;
+esac
+
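+# Usage sketch (assumes the script has been registered as a SysV service by
+# set_hdfs_env.sh via chkconfig, as done in role/tasks/deploy.yml):
+#   service keephdfsjournal start
+#   service keephdfsjournal status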
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsmaster.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsmaster.j2
new file mode 100644
index 0000000..241044d
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsmaster.j2
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# keephdfsmaster    Starts/stops the watchdog that keeps the NameNode (nn1) and ZKFC running.
+#
+# chkconfig: 123456 40 60
+# description: keephdfsmaster
+source /etc/profile
+PRO_NAME=keephdfsmaster
+
+INS_DIR={{ deploy_dir }}
+# version
+VERSION={{ hadoop_version }}
+
+case $1 in
+start)
+master=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
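+# Kill the watchdog first so it cannot restart the daemons that are stopped below.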
+HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID: $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
+;;
+
+status)
+hdfs haadmin -getServiceState nn1
+hdfs dfsadmin -report
+;;
+
+* )
+echo "use keephdfsmaster [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsslave.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsslave.j2
new file mode 100644
index 0000000..31733f5
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsslave.j2
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# keephdfsslave    Starts/stops the watchdog that keeps the NameNode (nn2) and ZKFC running.
+#
+# chkconfig: 123456 40 60
+# description: keephdfsslave
+source /etc/profile
+PRO_NAME=keephdfsslave
+
+INS_DIR={{ deploy_dir }}
+# version
+VERSION={{ hadoop_version }}
+
+case $1 in
+start)
+slave=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
+if [ $slave -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID: $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
+;;
+
+status)
+hdfs haadmin -getServiceState nn2
+hdfs dfsadmin -report
+;;
+
+* )
+echo "use keephdfsslave [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsworker.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsworker.j2
new file mode 100644
index 0000000..e60deeb
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/daemonscript/keephdfsworker.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# keephdfsworker    Starts/stops the watchdog that keeps the DataNode running.
+#
+# chkconfig: 123456 40 60
+# description: keephdfsworker
+source /etc/profile
+PRO_NAME=keephdfsworker
+
+INS_DIR={{ deploy_dir }}
+# version
+VERSION={{ hadoop_version }}
+
+case $1 in
+start)
+worker=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
+if [ $worker -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
+fi
+
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID: $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop datanode > /dev/null
+;;
+
+status)
+num=`ps -ef | grep DataNode | grep -v grep | wc -l`
+if [ "$num" -eq "1" ];then
+echo "DataNode进程已启动"
+else
+echo "DataNode进程未启动"
+fi
+
+;;
+
+* )
+echo "use keephdfsworker [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..6c18711
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
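+# Expose NameNode and DataNode metrics to Prometheus through the JMX exporter javaagent (ports 9905 and 9906).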
+export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
+export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
+
+# The java implementation to use.
+#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
+export JAVA_HOME=$JAVA_HOME
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol. Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+ if [ "$HADOOP_CLASSPATH" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+ else
+ export HADOOP_CLASSPATH=$f
+ fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS {{ hadoop.namenode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"
+
+export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS {{ hadoop.datanode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=ERROR,RFAS"
+
+export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS {{ hadoop.journalnode.java_opt }}"
+
+export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS {{ hadoop.zkfc.java_opt }}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol. This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+# the user that will run the hadoop daemons. Otherwise there is the
+# potential for a symlink attack.
+export HADOOP_PID_DIR={{ deploy_dir }}/{{ hadoop_version }}/pids
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2
new file mode 100644
index 0000000..28a9b32
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:{{ hdfs_data_dir }}/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:{{ hdfs_data_dir }}/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+  <!-- RPC address of nn1 (the host where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>{{ groups.hdfs[0] }}:9000</value>
+ </property>
+  <!-- HTTP address of nn1 (external web UI) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>{{ groups.hdfs[0] }}:50070</value>
+ </property>
+  <!-- RPC address of nn2 (the host where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>{{ groups.hdfs[1] }}:9000</value>
+ </property>
+  <!-- HTTP address of nn2 (external web UI) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>{{ groups.hdfs[1] }}:50070</value>
+ </property>
+  <!-- Where the NameNode edit log is stored on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://{{groups.hdfs[0]}}:8485;{{groups.hdfs[1]}}:8485;{{groups.hdfs[2]}}:8485/ns1</value>
+ </property>
+  <!-- Local directory where the JournalNode stores its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>{{ hdfs_data_dir }}/journal</value>
+ </property>
+  <!-- Proxy provider class used by HDFS clients to determine which NameNode is currently active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+  <!-- Fencing methods used during automatic failover: sshfence logs in to the old active NameNode over SSH and kills it; shell(true) is a fallback that always succeeds -->
+  <property>
+    <name>dfs.ha.fencing.methods</name>
+    <value>sshfence
+shell(true)</value>
+  </property>
+  <!-- SSH private key used by the sshfence method (requires passwordless SSH) -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+  <!-- Timeout for the sshfence method, in milliseconds -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+  <!-- Enable automatic failover -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+  <!-- Number of NameNode RPC handler threads; increasing it costs little extra resource -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>{{ hadoop.namenode['dfs.namenode.handler.count'] }}</value>
+ </property>
+  <!-- Number of DataNode RPC handler threads; increasing it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>{{ hadoop.datanode['dfs.datanode.handler.count'] }}</value>
+ </property>
+  <!-- Bandwidth the balancer may use, in bytes per second -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+  <!-- Reserved disk space per volume that HDFS will not use, in bytes -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+  <!-- DataNode timeout, in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j2
new file mode 100644
index 0000000..b7fd79c
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+
+MASTER_IP={{ groups.hdfs[0] }}
+SLAVE1_IP={{ groups.hdfs[1] }}
+
+BASE_DIR={{ deploy_dir }}
+VERSION={{ hadoop_version }}
+
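+# One-time initialization helpers: format the NameNode metadata directory and the ZKFC znode in ZooKeeper.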
+function ini_namenode() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hadoop namenode -format
+
+if [ $? -eq "0" ];then
+# scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
+ echo yes
+else
+ echo no
+fi
+}
+
+function ini_zk() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hdfs zkfc -formatZK
+
+if [ $? -eq "0" ];then
+ echo yes
+else
+ echo no
+fi
+}
+
+case $1 in
+namenode)
+ini_namenode
+;;
+zkfc)
+ini_zk
+;;
+* )
+echo "Usage: ini_hdfs.sh [namenode|zkfc]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j2
new file mode 100644
index 0000000..9dcce77
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j2
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>{{ groups.hdfs[0] }}:10020</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>{{ groups.hdfs[0] }}:19888</value>
+ </property>
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j2
new file mode 100644
index 0000000..4f458f4
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j2
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+source /etc/profile
+
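+# Export the Hadoop environment via /etc/profile.d/hadoop.sh and register the keephdfs* watchdog services with chkconfig.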
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME={{ deploy_dir }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ chkconfig --add keephdfsmaster
+ chkconfig keephdfsmaster on
+fi
+
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ chkconfig --add keephdfsslave
+ chkconfig keephdfsslave on
+fi
+
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ chkconfig --add keephdfsworker
+ chkconfig keephdfsworker on
+fi
+
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ chkconfig --add keephdfsjournal
+ chkconfig keephdfsjournal on
+fi
+}
+
+case $1 in
+journal)
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ service keephdfsjournal start && sleep 5
+ journal_dae=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
+ if [ $journal_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ service keephdfsmaster start && sleep 5
+ master_dae=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+slave)
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ service keephdfsslave start && sleep 5
+ slave_dae=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
+ if [ $slave_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ service keephdfsworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j2
new file mode 100644
index 0000000..53b6d2c
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j2
@@ -0,0 +1,4 @@
+{% set combined_group = groups.hdfs %}
+{% for dev_info in combined_group %}
+{{dev_info}}
+{% endfor %}
diff --git a/Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j2 b/Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j2
new file mode 100644
index 0000000..827a32e
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j2
@@ -0,0 +1,86 @@
+#!/bin/bash
+source /etc/profile
+
+function killService(){
+keeppath='/etc/init.d/keephdfsjournal'
+if [ -x $keeppath ];then
+service keephdfsjournal stop
+chkconfig keephdfsjournal off
+systemctl daemon-reload
+rm -rf /etc/init.d/keephdfsjournal
+fi
+
+keeppath='/etc/init.d/keephdfsmaster'
+if [ -x $keeppath ];then
+service keephdfsmaster stop
+chkconfig keephdfsmaster off
+systemctl daemon-reload
+rm -rf /etc/init.d/keephdfsmaster
+fi
+
+keeppath='/etc/init.d/keephdfsslave'
+if [ -x $keeppath ];then
+service keephdfsslave stop
+chkconfig keephdfsslave off
+systemctl daemon-reload
+rm -rf /etc/init.d/keephdfsslave
+fi
+
+keeppath='/etc/init.d/keephdfsworker'
+if [ -x $keeppath ];then
+service keephdfsworker stop
+chkconfig keephdfsworker off
+systemctl daemon-reload
+rm -rf /etc/init.d/keephdfsworker
+fi
+}
+
+
+function killPid(){
+livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+
+livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+
+livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+
+livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+}
+
+function drop_folder(){
+FOLDER_NAME=$1
+
+if [ -d "$FOLDER_NAME" ];then
+ rm -rf $FOLDER_NAME
+fi
+}
+
+function drop_file(){
+FILE_NAME=$1
+
+if [ -f "$FILE_NAME" ];then
+ rm -rf $FILE_NAME
+fi
+}
+
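+# Teardown sequence: stop and remove the watchdog services, kill any remaining HDFS JVMs, then delete the install and data directories.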
+killService
+sleep 15
+killPid
+drop_folder {{ deploy_dir }}/{{ hadoop_version }}
+drop_folder {{ data_dir }}/{{ hadoop_version }}
+drop_file /etc/profile.d/hadoop.sh
diff --git a/Apache Hadoop/2.7.1/hdfs/role/vars/main.yml b/Apache Hadoop/2.7.1/hdfs/role/vars/main.yml
new file mode 100644
index 0000000..6bb5c97
--- /dev/null
+++ b/Apache Hadoop/2.7.1/hdfs/role/vars/main.yml
@@ -0,0 +1,8 @@
+# Hadoop version
+hadoop_version: hadoop-2.7.1
+
+# Data directory
+hdfs_data_dir: "{{ data_dir }}/{{ hadoop_version }}/data/hadoop"
+
+# JDK version
+java_version: 1.8.0_73
diff --git a/Apache Hadoop/2.7.1/yarn/hosts b/Apache Hadoop/2.7.1/yarn/hosts
new file mode 100644
index 0000000..a5dee21
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/hosts
@@ -0,0 +1,7 @@
+[zookeeper]
+192.168.45.102
+
+[hdfs]
+
+[yarn]
+192.168.45.102
diff --git a/Apache Hadoop/2.7.1/yarn/install.yml b/Apache Hadoop/2.7.1/yarn/install.yml
new file mode 100644
index 0000000..72d6ec7
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/install.yml
@@ -0,0 +1,7 @@
+- hosts: yarn
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/defaults/main.yml b/Apache Hadoop/2.7.1/yarn/role/defaults/main.yml
new file mode 100644
index 0000000..27a6333
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/defaults/main.yml
@@ -0,0 +1,56 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+hadoop:
+ namenode:
+ #Running memory of the Hadoop Namenode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of Namenode RPC server threads that listen to requests from clients.
+ dfs.namenode.handler.count: 30
+ datanode:
+ #Running memory of the Hadoop Datanode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of server threads for the datanode.
+ dfs.datanode.handler.count: 40
+ journalnode:
+ #Running memory of the Hadoop JournalNode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ zkfc:
+ #Running memory of the Hadoop DFSZKFailoverController.
+ java_opt: '-Xmx1024m -Xms1024m'
+ yarn:
+ resourcemanager:
+ #Running memory of the Hadoop ResourceManager.
+ java_opt: '-Xmx1024m -Xms1024m'
+ nodemanager:
+ #Running memory of the Hadoop NodeManager.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #Amount of physical memory, in MB, that can be allocated for containers.
+ yarn.nodemanager.resource.memory-mb: 16384
+ #The maximum allocation for every container request at the RM in MBs.
+ yarn.scheduler.maximum-allocation-mb: 16384
+ #Number of vcores that can be allocated for containers. This is used by the RM scheduler when allocating resources for containers.
+ yarn.nodemanager.resource.cpu-vcores: 48
+ #The maximum allocation for every container request at the RM in terms of virtual CPU cores.
+ yarn.scheduler.maximum-allocation-vcores: 48
+
+flink:
+ #Total Process Memory size for the JobManager.
+ jobmanager.memory.process.size: 1024M
+ #Total Process Memory size for the TaskExecutors.
+ taskmanager.memory.process.size: 2048M
+ #This is the size of off-heap memory managed for sorting, hash tables, caching of intermediate results and state backend.
+ taskmanager.memory.managed.size: 128M
+ #Framework Off-Heap Memory size for TaskExecutors. This is the size of off-heap memory reserved for TaskExecutor framework
+ taskmanager.memory.framework.off-heap.size: 128M
+ #JVM Metaspace Size for the TaskExecutors.
+ taskmanager.memory.jvm-metaspace.size: 256M
+ #Max Network Memory size for TaskExecutors. Network Memory is off-heap memory reserved for ShuffleEnvironment.
+ taskmanager.memory.network.max: 256M
+ #The number of parallel operator or user function instances that a single TaskManager can run.
+ #This value is typically proportional to the number of physical CPU cores that the TaskManager's machine has (e.g., equal to the number of cores, or half the number of cores).
+ taskmanager.numberOfTaskSlots: 1
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-cluster.yml b/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-cluster.yml
new file mode 100644
index 0000000..0c73969
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-cluster.yml
@@ -0,0 +1,194 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.hdfs|length}}"
+
+- name: To terminate execution
+ fail:
+    msg: "Fully distributed mode requires at least 3 nodes, please check configurations/hosts -> hdfs"
+  when: node_nums | int < 3
+
+- name: check Jdk version
+ shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
+ ignore_errors: false
+ register: jdk_out
+
+- name: To terminate execution
+ fail:
+ msg: "JDK is not installed in the target cluster, please check!"
+ when: jdk_out.stdout != '2'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: create hadoop package path:{{ deploy_dir }}
+ file:
+ state: directory
+ path: '{{ deploy_dir }}'
+
+- block:
+ - name: unpack hadoop-2.7.1.tar.gz to {{ deploy_dir }}/
+ unarchive:
+ src: 'files/{{ hadoop_version }}.tar.gz'
+ dest: '{{ deploy_dir }}/'
+
+  - name: copying hadoop and yarn config files (yarn-only nodes without HDFS)
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ backup: false
+ with_items:
+ - { src: 'yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
+ - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
+ - { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
+ - { src: 'core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
+ - { src: 'hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
+ - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
+ - { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
+ - { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
+ - { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
+ when: inventory_hostname not in groups['hdfs']
+
+- name: copying yarn config files (nodes already running HDFS)
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ backup: false
+ with_items:
+ - { src: 'yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
+ - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
+ - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
+ - { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
+ - { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
+ - { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
+ when: inventory_hostname in groups['hdfs']
+
+- block:
+  - name: copying yarn master daemon scripts
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-yarnhistory.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnhistory.sh' }
+ - { src: 'dae-yarnmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnmaster.sh' }
+ - { src: 'keepyarnhistory.j2', dest: '/etc/init.d/keepyarnhistory' }
+ - { src: 'keepyarnmaster.j2', dest: '/etc/init.d/keepyarnmaster' }
+ when: inventory_hostname in groups['yarn'][0:2]
+
+- block:
+ - name: Start ResourceManager and JobHistoryServer
+    shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
+    with_items:
+      - { operation: 'chkconfig' }
+      - { operation: 'master' }
+      - { operation: 'history' }
+
+  - name: Waiting for ResourceManager to start, sleep 60s
+ shell: sleep 60
+
+ - name: checking ResourceManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
+ register: resourcemanager_check
+
+ - name: checking ResourceManager
+ fail:
+      msg: "ResourceManager failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: resourcemanager_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: checking JobHistoryServer status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
+ register: history_check
+
+ - name: checking JobHistoryServer
+ fail:
+      msg: "JobHistoryServer failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: history_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in groups['yarn'][0:2]
+
+- block:
+  - name: copying yarn worker daemon scripts
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
+ - { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
+
+ - name: Start NodeManager
+    shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
+    with_items:
+      - { operation: 'chkconfig' }
+      - { operation: 'worker' }
+
+  - name: Waiting for NodeManager to start, sleep 60s
+ shell: sleep 60
+
+ - name: checking NodeManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking NodeManager
+ fail:
+      msg: "NodeManager is not running. Please log in to [{{ inventory_hostname }}] and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: node_nums >= cluster_limit and inventory_hostname not in groups['yarn'][0:2]
+
+- block:
+  - name: copying yarn worker daemon scripts
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
+ - { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
+
+ - name: Start NodeManager
+    shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
+    with_items:
+      - { operation: 'chkconfig' }
+      - { operation: 'worker' }
+
+  - name: Waiting for NodeManager to start, sleep 60s
+ shell: sleep 60
+
+ - name: checking NodeManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking NodeManager
+ fail:
+      msg: "NodeManager is not running. Please log in to [{{ inventory_hostname }}] and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: node_nums < cluster_limit
+
+#--------------------------------------------Flink----------------------------------------------#
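+# Flink is unpacked inside the Hadoop directory and reuses the Hadoop site files for its HDFS/YARN client configuration.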
+- name: Copying Flink installation package
+ unarchive:
+ src: 'files/{{ flink_version }}.tgz'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/'
+
+- name: Config flink configuration
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'flink/flink.sh.j2', dest: '/etc/profile.d/flink.sh', mode: '0755' }
+ - { src: 'flink/flink-conf.yaml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/flink-conf.yaml', mode: '0644' }
+ - { src: 'yarn-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/yarn-site.xml', mode: '0644' }
+ - { src: 'core-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/core-site.xml', mode: '0644' }
+ - { src: 'hdfs-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/hdfs-site.xml', mode: '0644' }
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-standalone.yml b/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-standalone.yml
new file mode 100644
index 0000000..97c8d9e
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/tasks/deploy-standalone.yml
@@ -0,0 +1,136 @@
+- name: check Jdk version
+ shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
+ ignore_errors: false
+ register: jdk_out
+
+- name: To terminate execution
+ fail:
+ msg: "JDK is not installed in the target cluster, please check!"
+ when: jdk_out.stdout != '2'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: create hadoop package path:{{ deploy_dir }}
+ file:
+ state: directory
+ path: '{{ deploy_dir }}'
+
+- name: unpack hadoop-2.7.1.tar.gz to {{ deploy_dir }}/
+ unarchive:
+ src: 'files/{{ hadoop_version }}.tar.gz'
+ dest: '{{ deploy_dir }}/'
+
+- name: copying yarn master config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ backup: false
+ with_items:
+ - { src: 'standalone/yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
+ - { src: 'standalone/hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
+ - { src: 'standalone/core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
+ - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
+ - { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
+ - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
+ - { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
+ - { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
+ - { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
+
+- name: copying yarn master daemon scripts
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-yarnhistory.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnhistory.sh' }
+ - { src: 'dae-yarnmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnmaster.sh' }
+ - { src: 'keepyarnhistory.j2', dest: '/etc/init.d/keepyarnhistory' }
+ - { src: 'keepyarnmaster.j2', dest: '/etc/init.d/keepyarnmaster' }
+
+- block:
+ - name: Start ResourceManager and JobHistoryServer
+    shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
+    with_items:
+      - { operation: 'chkconfig' }
+      - { operation: 'master' }
+      - { operation: 'history' }
+
+  - name: Waiting for ResourceManager to start, sleep 60s
+ shell: sleep 60
+
+ - name: checking ResourceManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
+ register: resourcemanager_check
+
+ - name: checking ResourceManager
+ fail:
+      msg: "ResourceManager failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: resourcemanager_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: checking JobHistoryServer status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
+ register: history_check
+
+ - name: checking JobHistoryServer
+ fail:
+      msg: "JobHistoryServer failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: history_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in groups['yarn'][0:2]
+
+- block:
+  - name: copying yarn worker daemon scripts
+ template:
+ src: 'daemonscript/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ backup: yes
+ with_items:
+ - { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
+ - { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
+
+ - name: Start NodeManager
+    shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
+    with_items:
+      - { operation: 'chkconfig' }
+      - { operation: 'worker' }
+
+  - name: Waiting for NodeManager to start, sleep 60s
+ shell: sleep 60
+
+ - name: checking NodeManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking NodeManager
+ fail:
+      msg: "NodeManager is not running. Please log in to [{{ inventory_hostname }}] and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+#--------------------------------------------Flink----------------------------------------------#
+- name: Copying Flink installation package
+ unarchive:
+ src: 'files/{{ flink_version }}.tgz'
+ dest: '{{ deploy_dir }}/{{ hadoop_version }}/'
+
+- name: Config flink configuration
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'flink/flink.sh.j2', dest: '/etc/profile.d/flink.sh', mode: '0755' }
+ - { src: 'flink/flink-conf.yaml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/flink-conf.yaml', mode: '0644' }
+ - { src: 'standalone/yarn-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/yarn-site.xml', mode: '0644' }
+ - { src: 'standalone/core-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/core-site.xml', mode: '0644' }
+ - { src: 'standalone/hdfs-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/hdfs-site.xml', mode: '0644' }
+
+- name: Start flink session
+ shell: source /etc/profile && cd {{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/bin/ && ./yarn-session.sh -d
diff --git a/Apache Hadoop/2.7.1/yarn/role/tasks/main.yml b/Apache Hadoop/2.7.1/yarn/role/tasks/main.yml
new file mode 100644
index 0000000..dafb261
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/tasks/main.yml
@@ -0,0 +1,12 @@
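+# Entry point: uninstall first, then deploy in cluster or standalone mode depending on the size of the yarn group, and finally run the status check.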
+- block:
+ - include: uninstall.yml
+ - include: "{{ playbook_name }}"
+ vars:
+ playbook_name: "{{ 'deploy-cluster.yml' if groups.yarn | length > 1 else 'deploy-standalone.yml' }}"
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/tasks/status-check.yml b/Apache Hadoop/2.7.1/yarn/role/tasks/status-check.yml
new file mode 100644
index 0000000..f0020c7
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/tasks/status-check.yml
@@ -0,0 +1,57 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.yarn|length}}"
+
+- name: Waiting for Yarn to start, sleep 30s
+ shell: sleep 30
+
+- block:
+ - name: checking ResourceManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
+ register: resourcemanager_check
+
+ - name: checking ResourceManager
+ fail:
+      msg: "ResourceManager failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: resourcemanager_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+ - name: checking JobHistoryServer status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
+ register: history_check
+
+ - name: checking JobHistoryServer
+ fail:
+      msg: "JobHistoryServer failed to start. Please log in to {{ inventory_hostname }} and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: history_check.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: inventory_hostname in groups['yarn'][0:2]
+
+- block:
+ - name: checking NodeManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking NodeManager
+ fail:
+      msg: "NodeManager is not running. Please log in to [{{ inventory_hostname }}] and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: node_nums >= cluster_limit and inventory_hostname not in groups['yarn'][0:2]
+
+- block:
+ - name: checking NodeManager status
+ shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
+ register: datanode_status
+
+ - name: checking NodeManager
+ fail:
+      msg: "NodeManager is not running. Please log in to [{{ inventory_hostname }}] and keep the logs under {{ deploy_dir }}/{{ hadoop_version }}/logs/ for troubleshooting."
+ when: datanode_status.stdout != '1'
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: node_nums < cluster_limit
+
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/tasks/uninstall.yml b/Apache Hadoop/2.7.1/yarn/role/tasks/uninstall.yml
new file mode 100644
index 0000000..f36e777
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/tasks/uninstall.yml
@@ -0,0 +1,55 @@
+- block:
+ - name: copy unload_hadoop_yarn.sh to {{ deploy_dir }}/
+ template:
+ src: 'unload_hadoop_yarn.sh.j2'
+ dest: '{{ deploy_dir }}/unload_hadoop_yarn.sh'
+ force: true
+ mode: 0755
+
+ - name: unload hadoop
+ shell: cd {{ deploy_dir }} && sh unload_hadoop_yarn.sh
+
+ - name: Ansible delete {{ deploy_dir }}/unload_hadoop_yarn.sh
+ file:
+ path: "{{ deploy_dir }}/unload_hadoop_yarn.sh"
+ state: absent
+
+ - name: Ansible delete old /etc/profile.d/flink.sh
+ file:
+ path: '/etc/profile.d/flink.sh'
+ state: absent
+
+  - name: Check whether the /rmstore znode exists in ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh ls / | grep rmstore | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: rmstore_zknode
+
+  - name: Delete the /rmstore znode from ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh rmr /rmstore"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: rmstore_zknode.stdout >= '1'
+
+  - name: Check whether the /yarn-leader-election znode exists in ZooKeeper
+ shell: docker exec zookeeper zkCli.sh ls / | grep "yarn-leader-election" | wc -l
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: leader_zknode
+
+  - name: Delete the /yarn-leader-election znode from ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh rmr /yarn-leader-election"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: leader_zknode.stdout >= '1'
+
+ - name: Check if the Hadoop service already exists
+ shell: source /etc/profile && jps -l | egrep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager|org.apache.hadoop.yarn.server.nodemanager.NodeManager|org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | wc -l
+ register: check_out
+
+ - name: To terminate execution
+ fail:
+      msg: "Uninstall failed: the component may not have been deployed by this playbook. Please uninstall it manually before continuing with the installation."
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/capacity-scheduler.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/capacity-scheduler.xml.j2
new file mode 100644
index 0000000..1e97505
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/capacity-scheduler.xml.j2
@@ -0,0 +1,134 @@
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-applications</name>
+ <value>10000</value>
+ <description>
+ Maximum number of applications that can be pending and running.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+ <value>0.5</value>
+ <description>
+ Maximum percent of resources in the cluster which can be used to run
+ application masters i.e. controls number of concurrent running
+ applications.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.resource-calculator</name>
+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+ <description>
+ The ResourceCalculator implementation to be used to compare
+ Resources in the scheduler.
+ The default i.e. DefaultResourceCalculator only uses Memory while
+ DominantResourceCalculator uses dominant-resource to compare
+ multi-dimensional resources such as Memory, CPU etc.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.queues</name>
+ <value>default</value>
+ <description>
+      The queues at this level (root is the root queue).
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.capacity</name>
+ <value>100</value>
+ <description>Default queue target capacity.</description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+ <value>1</value>
+ <description>
+ Default queue user limit a percentage from 0.0 to 1.0.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+ <value>100</value>
+ <description>
+ The maximum capacity of the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.state</name>
+ <value>RUNNING</value>
+ <description>
+ The state of the default queue. State can be one of RUNNING or STOPPED.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+ <value>*</value>
+ <description>
+ The ACL of who can submit jobs to the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+ <value>*</value>
+ <description>
+ The ACL of who can administer jobs on the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.node-locality-delay</name>
+ <value>40</value>
+ <description>
+ Number of missed scheduling opportunities after which the CapacityScheduler
+ attempts to schedule rack-local containers.
+ Typically this should be set to number of nodes in the cluster, By default is setting
+ approximately number of nodes in one rack which is 40.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings</name>
+ <value></value>
+ <description>
+ A list of mappings that will be used to assign jobs to queues
+ The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+ Typically this list will be used to map users to queues,
+ for example, u:%user:%user maps all users to queues with the same name
+ as the user.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+ <value>false</value>
+ <description>
+ If a queue mapping is present, will it override the value specified
+ by the user? This can be used by administrators to place jobs in queues
+ that are different than the one specified by the user.
+ The default is false.
+ </description>
+ </property>
+
+</configuration>
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/core-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/core-site.xml.j2
new file mode 100644
index 0000000..9b54e41
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/core-site.xml.j2
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:{{ hdfs_data_dir }}/tmp</value>
+ </property>
+
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+
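+  <!-- ZooKeeper quorum used for HA coordination -->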
+  <property>
+    <name>ha.zookeeper.quorum</name>
+    <value>{% for dev_info in groups.zookeeper %}{{ dev_info }}:2181{% if not loop.last %},{% endif %}{% endfor %}</value>
+  </property>
+
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+
+</configuration>
+
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnhistory.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnhistory.sh.j2
new file mode 100644
index 0000000..2c3f4f7
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnhistory.sh.j2
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service was down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_HISTORY=`ps -ef | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l`
+
+if [ $HAS_HISTORY -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/mr-jobhistory-daemon.sh start historyserver > /dev/null
+ set_log historyRes_sum JobHistoryServer
+fi
+
+sleep 60
+done
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnmaster.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnmaster.sh.j2
new file mode 100644
index 0000000..7f7004b
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnmaster.sh.j2
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service was down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
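+# Watchdog loop: every 60 seconds, restart the ResourceManager if its process has disappeared and record the restart.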
+while true ; do
+
+HAS_RM=`ps -ef | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l`
+
+if [ $HAS_RM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
+ set_log rmRes_sum ResourceManager
+fi
+
+sleep 60
+done
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnworker.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnworker.sh.j2
new file mode 100644
index 0000000..38a51e2
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/dae-yarnworker.sh.j2
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR={{ deploy_dir }}
+
+VERSION={{ hadoop_version }}
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service was down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NM=`ps -ef | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l`
+
+if [ $HAS_NM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+ set_log nmRes_sum NodeManager
+fi
+
+sleep 60
+done
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnhistory.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnhistory.j2
new file mode 100644
index 0000000..b9f52d8
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnhistory.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# keepyarnhistory    Starts/stops the watchdog that keeps the YARN JobHistoryServer running.
+#
+# chkconfig: 123456 40 60
+# description: keepyarnhistory
+source /etc/profile
+PRO_NAME=keepyarnhistory
+
+INS_DIR={{ deploy_dir }}
+# version
+VERSION={{ hadoop_version }}
+
+case $1 in
+start)
+master=`ps -ef | grep "dae-yarnhistory.sh" | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-yarnhistory.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep "dae-yarnhistory.sh" | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID: $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/mr-jobhistory-daemon.sh stop historyserver > /dev/null
+;;
+
+status)
+HAS_HISTORY=`ps -ef | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l`
+
+if [ $HAS_HISTORY -eq "0" ];then
+echo "JobHistoryServer not running!"
+else
+echo "JobHistoryServer is running!"
+fi
+;;
+
+* )
+echo "use keepyarnhistory [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnmaster.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnmaster.j2
new file mode 100644
index 0000000..f04ae63
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnmaster.j2
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# keepyarnmaster    Watchdog init script for the YARN ResourceManager.
+#
+# chkconfig: 123456 40 60
+# description: keepyarnmaster
+source /etc/profile
+PRO_NAME=keepyarnmaster
+
+INS_DIR={{ deploy_dir }}
+# Hadoop version
+VERSION={{ hadoop_version }}
+
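+# start: launch the dae-yarnmaster.sh watchdog; stop: kill the watchdog and stop the ResourceManager; status: query the HA state of rsm1 via yarn rmadmin.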
+case $1 in
+start)
+master=`ps -ef | grep "dae-yarnmaster.sh" | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-yarnmaster.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep "dae-yarnmaster.sh" | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID(s): $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/yarn-daemon.sh stop resourcemanager > /dev/null
+;;
+
+status)
+yarn rmadmin -getServiceState rsm1
+;;
+
+* )
+echo "use keepyarnmaster [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnworker.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnworker.j2
new file mode 100644
index 0000000..95b11b9
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/daemonscript/keepyarnworker.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# keepyarnworker    Watchdog init script for the YARN NodeManager.
+#
+# chkconfig: 123456 40 60
+# description: keepyarnworker
+source /etc/profile
+PRO_NAME=keepyarnworker
+
+INS_DIR={{ deploy_dir }}
+# Hadoop version
+VERSION={{ hadoop_version }}
+
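+# start: launch the dae-yarnworker.sh watchdog; stop: kill the watchdog and stop the NodeManager; status: report whether the NodeManager process is running.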
+case $1 in
+start)
+master=`ps -ef | grep "dae-yarnworker.sh" | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/sbin/dae-yarnworker.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+HAS_KEEP_SHELL=`ps -ef | grep "dae-yarnworker.sh" | grep -v grep | awk '{print $2}'`
+if [ -n "$HAS_KEEP_SHELL" ];then
+echo "Watchdog PID(s): $HAS_KEEP_SHELL"
+kill -9 $HAS_KEEP_SHELL
+fi
+
+sh $INS_DIR/$VERSION/sbin/yarn-daemon.sh stop nodemanager > /dev/null
+;;
+
+status)
+HAS_NM=`ps -ef | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l`
+
+if [ $HAS_NM -eq "0" ];then
+echo "NodeManager not running!"
+else
+echo "NodeManager is running!"
+fi
+;;
+
+* )
+echo "use keepyarnworker [start|stop|status]"
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink-conf.yaml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink-conf.yaml.j2
new file mode 100644
index 0000000..5c26715
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink-conf.yaml.j2
@@ -0,0 +1,198 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+
+#==============================================================================
+# Common
+#==============================================================================
+
+# The external address of the host on which the JobManager runs and can be
+# reached by the TaskManagers and any clients which want to connect. This setting
+# is only used in Standalone mode and may be overwritten on the JobManager side
+# by specifying the --host <hostname> parameter of the bin/jobmanager.sh executable.
+# In high availability mode, if you use the bin/start-cluster.sh script and setup
+# the conf/masters file, this will be taken care of automatically. Yarn/Mesos
+# automatically configure the host name based on the hostname of the node where the
+# JobManager runs.
+
+jobmanager.rpc.address: {{ groups.yarn[0] }}
+
+# JobManager RPC port
+jobmanager.rpc.port: 6123
+
+# Spread tasks evenly across all TaskManagers
+cluster.evenly-spread-out-slots: true
+
+# Do not kill the Flink JVM process on "OutOfMemoryError: Metaspace"
+classloader.fail-on-metaspace-oom-error: false
+
+# Work around classloader leaks caused by third-party libraries
+classloader.check-leaked-classloader: false
+
+# Prevent the TaskManager from being killed when a task cannot be cancelled in time
+task.cancellation.timeout: 0
+
+# Total Flink memory used by the JobManager process
+jobmanager.memory.process.size: {{ flink['jobmanager.memory.process.size'] }}
+
+# Total Flink memory used by the TaskManager process
+taskmanager.memory.process.size: {{ flink['taskmanager.memory.process.size'] }}
+
+# Size of the TaskManager's managed (off-heap) memory
+taskmanager.memory.managed.size: 128M
+
+# taskmanager.memory.off-heap defaults to false and controls whether Flink managed memory is allocated on-heap or off-heap;
+# the default is on-heap; enabling off-heap further reduces the available heap.
+taskmanager.memory.off-heap: false
+
+# Framework off-heap memory, allocated as direct memory
+taskmanager.memory.framework.off-heap.size: {{ flink['taskmanager.memory.framework.off-heap.size'] }}
+
+# TaskManager JVM metaspace size, default 256M
+taskmanager.memory.jvm-metaspace.size: {{ flink['taskmanager.memory.jvm-metaspace.size'] }}
+
+# Minimum number of network buffers required per sort-merge blocking result partition (default 64). For production, raising it to 2048 improves the compression ratio and reduces small network packets; raising it also requires more total network memory.
+taskmanager.network.sort-shuffle.min-buffers: 64
+
+# Memory used for reading shuffle data (currently only for sort-merge shuffle). It is taken out of framework.off-heap.size (default 32M); increase framework.off-heap.size when raising this value.
+taskmanager.memory.framework.off-heap.batch-shuffle.size: 8M
+
+# Maximum number of buffers each channel may use (default 10). It speeds up checkpoint alignment by preventing excessive buffering of in-flight data under data skew or a high floating-buffer count.
+taskmanager.network.memory.max-buffers-per-channel: 10
+
+# The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline.
+taskmanager.numberOfTaskSlots: {{ flink['taskmanager.numberOfTaskSlots'] }}
+
+# The parallelism used for programs that did not specify and other parallelism.
+parallelism.default: 1
+
+# The default file system scheme and authority.
+#
+# By default file paths without scheme are interpreted relative to the local
+# root file system 'file:///'. Use this to override the default and interpret
+# relative paths relative to a different file system,
+# for example 'hdfs://mynamenode:12345'
+#
+# fs.default-scheme
+
+#==============================================================================
+# Network
+#==============================================================================
+
+# Number of floating network buffers per gate (default 8). Helps relieve backpressure caused by uneven data distribution across subpartitions.
+taskmanager.network.memory.floating-buffers-per-gate: 8
+
+# Number of exclusive network buffers per input/output channel. Configure at least 2.
+taskmanager.network.memory.buffers-per-channel: 2
+
+# Network memory used for data transfer between TaskManagers (shuffle, broadcast, etc.) and with external components
+#Min
+taskmanager.memory.network.min: 128M
+#Max
+taskmanager.memory.network.max: {{ flink['taskmanager.memory.network.max'] }}
+
+#==============================================================================
+# High Availability
+#==============================================================================
+
+# The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
+#
+# high-availability: zookeeper
+
+# The path where metadata for master recovery is persisted. While ZooKeeper stores
+# the small ground truth for checkpoint and leader election, this location stores
+# the larger objects, like persisted dataflow graphs.
+#
+# Must be a durable file system that is accessible from all nodes
+# (like HDFS, S3, Ceph, nfs, ...)
+#
+# high-availability.storageDir: hdfs:///flink/ha/
+
+# The list of ZooKeeper quorum peers that coordinate the high-availability
+# setup. This must be a list of the form:
+# "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
+{% if groups.yarn | length > 1 %}
+state.checkpoints.dir: hdfs:///flink/checkpoint/
+{% elif groups.yarn | length == 1 %}
+state.checkpoints.dir: file://{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/checkpoint
+{% endif %}
+
+heartbeat.timeout: 180000
+
+heartbeat.interval: 20000
+
+akka.ask.timeout: 300 s
+
+# ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
+# It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
+# The default value is "open" and it can be changed to "creator" if ZK security is enabled
+#
+# high-availability.zookeeper.client.acl: open
+
+# The failover strategy, i.e., how the job computation recovers from task failures.
+# Only restart tasks that may have been affected by the task failure, which typically includes
+# downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption.
+jobmanager.execution.failover-strategy: region
+
+restart-strategy: fixed-delay
+
+restart-strategy.fixed-delay.attempts: 2147483647
+
+yarn.application-attempts: 10000
+
+restart-strategy.fixed-delay.delay: 5 s
+
+web.submit.enable: false
+
+#==============================================================================
+# Advanced
+#==============================================================================
+
+# Override the directories for temporary files. If not specified, the
+# system-specific Java temporary directory (java.io.tmpdir property) is taken.
+#
+# For framework setups on Yarn or Mesos, Flink will automatically pick up the
+# containers' temp directories without any need for configuration.
+#
+# Add a delimited list for multiple directories, using the system directory
+# delimiter (colon ':' on unix) or a comma, e.g.:
+# /data1/tmp:/data2/tmp:/data3/tmp
+#
+# Note: Each directory entry is read from and written to by a different I/O
+# thread. You can include the same directory multiple times in order to create
+# multiple I/O threads against that directory. This is for example relevant for
+# high-throughput RAIDs.
+#
+# io.tmp.dirs: /tmp
+
+# The classloading resolve order. Possible values are 'child-first' (Flink's default)
+# and 'parent-first' (Java's default).
+#
+# Child first classloading allows users to use different dependency/library
+# versions in their application than those in the classpath. Switching back
+# to 'parent-first' may help with debugging dependency issues.
+#
+# classloader.resolve-order: child-first
+classloader.resolve-order: parent-first
+
+metrics.reporter.promgateway.class: org.apache.flink.metrics.prometheus.PrometheusPushGatewayReporter
+metrics.reporter.promgateway.randomJobNameSuffix: true
+metrics.reporter.promgateway.deleteOnShutdown: true
+metrics.reporter.promgateway.interval: 10 SECONDS
+metrics.reporter.promgateway.host: 127.0.0.1
+metrics.reporter.promgateway.port: 9091
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink.sh.j2
new file mode 100644
index 0000000..d38b0d4
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/flink/flink.sh.j2
@@ -0,0 +1,4 @@
+
+#flink
+export FLINK_HOME={{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}
+export PATH=$FLINK_HOME/bin:$PATH
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/hadoop-env.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..6c18711
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/hadoop-env.sh.j2
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
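+# Expose NameNode/DataNode metrics to Prometheus through the bundled jmx_exporter javaagent (ports 9905 and 9906).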
+export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
+export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
+
+# The java implementation to use.
+#export HADOOP_HEAPSIZE=m
+#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
+export JAVA_HOME=$JAVA_HOME
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol. Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+ if [ "$HADOOP_CLASSPATH" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+ else
+ export HADOOP_CLASSPATH=$f
+ fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS {{ hadoop.namenode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"
+
+export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS {{ hadoop.datanode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=ERROR,RFAS"
+
+export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS {{ hadoop.journalnode.java_opt }}"
+
+export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS {{ hadoop.zkfc.java_opt }}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol. This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+# the user that will run the hadoop daemons. Otherwise there is the
+# potential for a symlink attack.
+export HADOOP_PID_DIR={{ deploy_dir }}/{{ hadoop_version }}/pids
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/hdfs-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/hdfs-site.xml.j2
new file mode 100644
index 0000000..28a9b32
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/hdfs-site.xml.j2
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:{{ hdfs_data_dir }}/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:{{ hdfs_data_dir }}/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the host where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>{{ groups.hdfs[0] }}:9000</value>
+ </property>
+ <!-- HTTP address of nn1 (external web UI address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>{{ groups.hdfs[0] }}:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the host where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>{{ groups.hdfs[1] }}:9000</value>
+ </property>
+ <!-- HTTP address of nn2 (external web UI address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>{{ groups.hdfs[1] }}:50070</value>
+ </property>
+ <!-- Where the NameNode edit log is stored on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://{{groups.hdfs[0]}}:8485;{{groups.hdfs[1]}}:8485;{{groups.hdfs[2]}}:8485/ns1</value>
+ </property>
+ <!-- Local directory where the JournalNodes store their data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>{{ hdfs_data_dir }}/journal</value>
+ </property>
+ <!-- Proxy provider the HDFS client uses to reach the NameNodes and determine which one is Active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods used during automatic failover: sshfence logs in to the failed Active NameNode and kills the process; shell(true) is a fallback that always succeeds -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence
+shell(true)</value>
+ </property>
+ <!-- SSH private key used by the sshfence method (passwordless SSH is required) -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Connection timeout for the sshfence method; not needed if only shell fencing is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enable automatic failover -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode RPC handler threads; increasing it costs little extra memory -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>{{ hadoop.namenode['dfs.namenode.handler.count'] }}</value>
+ </property>
+ <!-- Number of DataNode RPC handler threads; increasing it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>{{ hadoop.datanode['dfs.datanode.handler.count'] }}</value>
+ </property>
+ <!-- Bandwidth available to the balancer, in bytes per second -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space (in bytes) that HDFS will not use -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode/NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/mapred-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/mapred-site.xml.j2
new file mode 100644
index 0000000..5922c15
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/mapred-site.xml.j2
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>{{ groups.yarn[0] }}:10020</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>{{ groups.yarn[0] }}:19888</value>
+ </property>
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/set_yarn_env.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/set_yarn_env.sh.j2
new file mode 100644
index 0000000..70972ec
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/set_yarn_env.sh.j2
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+source /etc/profile
+
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME={{ deploy_dir }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ chkconfig --add keepyarnhistory
+ chkconfig keepyarnhistory on
+fi
+
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ chkconfig --add keepyarnmaster
+ chkconfig keepyarnmaster on
+fi
+
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ chkconfig --add keepyarnworker
+ chkconfig keepyarnworker on
+fi
+}
+
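+# Usage: set_yarn_env.sh {history|master|worker|chkconfig} - the first three start the matching keepyarn* service and make sure its dae-yarn*.sh watchdog is running; chkconfig writes /etc/profile.d/hadoop.sh and enables the init scripts at boot.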
+case $1 in
+history)
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ service keepyarnhistory start && sleep 5
+ history_dae=`ps -ef | grep "dae-yarnhistory.sh" | grep -v grep | wc -l`
+ if [ $history_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnhistory.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ service keepyarnmaster start && sleep 5
+ master_dae=`ps -ef | grep "dae-yarnmaster.sh" | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ service keepyarnworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-yarnworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/slaves.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/slaves.j2
new file mode 100644
index 0000000..911e41c
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/slaves.j2
@@ -0,0 +1,4 @@
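+{# slaves lists every host that runs a NodeManager/DataNode: the union of the yarn and hdfs inventory groups. #}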
+{% set combined_group = groups.yarn | union(groups.hdfs) %}
+{% for dev_info in combined_group %}
+{{dev_info}}
+{% endfor %}
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/standalone/core-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/core-site.xml.j2
new file mode 100644
index 0000000..ccbfe45
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/core-site.xml.j2
@@ -0,0 +1,65 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:{{ hdfs_data_dir }}/tmp</value>
+ </property>
+
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>{{inventory_hostname}}:2181</value>
+ </property>
+
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+
+</configuration>
+
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/standalone/hdfs-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/hdfs-site.xml.j2
new file mode 100644
index 0000000..833e624
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/hdfs-site.xml.j2
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/standalone/yarn-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/yarn-site.xml.j2
new file mode 100644
index 0000000..57b079e
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/standalone/yarn-site.xml.j2
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- ResourceManager cluster id -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <!-- ResourceManager hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname</name>
+ <value>{{ groups.yarn[0] }}</value>
+ </property>
+
+ <!-- ResourceManager web UI address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address</name>
+ <value>{{ groups.yarn[0] }}:8080</value>
+ </property>
+
+ <!-- Scheduler address, default port 8030 -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address</name>
+ <value>{{ groups.yarn[0] }}:8030</value>
+ </property>
+
+ <!-- Resource tracker address, default port 8031 -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address</name>
+ <value>{{ groups.yarn[0] }}:8031</value>
+ </property>
+
+ <!-- Applications manager interface address, default port 8032 -->
+ <property>
+ <name>yarn.resourcemanager.address</name>
+ <value>{{ groups.yarn[0] }}:8032</value>
+ </property>
+
+ <!-- Admin address, default port 8033 -->
+ <property>
+ <name>yarn.resourcemanager.admin.address</name>
+ <value>{{ groups.yarn[0] }}:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address</name>
+ <value>{{ groups.yarn[0] }}:23142</value>
+ </property>
+
+ <!-- ZooKeeper quorum address -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>{{inventory_hostname}}:2181</value>
+ </property>
+
+ <!-- Enable RM recovery so running applications survive a ResourceManager restart (default false) -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Enable NodeManager recovery (default false) -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Local filesystem directory where the NodeManager persists its state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>{{ deploy_dir }}/{{ hadoop_version }}/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+ <!-- NodeManager RPC address; the default ${yarn.nodemanager.hostname}:0 picks an ephemeral port that changes on restart, so a fixed port is set here to keep NM restart/recovery working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>{{ deploy_dir }}/{{ hadoop_version }}/logs/app-logs/</value>
+ </property>
+
+ <!-- Physical memory (MB) the NodeManager can allocate to containers, default 8192 -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.nodemanager.resource.memory-mb'] }}</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in MB, default 1024 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb, default 8192 -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.scheduler.maximum-allocation-mb'] }}</value>
+ </property>
+
+ <!-- Number of vcores that can be allocated to containers; used by the RM scheduler, not a hard limit on physical cores. Default 8, usually set to the server's total CPU core count -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.nodemanager.resource.cpu-vcores'] }}</value>
+ </property>
+
+ <!-- Minimum allocation per container request in vcores, default 1 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+ <!-- Maximum allocation per container request in vcores, default 32; usually slightly below yarn.nodemanager.resource.cpu-vcores, and job slot counts should not exceed it -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.scheduler.maximum-allocation-vcores'] }}</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- Maximum ApplicationMaster attempts; defaults to 2 with HA, can be increased in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://{{ groups.yarn[0] }}:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/unload_hadoop_yarn.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/unload_hadoop_yarn.sh.j2
new file mode 100644
index 0000000..1491774
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/unload_hadoop_yarn.sh.j2
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+source /etc/profile
+
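+# Teardown order: stop and remove the keepyarn* init services, kill any remaining ResourceManager/NodeManager/JobHistoryServer processes, then delete the install directories only if no HDFS daemons are left on the host.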
+function killService(){
+keeppath='/etc/init.d/keepyarnhistory'
+if [ -x $keeppath ];then
+service keepyarnhistory stop
+chkconfig keepyarnhistory off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepyarnhistory
+fi
+
+keeppath='/etc/init.d/keepyarnmaster'
+if [ -x $keeppath ];then
+service keepyarnmaster stop
+chkconfig keepyarnmaster off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepyarnmaster
+fi
+
+keeppath='/etc/init.d/keepyarnworker'
+if [ -x $keeppath ];then
+service keepyarnworker stop
+chkconfig keepyarnworker off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepyarnworker
+fi
+}
+
+function killPid(){
+livenum=`jps -l | egrep -w "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+
+livenum=`jps -l | egrep -w "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+
+livenum=`jps -l | egrep -w "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep |wc -l`
+if [ $livenum -ne 0 ];then
+keeppid=`jps -l |egrep -w "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | awk '{print $1}'`
+kill -9 $keeppid
+fi
+}
+
+function drop_folder(){
+FOLDER_NAME=$1
+
+if [ -d "$FOLDER_NAME" ];then
+ rm -rf $FOLDER_NAME
+fi
+}
+
+function drop_file(){
+FILE_NAME=$1
+
+if [ -f "$FILE_NAME" ];then
+ rm -rf $FILE_NAME
+fi
+}
+
+killService
+sleep 15
+killPid
+
+HAS_HDFS=`jps -l | egrep "org.apache.hadoop.hdfs.qjournal.server.JournalNode|org.apache.hadoop.hdfs.tools.DFSZKFailoverController|org.apache.hadoop.hdfs.server.datanode.DataNode|org.apache.hadoop.hdfs.server.namenode.NameNode" | wc -l`
+if [ $HAS_HDFS -eq "0" ];then
+
+drop_folder {{ deploy_dir }}/{{ hadoop_version }}
+drop_folder {{ deploy_dir }}/hadoop
+drop_folder {{ data_dir }}/hadoop
+drop_file /etc/profile.d/hadoop.sh
+
+fi
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/yarn-env.sh.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/yarn-env.sh.j2
new file mode 100644
index 0000000..39b00e8
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/yarn-env.sh.j2
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
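+# Expose ResourceManager/NodeManager metrics to Prometheus through the bundled jmx_exporter javaagent (ports 9909 and 9910).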
+export YARN_RESOURCEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9909:{{ deploy_dir }}/{{ hadoop_version }}/monitor/yarn.yaml"
+
+export YARN_NODEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9910:{{ deploy_dir }}/{{ hadoop_version }}/monitor/yarn.yaml"
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+export JAVA_HOME=$JAVA_HOME
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+# YARN_HEAPSIZE=1000
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS {{ hadoop.yarn.resourcemanager.java_opt }}"
+
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_TIMELINESERVER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_NODEMANAGER_HEAPSIZE=1000
+
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS {{ hadoop.yarn.nodemanager.java_opt }}"
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+ YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
diff --git a/Apache Hadoop/2.7.1/yarn/role/templates/yarn-site.xml.j2 b/Apache Hadoop/2.7.1/yarn/role/templates/yarn-site.xml.j2
new file mode 100644
index 0000000..b77d3dd
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/templates/yarn-site.xml.j2
@@ -0,0 +1,232 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- ResourceManager HA: cluster id and the two RM ids declared below -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+ <!-- Configuration for rsm1 -->
+ <!-- rsm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>{{ groups.yarn[0] }}</value>
+ </property>
+
+ <!-- rsm1 web UI address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:8080</value>
+ </property>
+
+ <!-- rsm1 scheduler address, default port 8030 -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:8030</value>
+ </property>
+
+ <!-- rsm1 resource tracker address, default port 8031 -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:8031</value>
+ </property>
+
+ <!-- rsm1 applications manager interface address, default port 8032 -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:8032</value>
+ </property>
+
+ <!-- rsm1 admin address, default port 8033 -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>{{ groups.yarn[0] }}:23142</value>
+ </property>
+
+ <!-- Configuration for rsm2 -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>{{ groups.yarn[1] }}</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>{{ groups.yarn[1] }}:23142</value>
+ </property>
+
+ <!-- ZooKeeper quorum address; renders as a comma-separated host:2181 list and also works with a single ZooKeeper node -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>{% for dev_info in groups.zookeeper %}{{ dev_info }}:2181{% if not loop.last %},{% endif %}{% endfor %}</value>
+ </property>
+
+ <!-- Enable RM recovery so running applications survive a ResourceManager restart (default false) -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Enable NodeManager recovery (default false) -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Local filesystem directory where the NodeManager persists its state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>{{ deploy_dir }}/{{ hadoop_version }}/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+ <!-- NodeManager RPC address; the default ${yarn.nodemanager.hostname}:0 picks an ephemeral port that changes on restart, so a fixed port is set here to keep NM restart/recovery working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>{{ deploy_dir }}/{{ hadoop_version }}/logs/app-logs/</value>
+ </property>
+
+ <!-- Physical memory (MB) the NodeManager can allocate to containers, default 8192 -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.nodemanager.resource.memory-mb'] }}</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in MB, default 1024 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb, default 8192 -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.scheduler.maximum-allocation-mb'] }}</value>
+ </property>
+
+ <!-- Number of vcores that can be allocated to containers; used by the RM scheduler, not a hard limit on physical cores. Default 8, usually set to the server's total CPU core count -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.nodemanager.resource.cpu-vcores'] }}</value>
+ </property>
+
+ <!-- Minimum allocation per container request in vcores, default 1 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+ <!-- Maximum allocation per container request in vcores, default 32; usually slightly below yarn.nodemanager.resource.cpu-vcores, and job slot counts should not exceed it -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>{{ hadoop.yarn.nodemanager['yarn.scheduler.maximum-allocation-vcores'] }}</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- Maximum ApplicationMaster attempts; defaults to 2 with HA, can be increased in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://{{ groups.yarn[0] }}:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
diff --git a/Apache Hadoop/2.7.1/yarn/role/vars/main.yml b/Apache Hadoop/2.7.1/yarn/role/vars/main.yml
new file mode 100644
index 0000000..76538cd
--- /dev/null
+++ b/Apache Hadoop/2.7.1/yarn/role/vars/main.yml
@@ -0,0 +1,15 @@
+# Hadoop version
+hadoop_version: hadoop-2.7.1
+
+# Flink version
+flink_version: flink-1.13.1
+
+# JDK version
+java_version: 1.8.0_73
+
+# Data directory
+hdfs_data_dir: "{{ data_dir }}/{{ hadoop_version }}/data/hadoop"
+
+# Clusters with more than 5 nodes: the first two hosts run ResourceManager, the remaining hosts run NodeManager
+# Clusters with fewer than 5 nodes: the first two hosts run ResourceManager and every host runs NodeManager
+cluster_limit: "5"
diff --git a/Apache Ignite/2.15.0/ignite/hosts b/Apache Ignite/2.15.0/ignite/hosts
new file mode 100644
index 0000000..e47f818
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/hosts
@@ -0,0 +1,5 @@
+[zookeeper]
+192.168.45.102
+
+[ignite]
+192.168.45.102
diff --git a/Apache Ignite/2.15.0/ignite/install.yml b/Apache Ignite/2.15.0/ignite/install.yml
new file mode 100644
index 0000000..8f96427
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/install.yml
@@ -0,0 +1,7 @@
+- hosts: ignite
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Apache Ignite/2.15.0/ignite/role/defaults/main.yml b/Apache Ignite/2.15.0/ignite/role/defaults/main.yml
new file mode 100644
index 0000000..dfe1036
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/defaults/main.yml
@@ -0,0 +1,12 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+ignite:
+  #JVM heap settings for the Ignite node.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #Setting region max size equal to physical RAM size(5 GB).
+ maxSize: '#{5L * 1024 * 1024 * 1024}'
+
diff --git a/Apache Ignite/2.15.0/ignite/role/handlers/main.yml b/Apache Ignite/2.15.0/ignite/role/handlers/main.yml
new file mode 100644
index 0000000..5d63377
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/handlers/main.yml
@@ -0,0 +1,25 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
diff --git a/Apache Ignite/2.15.0/ignite/role/tasks/deploy.yml b/Apache Ignite/2.15.0/ignite/role/tasks/deploy.yml
new file mode 100644
index 0000000..f20422d
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/tasks/deploy.yml
@@ -0,0 +1,48 @@
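+# Pre-flight check: the deployment expects ZooKeeper (used for Ignite discovery) to be listening on port 2181 on the first zookeeper host; otherwise the play fails.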
+- block:
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+ - name: To terminate execution
+ fail:
+      msg: "Port 2181 on the ZooKeeper node is not listening. The service may be down or unreachable."
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'config' }
+
+- name: Unpack libs.zip
+ unarchive:
+ src: 'files/libs.zip'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+
+- name: Copying config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ - { src: 'default-config.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/default-config.xml' }
+ notify:
+ - Start Container
+
+- meta: flush_handlers
+
+
diff --git a/Apache Ignite/2.15.0/ignite/role/tasks/main.yml b/Apache Ignite/2.15.0/ignite/role/tasks/main.yml
new file mode 100644
index 0000000..9254dff
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/tasks/main.yml
@@ -0,0 +1,10 @@
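+# Entry point: operation=install runs uninstall.yml then deploy.yml; operation=uninstall runs only uninstall.yml
+# (for example: ansible-playbook -i hosts install.yml -e operation=install; adjust paths to your layout).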
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+# - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
+
diff --git a/Apache Ignite/2.15.0/ignite/role/tasks/status-check.yml b/Apache Ignite/2.15.0/ignite/role/tasks/status-check.yml
new file mode 100644
index 0000000..08aaf29
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
+- name: Waiting for Kafka to start, 60s
+ shell: sleep 60
+
+- name: Check if the Kafka already exists
+ shell: ps -ef | grep -v grep | grep kafka.Kafka | wc -l
+ register: process_out
+
+- name: Check if the Kafka already exists
+ shell: netstat -anlp | egrep "9092|9094|9095" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Kafka on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '3'
diff --git a/Apache Ignite/2.15.0/ignite/role/tasks/uninstall.yml b/Apache Ignite/2.15.0/ignite/role/tasks/uninstall.yml
new file mode 100644
index 0000000..13559a6
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/tasks/uninstall.yml
@@ -0,0 +1,28 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Checking ZooKeeper has Ignite nodes
+ shell: "docker exec -it zookeeper zkCli.sh ls / | grep apacheIgnite | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete Ignite nodes in ZooKeeper
+ shell: "docker exec -it zookeeper zkCli.sh rmr /apacheIgnite"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
diff --git a/Apache Ignite/2.15.0/ignite/role/templates/default-config.xml.j2 b/Apache Ignite/2.15.0/ignite/role/templates/default-config.xml.j2
new file mode 100644
index 0000000..2350dc1
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/templates/default-config.xml.j2
@@ -0,0 +1,128 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans
+ xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:util="http://www.springframework.org/schema/util"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd
+ http://www.springframework.org/schema/util
+ http://www.springframework.org/schema/util/spring-util.xsd">
+ <bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
+ <property name="cacheConfiguration">
+ <list>
+ <bean abstract="true" class="org.apache.ignite.configuration.CacheConfiguration" id="cache-template-bean">
+ <!-- when you create a template via XML configuration, you must add an asterisk to the name of the template -->
+ <property name="name" value="SanityDirectoryCacheTemplate*"/>
+ <property name="cacheMode" value="PARTITIONED"/>
+ <property name="backups" value="1"/>
+ <property name="onheapCacheEnabled" value="true"/>
+ <property name="rebalanceMode" value="ASYNC"/>
+                <!-- Write synchronization mode: -->
+                <!-- PRIMARY_SYNC (default; a write succeeds once the primary is written, so reads from backup nodes may still return stale data) -->
+                <!-- FULL_SYNC (a write returns only after both primary and backup nodes are written, guaranteeing identical data across nodes) -->
+                <!-- FULL_ASYNC (a write returns without waiting for primary or backup nodes to be written, so even reads from the primary may return stale data) -->
+ <property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
+                <!-- Partition loss policy: -->
+                <!-- IGNORE (default; partition loss is ignored, the related state is cleared and no EVT_CACHE_REBALANCE_PART_DATA_LOST event is fired) -->
+                <!-- READ_WRITE_ALL (all reads and writes are allowed as if the partition loss never happened) -->
+                <!-- READ_WRITE_SAFE (reads and writes to surviving partitions are allowed; operations on lost partitions fail with an exception) -->
+                <!-- READ_ONLY_ALL (reads of both lost and surviving partitions are allowed; writes fail with an exception) -->
+                <!-- READ_ONLY_SAFE (all writes and all reads of lost partitions fail with an exception; reads of surviving partitions are allowed) -->
+ <property name="partitionLossPolicy" value="READ_WRITE_ALL"/>
+ <!-- Other cache parameters -->
+ <!-- Enable statistics for the cache. -->
+ <property name="statisticsEnabled" value="true"/>
+ </bean>
+ </list>
+ </property>
+ <property name="metricsLogFrequency" value="0"/>
+        <!-- Failure detection timeout -->
+        <property name="failureDetectionTimeout" value="#{60 * 60 * 1000}"/>
+        <!-- Timeout for interaction between system workers, default 10s -->
+        <property name="systemWorkerBlockedTimeout" value="#{60 * 60 * 1000}"/>
+        <!-- Restart the node automatically on critical failures -->
+ <property name="failureHandler">
+ <bean class="org.apache.ignite.failure.RestartProcessFailureHandler"/>
+ </property>
+ <!-- Set batch size. -->
+ <property name="rebalanceBatchSize" value="#{1 * 1024 * 1024 * 1024}"/>
+        <!-- Query thread pool size (default: max(8, total number of cores)) -->
+        <property name="queryThreadPoolSize" value="208"/>
+
+        <!-- Set throttle interval. -->
+        <property name="rebalanceThrottle" value="100"/>
+ <property name="dataStorageConfiguration">
+ <bean class="org.apache.ignite.configuration.DataStorageConfiguration">
+            <!-- Concurrency level; tune to the actual environment -->
+            <property name="concurrencyLevel" value="10000"/>
+
+            <!-- Memory page size (see: getconf PAGESIZE) -->
+ <property name="pageSize" value="#{4 * 1024}"/>
+ <!-- Size of the WAL (Write Ahead Log) segment -->
+ <property name="walSegmentSize" value="#{1024 * 1024 * 1024}"/>
+ <!--In our experience LOG_ONLY is a good compromise between durability and performance.-->
+ <property name="walMode" value="LOG_ONLY"/>
+ <!-- Enable write throttling. -->
+ <property name="writeThrottlingEnabled" value="true"/>
+            <!-- Checkpointing frequency -->
+ <!--Checkpointing frequency which is a minimal interval when the dirty pages will be written to the Persistent Store.-->
+ <property name="checkpointFrequency" value="180000"/>
+ <!-- persistent storage metrics -->
+ <property name="metricsEnabled" value="true"/>
+ <property name="defaultDataRegionConfiguration">
+ <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
+ <!-- 100 MB initial size. -->
+ <property name="initialSize" value="#{100L * 1024 * 1024}"/>
+ <!-- Setting region max size equal to physical RAM size(5 GB). -->
+ <property name="maxSize" value="{{ ignite.maxSize }}"/>
+ <property name="checkpointPageBufferSize" value="#{20L *1024* 1024 * 1024L}" />
+ <property name="persistenceEnabled" value="true"/>
+ </bean>
+ </property>
+            <!-- Directories for persistent data storage -->
+ <property name="storagePath" value="/persistence/storage" />
+ <property name="walArchivePath" value="/persistence/walArchive" />
+ <property name="walPath" value="/persistence/wal" />
+ </bean>
+ </property>
+ <property name="clientConnectorConfiguration">
+ <bean class="org.apache.ignite.configuration.ClientConnectorConfiguration">
+ <property name="port" value="10800"/>
+ </bean>
+ </property>
+        <!-- Metric exporters. -->
+ <property name="metricExporterSpi">
+ <list>
+ <bean class="org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi"/>
+ <bean class="org.apache.ignite.spi.metric.log.LogExporterSpi"/>
+ <bean class="org.apache.ignite.spi.metric.opencensus.OpenCensusMetricExporterSpi"/>
+ </list>
+ </property>
+        <!-- Node discovery. -->
+ <property name="discoverySpi">
+ <bean class="org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi">
+ <property name="zkConnectionString" value="{% for dev_info in groups.zookeeper -%}
+ {% if loop.last -%}
+{{dev_info}}:2181"/>
+ {%- else %}
+{{dev_info}}:2181,
+ {%- endif %}
+{%- endfor %}
+
+ <property name="sessionTimeout" value="30000"/>
+ <property name="zkRootPath" value="/apacheIgnite"/>
+ <property name="joinTimeout" value="10000"/>
+ </bean>
+ </property>
+ </bean>
+    <!-- Monitoring configuration. -->
+ <bean id="opencensusWrapper" class="org.springframework.beans.factory.config.MethodInvokingBean">
+ <property name="staticMethod" value="io.opencensus.exporter.stats.prometheus.PrometheusStatsCollector.createAndRegister"/>
+ </bean>
+ <bean id="httpServer" class="io.prometheus.client.exporter.HTTPServer">
+ <constructor-arg type="java.lang.String" value="{{ inventory_hostname }}"/>
+ <constructor-arg type="int" value="9916"/>
+ <constructor-arg type="boolean" value="true"/>
+ </bean>
+</beans>
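Discovery uses ZookeeperDiscoverySpi with zkRootPath=/apacheIgnite, so a quick way to confirm a rendered configuration took effect is to inspect that znode from the ZooKeeper container, mirroring the check in uninstall.yml, and to verify the client connector port from clientConnectorConfiguration:

    # run on the first host of the [zookeeper] group
    docker exec zookeeper zkCli.sh ls /apacheIgnite
    # on the Ignite host: the thin-client connector should be listening
    netstat -anlp | grep 10800 | grep LISTEN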
diff --git a/Apache Ignite/2.15.0/ignite/role/templates/docker-compose.yml.j2 b/Apache Ignite/2.15.0/ignite/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..57096e5
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/templates/docker-compose.yml.j2
@@ -0,0 +1,26 @@
+version: '3.3'
+
+services:
+ ignite:
+ image: ignite:2.15.0
+ container_name: ignite
+ restart: always
+ ports:
+ - "47103:47100"
+ - "47503:47500"
+ - "49114:49112"
+ - "10800:10800"
+ - "9999:9999"
+ environment:
+ - JVM_OPTS=-server {{ ignite.java_opt }} -XX:+AlwaysPreTouch -XX:+UseG1GC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC -Duser.timezone=UTC
+ - IGNITE_QUIET=false
+ - IGNITE_CONFIG_FILE=/opt/ignite/apache-ignite/config/default-config.xml
+ - IGNITE_WORK_DIR=/persistence
+ - IGNITE_LIBS=/opt/ignite/apache-ignite/libs/*
+ - JAVA_OPTS=-Duser.timezone=Asia/Shanghai
+ - timezone=Asia/Shanghai
+ volumes:
+ - {{ deploy_dir }}/{{ container_name }}/config/default-config.xml:/opt/ignite/apache-ignite/config/default-config.xml:rw
+ - {{ deploy_dir }}/{{ container_name }}/data:/persistence:rw
+ - {{ deploy_dir }}/{{ container_name }}/libs:/opt/ignite/apache-ignite/libs
+ network_mode: "host"
diff --git a/Apache Ignite/2.15.0/ignite/role/vars/.main.yml.swp b/Apache Ignite/2.15.0/ignite/role/vars/.main.yml.swp
new file mode 100644
index 0000000..2a7e014
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/vars/.main.yml.swp
Binary files differ
diff --git a/Apache Ignite/2.15.0/ignite/role/vars/main.yml b/Apache Ignite/2.15.0/ignite/role/vars/main.yml
new file mode 100644
index 0000000..28deabd
--- /dev/null
+++ b/Apache Ignite/2.15.0/ignite/role/vars/main.yml
@@ -0,0 +1,11 @@
+# Image name
+image_name: ignite
+
+# Image tag
+image_tag: 2.15.0
+
+# Container name
+container_name: ignite
+
+# Component version
+component_version: ignite-2.15.0
diff --git a/Apache Kafka/3.4.1/kafka/hosts b/Apache Kafka/3.4.1/kafka/hosts
new file mode 100644
index 0000000..8004593
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/hosts
@@ -0,0 +1,5 @@
+[zookeeper]
+192.168.45.102
+
+[kafka]
+192.168.45.102
diff --git a/Apache Kafka/3.4.1/kafka/install.yml b/Apache Kafka/3.4.1/kafka/install.yml
new file mode 100644
index 0000000..32356ac
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/install.yml
@@ -0,0 +1,7 @@
+- hosts: kafka
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Apache Kafka/3.4.1/kafka/role/defaults/main.yml b/Apache Kafka/3.4.1/kafka/role/defaults/main.yml
new file mode 100644
index 0000000..ad2b1da
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/defaults/main.yml
@@ -0,0 +1,13 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+kafka:
+ #Running memory of the Kafka.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The minimum age of a log file to be eligible for deletion due to age
+ log.retention.hours: 168
+  #A size-based retention policy for logs, unit: bytes
+ log.retention.bytes: 10737418240
diff --git a/Apache Kafka/3.4.1/kafka/role/handlers/main.yml b/Apache Kafka/3.4.1/kafka/role/handlers/main.yml
new file mode 100644
index 0000000..5b929a6
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/handlers/main.yml
@@ -0,0 +1,38 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+- name: Loading Exporter Image
+ docker_image:
+ name: 'kafka_exporter'
+ tag: 'v2.0'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/kafka_exporter-v2.0.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Exporter Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
diff --git a/Apache Kafka/3.4.1/kafka/role/tasks/deploy.yml b/Apache Kafka/3.4.1/kafka/role/tasks/deploy.yml
new file mode 100644
index 0000000..09bad99
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/tasks/deploy.yml
@@ -0,0 +1,72 @@
+- name: Setting node_nums variable
+ set_fact: node_nums="{{groups.kafka|length}}"
+
+- block:
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+ - name: To terminate execution
+ fail:
+      msg: "Port 2181 on the ZooKeeper node is not listening. The service status may be abnormal"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'kafka-logs' }
+ - { dir: 'config' }
+ - { dir: 'logs' }
+ - { dir: 'monitor' }
+
+- name: unpack {{ component_version }}.tgz to /usr/local/
+ unarchive:
+ src: 'files/{{ component_version }}.tgz'
+ dest: '/usr/local/'
+
+- name: Copying Kafka helper scripts
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ with_items:
+ - { src: 'kafka-operation.sh.j2', dest: '/usr/local/{{ component_version }}/bin/kafka-operation.sh' }
+ - { src: 'kafka.sh.j2', dest: '/etc/profile.d/kafka.sh' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: 'files/kafka_exporter-v2.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Copying Kafka config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'server.properties.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/server.properties' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml' }
+ notify:
+ - Start Container
+ - Start Exporter Container
+
+- meta: flush_handlers
+
+
diff --git a/Apache Kafka/3.4.1/kafka/role/tasks/main.yml b/Apache Kafka/3.4.1/kafka/role/tasks/main.yml
new file mode 100644
index 0000000..7d9aec3
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/tasks/main.yml
@@ -0,0 +1,10 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
+
diff --git a/Apache Kafka/3.4.1/kafka/role/tasks/status-check.yml b/Apache Kafka/3.4.1/kafka/role/tasks/status-check.yml
new file mode 100644
index 0000000..08aaf29
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
+- name: Waiting for Kafka to start, 60s
+ shell: sleep 60
+
+- name: Check if the Kafka process already exists
+ shell: ps -ef | grep -v grep | grep kafka.Kafka | wc -l
+ register: process_out
+
+- name: Check if the Kafka ports are listening
+ shell: netstat -anlp | egrep "9092|9094|9095" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Kafka on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '3'
diff --git a/Apache Kafka/3.4.1/kafka/role/tasks/uninstall.yml b/Apache Kafka/3.4.1/kafka/role/tasks/uninstall.yml
new file mode 100644
index 0000000..64d5dfa
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/tasks/uninstall.yml
@@ -0,0 +1,39 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'kafka_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'kafka_exporter'
+ tag: 'v2.0'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Checking ZooKeeper has Kafka nodes
+ shell: "docker exec -it zookeeper zkCli.sh ls / | grep kafka | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete Kafka nodes in ZooKeeper
+ shell: "docker exec -it zookeeper zkCli.sh rmr /kafka"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
diff --git a/Apache Kafka/3.4.1/kafka/role/templates/docker-compose.yml.j2 b/Apache Kafka/3.4.1/kafka/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..cb9b070
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/templates/docker-compose.yml.j2
@@ -0,0 +1,15 @@
+version: '2'
+
+services:
+ kafka:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: {{ container_name }}
+ environment:
+ KAFKA_JVM_MEM: "{{ kafka.java_opt }}"
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/config/server.properties:/opt/{{ component_version }}/config/server.properties"
+ - "{{ deploy_dir }}/{{ container_name }}/kafka-logs:/opt/{{ component_version }}/kafka-logs"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/{{ component_version }}/logs"
+ network_mode: "host"
+
diff --git a/Apache Kafka/3.4.1/kafka/role/templates/docker-compose_exporter.yml.j2 b/Apache Kafka/3.4.1/kafka/role/templates/docker-compose_exporter.yml.j2
new file mode 100644
index 0000000..8b3595c
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/templates/docker-compose_exporter.yml.j2
@@ -0,0 +1,21 @@
+version: '3.3'
+
+services:
+ kafka-exporter:
+ image: kafka_exporter:v2.0
+ container_name: kafka_exporter
+ ports:
+ - 9982:9308
+ restart: always
+ command:
+ - --kafka.server={{ inventory_hostname }}:9094
+ - --sasl.username=admin
+ - --sasl.password=galaxy2019
+ - --sasl.mechanism=plain
+ - --sasl.enabled
+ networks:
+ olap:
+ ipv4_address: 172.20.88.7
+networks:
+ olap:
+ external: true
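The exporter publishes Prometheus metrics on host port 9982 (mapped to kafka_exporter's default 9308). A quick smoke test once the container is up, using the sample host from the inventory:

    curl -s http://192.168.45.102:9982/metrics | grep -m1 kafka_brokers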
diff --git a/Apache Kafka/3.4.1/kafka/role/templates/kafka-operation.sh.j2 b/Apache Kafka/3.4.1/kafka/role/templates/kafka-operation.sh.j2
new file mode 100644
index 0000000..5decb88
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/templates/kafka-operation.sh.j2
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+LOCAL_IP={{ inventory_hostname }}:9094
+
+ZK_SERVER={% for dev_info in groups.zookeeper -%}
+ {% if loop.last -%}
+{{dev_info}}:2181/kafka
+ {%- else %}
+{{dev_info}}:2181,
+ {%- endif %}
+{%- endfor %}
+
+
+KAFKA_SERVER={% for dev_info in groups.kafka -%}
+ {% if loop.last -%}
+{{dev_info}}:9092
+ {%- else %}
+{{dev_info}}:9092,
+ {%- endif %}
+{%- endfor %}
+
+PARTITIONS={{groups.kafka|length}}
+
+
+case $1 in
+ producer)
+ kafka-console-producer.sh --producer.config $KAFKA_HOME/config/producer.properties --broker-list $LOCAL_IP --topic $2
+ ;;
+ consumer)
+ kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --bootstrap-server $LOCAL_IP --topic $2
+ ;;
+ consumer-begin)
+ kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --from-beginning --bootstrap-server $LOCAL_IP --topic $2
+ ;;
+ create)
+ kafka-topics.sh --create --bootstrap-server $KAFKA_SERVER --replication-factor 1 --partitions $PARTITIONS --topic $2
+ ;;
+ delete)
+ kafka-topics.sh --delete --bootstrap-server $KAFKA_SERVER --topic $2
+ ;;
+ list)
+ kafka-topics.sh --list --bootstrap-server $KAFKA_SERVER
+ ;;
+ groups)
+ kafka-consumer-groups.sh --all-groups --all-topics --list --bootstrap-server $KAFKA_SERVER
+ ;;
+ group)
+ kafka-consumer-groups.sh --bootstrap-server $KAFKA_SERVER --describe --group $2
+ ;;
+ election-leader)
+ kafka-leader-election.sh --bootstrap-server $KAFKA_SERVER --all-topic-partitions --election-type PREFERRED
+ ;;
+ *)
+ echo 'Usage: kafka-operation.sh {producer|consumer|consumer-begin|create|delete} {topic-name}'
+ echo 'Status: kafka-operation.sh {list|groups}'
+ echo 'Status: kafka-operation.sh {group} {group name}'
+ echo 'maintenance: kafka-operation.sh {election-leader}'
+esac
+
+
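For reference, a few invocations of the wrapper above after /etc/profile.d/kafka.sh has been sourced; topic and group names are illustrative:

    kafka-operation.sh create demo-topic      # create a topic with $PARTITIONS partitions
    kafka-operation.sh producer demo-topic    # interactive console producer on the 9094 SASL listener
    kafka-operation.sh consumer demo-topic    # console consumer from the latest offset
    kafka-operation.sh list                   # list topics via the 9092 listener
    kafka-operation.sh group demo-group       # describe a single consumer group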
diff --git a/Apache Kafka/3.4.1/kafka/role/templates/kafka.sh.j2 b/Apache Kafka/3.4.1/kafka/role/templates/kafka.sh.j2
new file mode 100644
index 0000000..cd5b5f0
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/templates/kafka.sh.j2
@@ -0,0 +1,3 @@
+#kafka
+export KAFKA_HOME=/usr/local/{{ component_version }}
+export PATH=$KAFKA_HOME/bin:$PATH
diff --git a/Apache Kafka/3.4.1/kafka/role/templates/server.properties.j2 b/Apache Kafka/3.4.1/kafka/role/templates/server.properties.j2
new file mode 100644
index 0000000..dfedf76
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/templates/server.properties.j2
@@ -0,0 +1,190 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
+advertised.listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
+ssl.keystore.location=/opt/{{ component_version }}/config/keystore.jks
+ssl.keystore.password=galaxy2019
+ssl.key.password=galaxy2019
+ssl.truststore.location=/opt/{{ component_version }}/config/truststore.jks
+ssl.truststore.password=galaxy2019
+#ssl.client.auth=required
+ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
+ssl.keystore.type=JKS
+ssl.truststore.type=JKS
+
+# Since Kafka 2.0.x, ssl.endpoint.identification.algorithm defaults to HTTPS, i.e. the client verifies the broker hostname.
+# To disable hostname verification, leave the value empty: ssl.endpoint.identification.algorithm=
+ssl.endpoint.identification.algorithm=
+
+# Security protocol used for inter-broker communication (default: security.inter.broker.protocol=PLAINTEXT)
+security.inter.broker.protocol=SASL_PLAINTEXT
+
+# SASL configuration
+sasl.mechanism.inter.broker.protocol=PLAIN
+sasl.enabled.mechanisms=PLAIN
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={{ groups['kafka'].index(inventory_hostname) +1 }}
+
+############################# Socket Server Settings #############################
+# Whether topic deletion is enabled
+delete.topic.enable=true
+
+# Whether topics may be created automatically
+auto.create.topics.enable=false
+
+# Log cleanup policy: delete old segments periodically
+log.cleanup.policy=delete
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=10485760
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=10485760
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+#socket.request.max.bytes=2147483600
+socket.request.max.bytes=104857600
+
+# The maximum size of a message body, in bytes.
+message.max.bytes=10485760
+
+# Maximum size of data fetched per replica fetch request, in bytes
+replica.fetch.max.bytes=20485760
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/opt/{{ component_version }}/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+{% if groups.kafka | length >= 3 %}
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=3
+
+# Replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this requirement.
+transaction.state.log.replication.factor=3
+
+# Overrides min.insync.replicas for the transaction topic.
+transaction.state.log.min.isr=2
+{% elif groups.kafka | length == 1 %}
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=1
+
+# Replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this requirement.
+transaction.state.log.replication.factor=1
+
+# Overrides min.insync.replicas for the transaction topic.
+transaction.state.log.min.isr=1
+{% endif %}
+
+# Whether replicas that are not in the ISR may be elected as partition leader.
+unclean.leader.election.enable=true
+
+# If a partition leader fails, the original broker may reclaim leadership after it recovers.
+auto.leader.rebalance.enable=true
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours={{ kafka['log.retention.hours'] }}
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+log.retention.bytes={{ kafka['log.retention.bytes'] }}
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={% for dev_info in groups.zookeeper -%}
+ {% if loop.last -%}
+{{dev_info}}:2181/kafka
+ {%- else %}
+{{dev_info}}:2181,
+ {%- endif %}
+{%- endfor %}
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=60000
+
+# ZooKeeper session timeout
+zookeeper.session.timeout.ms=60000
+
+#Set zookeeper client to use secure ACLs
+zookeeper.set.acl=false
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
+
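As a concrete illustration of the templated values above: for a hypothetical three-broker inventory 10.0.0.1-3 that also forms the [zookeeper] group, the Jinja expressions render on the first node roughly as:

    broker.id=1
    zookeeper.connect=10.0.0.1:2181,10.0.0.2:2181,10.0.0.3:2181/kafka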
diff --git a/Apache Kafka/3.4.1/kafka/role/vars/main.yml b/Apache Kafka/3.4.1/kafka/role/vars/main.yml
new file mode 100644
index 0000000..deb23e9
--- /dev/null
+++ b/Apache Kafka/3.4.1/kafka/role/vars/main.yml
@@ -0,0 +1,23 @@
+# Image name
+image_name: kafka
+
+# Image tag
+image_tag: 2.12-3.4.1
+
+# Container name
+container_name: kafka
+
+# Component version
+component_version: kafka_2.12-3.4.1
+
+# Backup directory
+backup_path: "{{ deploy_dir }}/backup/platform/{{ old_version }}/{{ container_name }}"
+
+# Files to back up
+backup_items:
+ - "{{ deploy_dir }}/{{ container_name }}/config"
+ - "{{ deploy_dir }}/{{ container_name }}/docker-compose.yml"
+
+# sha256 of the image for each platform version
+version_sha256_items:
+ "1.0.0": "d91a3183b1f625ab57829db7ffb51f53671e4c2b4c19d3b8511dbb8601593611"
diff --git a/Apache Zookeeper/3.5.9/zookeeper/hosts b/Apache Zookeeper/3.5.9/zookeeper/hosts
new file mode 100644
index 0000000..272c1db
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/hosts
@@ -0,0 +1,2 @@
+[zookeeper]
+192.168.45.102
diff --git a/Apache Zookeeper/3.5.9/zookeeper/install.yml b/Apache Zookeeper/3.5.9/zookeeper/install.yml
new file mode 100644
index 0000000..7f4aa66
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/install.yml
@@ -0,0 +1,7 @@
+- hosts: zookeeper
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/defaults/main.yml b/Apache Zookeeper/3.5.9/zookeeper/role/defaults/main.yml
new file mode 100644
index 0000000..4ca4ece
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/defaults/main.yml
@@ -0,0 +1,9 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+zookeeper:
+ #Running memory of the Zookeeper.
+ java_opts: -Xmx1024m -Xms1024m
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/handlers/main.yml b/Apache Zookeeper/3.5.9/zookeeper/role/handlers/main.yml
new file mode 100644
index 0000000..fbe2297
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/handlers/main.yml
@@ -0,0 +1,38 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+- name: Loading Exporter Image
+ docker_image:
+ name: 'zookeeper_exporter'
+ tag: 'v1.0'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/zookeeper_exporter-v1.0.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Exporter Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/tasks/deploy.yml b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/deploy.yml
new file mode 100644
index 0000000..fa2193c
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/deploy.yml
@@ -0,0 +1,55 @@
+- block:
+  - name: Check that the number of ZooKeeper nodes is odd
+ shell: if [ `expr {{groups.zookeeper|length}} % 2` -eq 0 ];then echo 1 ;else echo 0 ;fi
+ ignore_errors: false
+ register: nums_out
+
+ - name: To terminate execution
+ fail:
+      msg: "The number of ZooKeeper nodes must be odd. Please adjust the inventory."
+ when: nums_out.stdout != '0'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'conf' }
+ - { dir: 'data' }
+ - { dir: 'logs' }
+ - { dir: 'monitor' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: 'files/zookeeper_exporter-v1.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Copying Zookeeper config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'myid.j2', dest: '{{ deploy_dir }}/{{ container_name }}/data/myid' }
+ - { src: 'zoo.cfg.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/zoo.cfg' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml' }
+ notify:
+ - Loading Image
+ - Start Container
+ - Start Exporter Container
+
+- meta: flush_handlers
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/tasks/main.yml b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/main.yml
new file mode 100644
index 0000000..9a7c58b
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/main.yml
@@ -0,0 +1,9 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "unload"
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/tasks/status-check.yml b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/status-check.yml
new file mode 100644
index 0000000..9f945d1
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/status-check.yml
@@ -0,0 +1,13 @@
+- name: Waiting for Zookeeper to start, 60s
+ shell: sleep 60
+
+- name: Check the zookeeper service status
+ shell: docker exec -it zookeeper zkServer.sh status | egrep "follower|leader|standalone" | wc -l
+ register: check_status
+
+- name: To terminate execution
+ fail:
+    msg: "Zookeeper on node {{ inventory_hostname }} did not start properly. Please keep and report the logs under {{ deploy_dir }}/{{ container_name }}/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_status.stdout != '1'
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/tasks/uninstall.yml b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/uninstall.yml
new file mode 100644
index 0000000..0f7bb68
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/tasks/uninstall.yml
@@ -0,0 +1,27 @@
+- block:
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'zookeeper_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'zookeeper_exporter'
+ tag: 'v1.0'
+ state: absent
+
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose.yml.j2 b/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..fa1f934
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose.yml.j2
@@ -0,0 +1,16 @@
+version: '2'
+
+services:
+ zookeeper:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: zookeeper
+ ports:
+ - 2181:2181
+ environment:
+ ZOOKEEPER_JVM_MEM: "{{ zookeeper.java_opts }}"
+ volumes:
+ - "{{ deploy_dir }}/zookeeper/conf/zoo.cfg:/opt/{{ component_version }}/conf/zoo.cfg"
+ - "{{ deploy_dir }}/zookeeper/data:/opt/{{ component_version }}/data"
+ - "{{ deploy_dir }}/zookeeper/logs:/opt/{{ component_version }}/logs"
+ network_mode: "host"
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose_exporter.yml.j2 b/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose_exporter.yml.j2
new file mode 100644
index 0000000..014f1d7
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/templates/docker-compose_exporter.yml.j2
@@ -0,0 +1,17 @@
+version: '3.3'
+
+services:
+ zookeeper-exporter:
+ image: zookeeper_exporter:v1.0
+ container_name: zookeeper_exporter
+ ports:
+ - 9902:9114
+ restart: always
+ command:
+ - {{ inventory_hostname }}:2181
+ networks:
+ olap:
+ ipv4_address: 172.20.88.5
+networks:
+ olap:
+ external: true
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/templates/myid.j2 b/Apache Zookeeper/3.5.9/zookeeper/role/templates/myid.j2
new file mode 100644
index 0000000..1df9911
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/templates/myid.j2
@@ -0,0 +1 @@
+{{ groups.zookeeper.index(inventory_hostname) + 1 }}
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/templates/zoo.cfg.j2 b/Apache Zookeeper/3.5.9/zookeeper/role/templates/zoo.cfg.j2
new file mode 100644
index 0000000..497f843
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/templates/zoo.cfg.j2
@@ -0,0 +1,55 @@
+# The number of milliseconds of each tick
+# The interval for heartbeats between ZooKeeper servers or between clients and servers; a heartbeat is sent every tickTime. tickTime is in milliseconds.
+tickTime=9000
+
+# The number of ticks that the initial synchronization phase can take
+# Maximum number of heartbeats (ticks) that followers may take to initially connect to the leader.
+initLimit=10
+
+# The number of ticks that can pass between sending a request and getting an acknowledgement
+# Maximum number of heartbeats (ticks) tolerated between a request and its acknowledgement between followers and the leader.
+syncLimit=5
+
+# ZooKeeper throttles clients so that the number of outstanding requests in the system does not exceed this value. (default: 1000)
+globalOutstandingLimit=1000
+
+# the maximum number of client connections. Increase this if you need to handle more clients
+# Socket-level limit on concurrent connections from a single client to a single server in the ZooKeeper ensemble. (default: 60)
+maxClientCnxns=5000
+
+# Skip ACL checks to reduce permission-validation overhead and improve performance
+skipACL=yes
+
+# yes: each write is fsynced from the page cache to disk before returning; later writes wait for earlier ones.
+# no: writes return once the data reaches the page cache, which improves performance but may lose data in the page cache on power failure.
+forceSync=yes
+
+# When an fsync of the transaction log (WAL) takes longer than this value, a warning is logged; requires forceSync=yes.
+fsync.warningthresholdms=20
+
+# Directory where ZooKeeper stores its data; by default the write-ahead log files are also kept here.
+dataDir=/opt/{{ component_version }}/data
+
+# Directory where ZooKeeper stores its log files.
+dataLogDir=/opt/{{ component_version }}/logs
+
+# the port at which the clients will connect
+# Port that clients use to connect to the ZooKeeper server
+clientPort=2181
+
+# Number of snapshot files to retain (default: 3)
+autopurge.snapRetainCount=3
+
+# Purge interval in hours (default: 0, meaning auto-purge is disabled)
+autopurge.purgeInterval=1
+
+# Embedded Jetty server providing an HTTP interface to the four-letter-word commands
+admin.enableServer=false
+
+# Whitelist of four-letter-word commands, used for monitoring
+4lw.commands.whitelist=stat,ruok,conf,isro,cons,crst,dump,envi,srvr,srst,mntr
+
+#the servers
+{% for host in ansible_play_hosts %}
+server.{{ loop.index }}={{ host }}:2888:3888
+{% endfor %}
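For a hypothetical three-node play (10.0.0.1-3), the loop above renders the quorum section as below, and myid.j2 writes 1, 2 and 3 into each node's data/myid accordingly:

    server.1=10.0.0.1:2888:3888
    server.2=10.0.0.2:2888:3888
    server.3=10.0.0.3:2888:3888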
diff --git a/Apache Zookeeper/3.5.9/zookeeper/role/vars/main.yml b/Apache Zookeeper/3.5.9/zookeeper/role/vars/main.yml
new file mode 100644
index 0000000..e480a39
--- /dev/null
+++ b/Apache Zookeeper/3.5.9/zookeeper/role/vars/main.yml
@@ -0,0 +1,11 @@
+# Image name
+image_name: zookeeper
+
+# Image tag
+image_tag: 3.5.9
+
+# Container name
+container_name: zookeeper
+
+# Component version
+component_version: zookeeper-3.5.9 \ No newline at end of file
diff --git a/ArangoDB/3.6.4/arangodb/hosts b/ArangoDB/3.6.4/arangodb/hosts
new file mode 100644
index 0000000..d529102
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/hosts
@@ -0,0 +1,2 @@
+[arangodb]
+192.168.45.102
diff --git a/ArangoDB/3.6.4/arangodb/install.yml b/ArangoDB/3.6.4/arangodb/install.yml
new file mode 100644
index 0000000..38ef73e
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/install.yml
@@ -0,0 +1,7 @@
+- hosts: arangodb
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/ArangoDB/3.6.4/arangodb/role/defaults/main.yml b/ArangoDB/3.6.4/arangodb/role/defaults/main.yml
new file mode 100644
index 0000000..29794bd
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/defaults/main.yml
@@ -0,0 +1,32 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+# ArangoDB admin username
+arangodb_default_username: root
+
+# ArangoDB admin password
+arangodb_default_pin: galaxy_2019
+
+# ArangoDB admin password (encrypted)
+arangodb_default_pin_encrypt: PBdMaxfC3u+HMzjjij2tyuJWeooSuZNW
+
+# ArangoDB read-only username
+arangodb_query_username: query
+
+# ArangoDB read-only password
+arangodb_query_pin: galaxy2018
+
+# ArangoDB read-only password (encrypted)
+arangodb_query_pin_encrypt: qUA355VopKSx6kwwwXZwqWWEYSu76Slz
+
+# ArangoDB write-only username
+arangodb_upsert_username: upsert
+
+# ArangoDB write-only password
+arangodb_upsert_pin: galaxy2019
+
+# ArangoDB write-only password (encrypted)
+arangodb_upsert_pin_encrypt: LDEb2OekU7iZWiFw6pUYBSozVKP27r1y
diff --git a/ArangoDB/3.6.4/arangodb/role/files/init.zip b/ArangoDB/3.6.4/arangodb/role/files/init.zip
new file mode 100644
index 0000000..13259bb
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/files/init.zip
Binary files differ
diff --git a/ArangoDB/3.6.4/arangodb/role/handlers/main.yml b/ArangoDB/3.6.4/arangodb/role/handlers/main.yml
new file mode 100644
index 0000000..aa0145c
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/ArangoDB/3.6.4/arangodb/role/tasks/deploy.yml b/ArangoDB/3.6.4/arangodb/role/tasks/deploy.yml
new file mode 100644
index 0000000..e1f7e36
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/tasks/deploy.yml
@@ -0,0 +1,43 @@
+- name: Validating ArangoDB server nodes
+ fail:
+    msg: "ArangoDB only supports single-instance deployment. Please check configurations/hosts -> arangodb"
+ when: (groups.arangodb|length) != 1
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}'
+
+- name: Copying ArangoDB docker image
+ copy:
+ src: 'files/{{image_name}}-{{image_tag}}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+
+- name: Loading {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{image_name}}-{{image_tag}}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ notify:
+ - Loading Image
+
+- name: Copying docker-compose.yml
+ template:
+ src: docker-compose.yml.j2
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ backup: false
+ force: true
+
+- name: Initialize arangodb
+ unarchive:
+ src: 'files/init.zip'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/ArangoDB/3.6.4/arangodb/role/tasks/main.yml b/ArangoDB/3.6.4/arangodb/role/tasks/main.yml
new file mode 100644
index 0000000..0d4f3b6
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/tasks/main.yml
@@ -0,0 +1,10 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "unload"
+
diff --git a/ArangoDB/3.6.4/arangodb/role/tasks/status-check.yml b/ArangoDB/3.6.4/arangodb/role/tasks/status-check.yml
new file mode 100644
index 0000000..0f386f7
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
+- name: Waiting for Arangodb to start, 10s
+ shell: sleep 10
+
+- name: Check if the Arangodb process already exists
+ shell: ps -ef | grep -v grep | grep -w "arangod" | wc -l
+ register: process_out
+
+- name: Check if the Arangodb port already exists
+ shell: netstat -anlp | grep "8529" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Arangodb on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout <= '0'
diff --git a/ArangoDB/3.6.4/arangodb/role/tasks/uninstall.yml b/ArangoDB/3.6.4/arangodb/role/tasks/uninstall.yml
new file mode 100644
index 0000000..5015eb6
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/tasks/uninstall.yml
@@ -0,0 +1,16 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/ArangoDB/3.6.4/arangodb/role/templates/docker-compose.yml.j2 b/ArangoDB/3.6.4/arangodb/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..84b1791
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/templates/docker-compose.yml.j2
@@ -0,0 +1,16 @@
+version: '2'
+
+services:
+ arangodb:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: {{ container_name }}
+ environment:
+ ARANGO_ROOT_PASSWORD: "{{ arangodb_default_pin }}"
+ ports:
+ - "8529:8529"
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/etc/arangodb3"
+ - "{{ deploy_dir }}/{{ container_name }}/data:/var/lib/arangodb3"
+ - "{{ deploy_dir }}/{{ container_name }}/log:/var/log/arangodb3"
+ network_mode: "host"
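The container publishes 8529 and sets ARANGO_ROOT_PASSWORD from the role defaults, so a simple post-deploy check against the sample inventory host could be:

    curl -u root:galaxy_2019 http://192.168.45.102:8529/_api/version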
diff --git a/ArangoDB/3.6.4/arangodb/role/vars/main.yml b/ArangoDB/3.6.4/arangodb/role/vars/main.yml
new file mode 100644
index 0000000..944b10a
--- /dev/null
+++ b/ArangoDB/3.6.4/arangodb/role/vars/main.yml
@@ -0,0 +1,8 @@
+# Image name
+image_name: arangodb
+
+# Image tag
+image_tag: 3.6.4
+
+# Container name
+container_name: arangodb
diff --git a/CMAK/3.0.0.6/cmak/hosts b/CMAK/3.0.0.6/cmak/hosts
new file mode 100644
index 0000000..8004593
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/hosts
@@ -0,0 +1,5 @@
+[zookeeper]
+192.168.45.102
+
+[kafka]
+192.168.45.102
diff --git a/CMAK/3.0.0.6/cmak/install.yml b/CMAK/3.0.0.6/cmak/install.yml
new file mode 100644
index 0000000..32356ac
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/install.yml
@@ -0,0 +1,7 @@
+- hosts: kafka
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/CMAK/3.0.0.6/cmak/role/defaults/main.yml b/CMAK/3.0.0.6/cmak/role/defaults/main.yml
new file mode 100644
index 0000000..d636519
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/defaults/main.yml
@@ -0,0 +1,6 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
diff --git a/CMAK/3.0.0.6/cmak/role/files/kafka_client_jaas.conf b/CMAK/3.0.0.6/cmak/role/files/kafka_client_jaas.conf
new file mode 100644
index 0000000..5f8cde7
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/files/kafka_client_jaas.conf
@@ -0,0 +1,5 @@
+KafkaClient {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ username="admin"
+ password="galaxy2019";
+};
diff --git a/CMAK/3.0.0.6/cmak/role/handlers/main.yml b/CMAK/3.0.0.6/cmak/role/handlers/main.yml
new file mode 100644
index 0000000..aa0145c
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/CMAK/3.0.0.6/cmak/role/tasks/deploy.yml b/CMAK/3.0.0.6/cmak/role/tasks/deploy.yml
new file mode 100644
index 0000000..8a7a4a8
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/tasks/deploy.yml
@@ -0,0 +1,34 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/conf'
+
+- name: Copying kafka_client_jaas.conf to {{ deploy_dir }}/{{ container_name }}/conf
+ copy:
+ src: 'files/kafka_client_jaas.conf'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/kafka_client_jaas.conf'
+ force: true
+
+- name: Create CMAK znodes in ZooKeeper
+ shell: docker exec zookeeper zkCli.sh create /kafka-manager "" && docker exec zookeeper zkCli.sh create /kafka-manager/mutex "" && docker exec zookeeper zkCli.sh create /kafka-manager/mutex/locks "" && docker exec zookeeper zkCli.sh create /kafka-manager/mutex/leases ""
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying CMAK docker-compose.yml
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/CMAK/3.0.0.6/cmak/role/tasks/main.yml b/CMAK/3.0.0.6/cmak/role/tasks/main.yml
new file mode 100644
index 0000000..4725441
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/tasks/main.yml
@@ -0,0 +1,9 @@
+- block:
+ - include: unload.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: unload.yml
+ when: (operation) == "uninstall"
diff --git a/CMAK/3.0.0.6/cmak/role/tasks/status-check.yml b/CMAK/3.0.0.6/cmak/role/tasks/status-check.yml
new file mode 100644
index 0000000..22b3bcf
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/tasks/status-check.yml
@@ -0,0 +1,13 @@
+- name: Waiting for CMAK to start, 30s
+ shell: sleep 30
+
+- name: Check if the CMAK port is listening
+ shell: netstat -anlp | grep "9998" | grep java | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "CMAK on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
diff --git a/CMAK/3.0.0.6/cmak/role/tasks/unload.yml b/CMAK/3.0.0.6/cmak/role/tasks/unload.yml
new file mode 100644
index 0000000..894fb5e
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/tasks/unload.yml
@@ -0,0 +1,28 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+  - name: Checking whether ZooKeeper has CMAK (kafka-manager) nodes
+ shell: "docker exec -it zookeeper zkCli.sh ls / | grep kafka-manager | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+  - name: Delete CMAK (kafka-manager) nodes in ZooKeeper
+ shell: "docker exec -it zookeeper zkCli.sh rmr /kafka-manager"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
diff --git a/CMAK/3.0.0.6/cmak/role/templates/docker-compose.yml.j2 b/CMAK/3.0.0.6/cmak/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..0f0724b
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/templates/docker-compose.yml.j2
@@ -0,0 +1,24 @@
+version: '3.6'
+
+services:
+ cmak:
+ image: {{ image_name }}:{{ image_tag }}
+ container_name: {{ container_name }}
+ restart: always
+ command:
+ - "-Dhttp.port=9998"
+ - "-Dcmak.zkhosts={% for dev_info in groups.zookeeper -%}
+{% if loop.last -%}
+{{dev_info}}:2181
+{%- else %}
+{{dev_info}}:2181,
+{%- endif %}
+{%- endfor %}"
+ - "-DbasicAuthentication.enabled=true"
+ - "-DbasicAuthentication.username={{ cmak_default_username }}"
+ - "-DbasicAuthentication.password={{ cmak_default_pin }}"
+ - "-Djava.security.auth.login.config=/cmak/conf/kafka_client_jaas.conf"
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/conf/kafka_client_jaas.conf:/cmak/conf/kafka_client_jaas.conf"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/cmak/logs"
+ network_mode: "host"
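CMAK is started with -Dhttp.port=9998 and basic auth enabled, so a minimal reachability check against the sample inventory host could be the following; the credentials are placeholders, since cmak_default_username/cmak_default_pin are supplied outside the files shown here:

    curl -s -o /dev/null -w "%{http_code}\n" -u <cmak_default_username>:<cmak_default_pin> http://192.168.45.102:9998/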
diff --git a/CMAK/3.0.0.6/cmak/role/vars/main.yml b/CMAK/3.0.0.6/cmak/role/vars/main.yml
new file mode 100644
index 0000000..bf2482b
--- /dev/null
+++ b/CMAK/3.0.0.6/cmak/role/vars/main.yml
@@ -0,0 +1,9 @@
+# Image name
+image_name: cmak
+
+# Image tag
+image_tag: 3.0.0.6
+
+# Container name
+container_name: cmak
+
diff --git a/Chproxy/21.06.30/chproxy/hosts b/Chproxy/21.06.30/chproxy/hosts
new file mode 100644
index 0000000..112e187
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/hosts
@@ -0,0 +1,5 @@
+[clickhouse]
+192.168.45.102
+
+[chproxy]
+192.168.45.102
diff --git a/Chproxy/21.06.30/chproxy/install.yml b/Chproxy/21.06.30/chproxy/install.yml
new file mode 100644
index 0000000..70f84c2
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/install.yml
@@ -0,0 +1,7 @@
+- hosts: chproxy
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Chproxy/21.06.30/chproxy/role/defaults/main.yml b/Chproxy/21.06.30/chproxy/role/defaults/main.yml
new file mode 100644
index 0000000..d636519
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/defaults/main.yml
@@ -0,0 +1,6 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
diff --git a/Chproxy/21.06.30/chproxy/role/handlers/main.yml b/Chproxy/21.06.30/chproxy/role/handlers/main.yml
new file mode 100644
index 0000000..aa0145c
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/Chproxy/21.06.30/chproxy/role/tasks/deploy.yml b/Chproxy/21.06.30/chproxy/role/tasks/deploy.yml
new file mode 100644
index 0000000..ce1cd6b
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/tasks/deploy.yml
@@ -0,0 +1,31 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/config'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying Chproxy config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ with_items:
+ - { src: 'config.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/config.yml', mode: "0644" }
+
+- name: Copying Chproxy docker-compose.yml
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
diff --git a/Chproxy/21.06.30/chproxy/role/tasks/main.yml b/Chproxy/21.06.30/chproxy/role/tasks/main.yml
new file mode 100644
index 0000000..0d4f3b6
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/tasks/main.yml
@@ -0,0 +1,10 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "unload"
+
diff --git a/Chproxy/21.06.30/chproxy/role/tasks/status-check.yml b/Chproxy/21.06.30/chproxy/role/tasks/status-check.yml
new file mode 100644
index 0000000..f1b12e9
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
+- name: Waiting for Chproxy to start, 10s
+ shell: sleep 10
+
+- name: Check if the Chproxy container exists
+ shell: docker ps -a | grep -w "galaxy-chproxy" | wc -l
+ register: process_out
+
+- name: Check if the Chproxy port already exists
+ shell: netstat -anlp | grep "8124" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Chproxy on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '1'
diff --git a/Chproxy/21.06.30/chproxy/role/tasks/uninstall.yml b/Chproxy/21.06.30/chproxy/role/tasks/uninstall.yml
new file mode 100644
index 0000000..5015eb6
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/tasks/uninstall.yml
@@ -0,0 +1,16 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/Chproxy/21.06.30/chproxy/role/templates/config.yml.j2 b/Chproxy/21.06.30/chproxy/role/templates/config.yml.j2
new file mode 100644
index 0000000..8d3bf4a
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/templates/config.yml.j2
@@ -0,0 +1,58 @@
+log_debug: false
+hack_me_please: true
+server:
+ http:
+ listen_addr: ":8124"
+ read_timeout: 6h
+ idle_timeout: 10m
+users:
+ - name: "tsg_report"
+ to_cluster: "report"
+ to_user: "tsg_report"
+ password: "{{ clickhouse_default_pin }}"
+ max_queue_size: 100
+ max_queue_time: 21700s
+
+ - name: "tsg_query"
+ to_cluster: "query"
+ to_user: "tsg_query"
+ password: "{{ clickhouse_query_pin }}"
+ max_queue_size: 100
+ max_queue_time: 610s
+
+ - name: "default"
+ to_cluster: "report"
+ to_user: "default"
+ password: "{{ clickhouse_default_pin }}"
+ max_queue_size: 100
+ max_queue_time: 3610s
+# by default each cluster has `default` user which can be overridden by section `users`
+clusters:
+ - name: "report"
+{% if groups.clickhouse|length >= 3 %}
+ nodes: [ "{{groups.clickhouse[0]}}:8123", "{{groups.clickhouse[1]}}:8123" ]
+ masternode: 1
+{% elif groups.clickhouse|length == 1 %}
+ nodes: [ "{{groups.clickhouse[0]}}:8123" ]
+{% endif %}
+ kill_query_user:
+ name: "default"
+ password: "{{ clickhouse_default_pin }}"
+ users:
+ - name: "default"
+ password: "{{ clickhouse_default_pin }}"
+ - name: "tsg_report"
+ password: "{{ clickhouse_default_pin }}"
+ - name: "query"
+{% if groups.clickhouse|length >= 3 %}
+ nodes: [ "{{groups.clickhouse[1]}}:8123", "{{groups.clickhouse[0]}}:8123" ]
+ masternode: 1
+{% elif groups.clickhouse|length == 1 %}
+ nodes: [ "{{groups.clickhouse[0]}}:8123" ]
+{% endif %}
+ kill_query_user:
+ name: "default"
+ password: "galaxy2019"
+ users:
+ - name: "tsg_query"
+ password: "{{ clickhouse_query_pin }}"
diff --git a/Chproxy/21.06.30/chproxy/role/templates/docker-compose.yml.j2 b/Chproxy/21.06.30/chproxy/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..bd32576
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/templates/docker-compose.yml.j2
@@ -0,0 +1,18 @@
+version: '3'
+
+services:
+ chproxy:
+ image: {{ image_name }}:{{ image_tag }}
+ container_name: {{ container_name }}
+ ports:
+ - "8124:8124"
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/config:/home/config"
+ - "{{ deploy_dir }}/{{ container_name }}/log:/home/log"
+ restart: always
+ networks:
+ olap:
+ ipv4_address: 172.20.88.9
+networks:
+ olap:
+ external: true
diff --git a/Chproxy/21.06.30/chproxy/role/vars/main.yml b/Chproxy/21.06.30/chproxy/role/vars/main.yml
new file mode 100644
index 0000000..0d1786c
--- /dev/null
+++ b/Chproxy/21.06.30/chproxy/role/vars/main.yml
@@ -0,0 +1,8 @@
+#Image name
+image_name: chproxy
+
+#Image version tag
+image_tag: 21.06.30
+
+#Container name
+container_name: galaxy-chproxy
diff --git a/Clickhouse/21.8.13.1/clickhouse/hosts b/Clickhouse/21.8.13.1/clickhouse/hosts
new file mode 100644
index 0000000..499fccd
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/hosts
@@ -0,0 +1,5 @@
+[zookeeper]
+192.168.45.102
+
+[clickhouse]
+192.168.45.102
diff --git a/Clickhouse/21.8.13.1/clickhouse/install.yml b/Clickhouse/21.8.13.1/clickhouse/install.yml
new file mode 100644
index 0000000..5e5a764
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/install.yml
@@ -0,0 +1,7 @@
+- hosts: clickhouse
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/defaults/main.yml b/Clickhouse/21.8.13.1/clickhouse/role/defaults/main.yml
new file mode 100644
index 0000000..91e512d
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/defaults/main.yml
@@ -0,0 +1,12 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used to store application data, logs and configuration files
+data_dir: /data/olap
+
+clickhouse:
+ #Limit on total memory usage. Zero means Unlimited.
+ max_server_memory_usage: 30000000000
+ #Sets the number of threads performing background merges and mutations for tables with MergeTree engines.
+ background_pool_size: 16
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/handlers/main.yml b/Clickhouse/21.8.13.1/clickhouse/role/handlers/main.yml
new file mode 100644
index 0000000..5f188f1
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/handlers/main.yml
@@ -0,0 +1,38 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+- name: Loading Exporter Image
+ docker_image:
+ name: 'clickhouse_exporter'
+ tag: 'v2.0'
+ load_path: '{{ deploy_dir }}/clickhouse/monitor/clickhouse_exporter-2.0.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Exporter Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/clickhouse/monitor/'
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/deploy.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/deploy.yml
new file mode 100644
index 0000000..5b09396
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/deploy.yml
@@ -0,0 +1,132 @@
+- block:
+ - name: Checking system requirements (SSE 4.2 support on the current CPU)
+ shell: lscpu |grep sse4_2 | wc -l
+ register: check_cpu_out
+
+ - name: Checking system requirements result
+ fail:
+ msg: "Running ClickHouse on processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture."
+ when: check_cpu_out.stdout < '1'
+
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: '{{ groups.zookeeper[0] }}'
+
+ - name: To terminate execution
+ fail:
+ msg: "Port 2181 of the zookeeper node is not monitored. The status may be abnormal"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+
+- name: Getting readonly key sha256
+ shell: echo -n "{{ clickhouse_query_pin }}"|sha256sum | tr -d '-' | sed -e 's/^[ ]*//g' | sed -e 's/[ ]*$//g'
+ register: readonly_key_out
+
+- name: Setting readonly_key_sha variable
+ set_fact: readonly_key_sha="{{readonly_key_out.stdout}}"
+
+- name: Getting root key sha256
+ shell: echo -n "{{ clickhouse_default_pin }}"|sha256sum| tr -d '-' | sed -e 's/^[ ]*//g' | sed -e 's/[ ]*$//g'
+ register: root_key_out
+
+- name: Setting root_key_sha variable
+ set_fact: root_key_sha="{{root_key_out.stdout}}"
+
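+# The two digests above are presumably consumed by users.xml.j2 (not shown in
+# this hunk) as password_sha256_hex values for the readonly and default users.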
+- name: Creating ClickHouse install path
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ item.path }}'
+ with_items:
+ - { path: 'clickhouse/clickhouse-server' }
+ - { path: 'clickhouse/rpm' }
+ - { path: 'clickhouse/logs' }
+ - { path: 'clickhouse/monitor' }
+
+- name: Copying ClickHouse service and monitor scripts
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0755
+ force: true
+ with_items:
+ - { src: 'clickhouse-server.j2', dest: '{{ deploy_dir }}/clickhouse/clickhouse-server/clickhouse-server' }
+ - { src: 'clickhouse-server.j2', dest: '/etc/init.d/clickhouse-server' }
+ - { src: 'ck_monitor.sh.j2', dest: '{{ deploy_dir }}/clickhouse/monitor/ck_monitor.sh' }
+
+- name: Copying ClickHouse configuration files to {{ deploy_dir }}/clickhouse/clickhouse-server/
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ force: true
+ with_items:
+ - { src: 'config.xml.j2', dest: '{{ deploy_dir }}/clickhouse/clickhouse-server/config.xml' }
+ - { src: 'users.xml.j2', dest: '{{ deploy_dir }}/clickhouse/clickhouse-server/users.xml' }
+ - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/clickhouse/monitor/docker-compose.yml' }
+
+- name: Copying ClickHouse metrika.xml for query nodes
+ template:
+ src: 'metrika_query.xml.j2'
+ dest: '{{ deploy_dir }}/clickhouse/clickhouse-server/metrika.xml'
+ force: true
+ when: inventory_hostname in groups['clickhouse'][:2]
+
+- name: Copying ClickHouse metrika.xml for data nodes
+ template:
+ src: 'metrika_data.xml.j2'
+ dest: '{{ deploy_dir }}/clickhouse/clickhouse-server/metrika.xml'
+ force: true
+ when: inventory_hostname not in groups['clickhouse'][:2]
+
+- name: Modify clickhouse process limits
+ shell: if [ `cat /etc/security/limits.d/20-nproc.conf | grep clickhouse | wc -l` -eq "0" ];then echo "clickhouse soft nproc 65535" >> /etc/security/limits.d/20-nproc.conf ;fi
+
+#Copy the rpm packages to the target server
+- name: Copying clickhouse rpm files
+ copy:
+ src: 'files/{{ item.file }}'
+ dest: '{{ deploy_dir }}/clickhouse/rpm/'
+ force: true
+ with_items:
+ - { file: 'clickhouse-client-21.8.13.1.altinitystable-2.noarch.rpm' }
+ - { file: 'clickhouse-common-static-21.8.13.1.altinitystable-2.x86_64.rpm' }
+ - { file: 'clickhouse-server-21.8.13.1.altinitystable-2.noarch.rpm' }
+
+
+- name: Installing Clickhouse on data nodes
+ shell: "if [ `rpm -qa | grep {{ item.file }} | wc -l` -eq '0' ]; then rpm -ivh --nodeps {{ deploy_dir }}/clickhouse/rpm/{{ item.file }} ; fi"
+ with_items:
+ - { file: 'clickhouse-client-21.8.13.1.altinitystable-2.noarch.rpm' }
+ - { file: 'clickhouse-common-static-21.8.13.1.altinitystable-2.x86_64.rpm' }
+ - { file: 'clickhouse-server-21.8.13.1.altinitystable-2.noarch.rpm' }
+ ignore_errors: true
+
+- name: Ansible delete the new-version clickhouse-server systemd units and default config dir
+ file:
+ path: "{{ item.filename }}"
+ state: absent
+ with_items:
+ - { filename: '/etc/systemd/system/clickhouse-server.service' }
+ - { filename: '/usr/lib/systemd/system/clickhouse-server.service' }
+ - { filename: '/etc/clickhouse-server' }
+
+- name: Enable clickhouse-server on boot and start the service
+ shell: chkconfig --add clickhouse-server && chkconfig clickhouse-server on && service clickhouse-server start
+
+- name: Copying clickhouse_exporter-2.0.tar
+ copy:
+ src: 'files/clickhouse_exporter-2.0.tar'
+ dest: '{{ deploy_dir }}/clickhouse/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+ - Start Exporter Container
+
+- name: Adding a cron -> Check Clickhouse up to node_exporter
+ cron:
+ name: 'Check Clickhouse up to node_exporter'
+ minute: "*/5"
+ job: '{{ deploy_dir }}/clickhouse/monitor/ck_monitor.sh'
+ user: root
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/status-check.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/status-check.yml
new file mode 100644
index 0000000..62dc352
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/status-check.yml
@@ -0,0 +1,14 @@
+- name: Waiting for Clickhouse to start, 30s
+ shell: sleep 30
+
+- name: Check the Clickhouse service status
+ shell: clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ clickhouse_default_pin }} --query "SELECT version();" | grep "21.8.13.1.altinitystable" | wc -l
+ register: check_mode
+
+- name: To terminate execution
+ fail:
+ msg: "检测到 {{ inventory_hostname }} 节点Clickhouse未正常启动;请保留日志反馈,路径:{{ deploy_dir }}/clickhouse/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_mode.stdout != '1'
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/uninstall.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/uninstall.yml
new file mode 100644
index 0000000..faddb70
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/cluster/uninstall.yml
@@ -0,0 +1,49 @@
+- block:
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'clickhouse_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'clickhouse_exporter'
+ tag: 'v2.0'
+ state: absent
+
+ - name: Copying unload_ck.sh to {{ deploy_dir }}/
+ template:
+ src: 'unload_ck.sh.j2'
+ dest: '{{ deploy_dir }}/unload_ck.sh'
+ force: true
+ mode: 0755
+
+ - name: Uninstalling ClickHouse
+ shell: cd {{ deploy_dir }} && sh unload_ck.sh
+
+ - name: Ansible delete {{ deploy_dir }}/unload_ck.sh
+ file:
+ path: "{{ deploy_dir }}/unload_ck.sh"
+ state: absent
+
+ - name: Checking whether ZooKeeper has Clickhouse nodes
+ shell: "docker exec zookeeper zkCli.sh ls / | grep clickhouse | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete Clickhouse nodes in ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh rmr /clickhouse"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+ - name: Checking if the Clickhouse service already exists
+ shell: rpm -qa | grep clickhouse | wc -l
+ register: check_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Uninstalling ClickHouse fails.Please uninstall manually with yum remove"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/main.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/main.yml
new file mode 100644
index 0000000..7ee1cde
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/main.yml
@@ -0,0 +1,19 @@
+- block:
+ - include: cluster/uninstall.yml
+ - include: cluster/deploy.yml
+ - include: cluster/status-check.yml
+ when: (operation) == "install" and (groups.clickhouse|length) > 1
+
+- block:
+ - include: cluster/uninstall.yml
+ when: (operation) == "uninstall" and (groups.clickhouse|length) > 1
+
+- block:
+ - include: standalone/uninstall.yml
+ - include: standalone/deploy.yml
+ - include: standalone/status-check.yml
+ when: (operation) == "install" and (groups.clickhouse|length) == 1
+
+- block:
+ - include: standalone/uninstall.yml
+ when: (operation) == "uninstall" and (groups.clickhouse|length) == 1
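+# Dispatch summary: cluster/* tasks run when the [clickhouse] group has more than
+# one host, standalone/* tasks when it has exactly one. Example invocation
+# (a sketch; assumes `operation` is supplied as an extra var):
+#   ansible-playbook -i hosts install.yml -e "operation=install"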
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/deploy.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/deploy.yml
new file mode 100644
index 0000000..53cfec8
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/deploy.yml
@@ -0,0 +1,89 @@
+- block:
+ - name: Check the Zookeeper status
+ shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
+ register: port_out
+ delegate_to: "{{ groups.zookeeper[0] }}"
+
+ - name: To terminate execution
+ fail:
+ msg: "Port 2181 of the zookeeper node is not monitored. The status may be abnormal"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: port_out.stdout != '1'
+
+- name: Getting readonly key sha256
+ shell: echo -n "{{ clickhouse_query_pin }}"|sha256sum | tr -d '-' | sed -e 's/^[ ]*//g' | sed -e 's/[ ]*$//g'
+ register: readonly_key_out
+
+- name: Setting readonly_key_sha variable
+ set_fact: readonly_key_sha="{{readonly_key_out.stdout}}"
+
+- name: Getting root key sha256
+ shell: echo -n "{{ clickhouse_default_pin }}"|sha256sum| tr -d '-' | sed -e 's/^[ ]*//g' | sed -e 's/[ ]*$//g'
+ register: root_key_out
+
+- name: Setting root_key_sha variable
+ set_fact: root_key_sha="{{root_key_out.stdout}}"
+
+- name: Modify clickhouse process limits
+ shell: if [ `cat /etc/security/limits.d/20-nproc.conf | grep clickhouse | wc -l` -eq "0" ];then echo "clickhouse soft nproc 65535" >> /etc/security/limits.d/20-nproc.conf ;fi
+
+- name: Creating ClickHouse install path
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.path }}'
+ with_items:
+ - { path: 'clickhouse-server' }
+ - { path: 'logs' }
+ - { path: 'monitor' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying ClickHouse configuration files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ force: true
+ with_items:
+ - { src: 'standalone/config.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/clickhouse-server/config.xml' }
+ - { src: 'users.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/clickhouse-server/users.xml' }
+ - { src: 'standalone/metrika_standalone.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/clickhouse-server/metrika.xml' }
+ - { src: 'standalone/docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml' }
+ notify:
+ - Start Container
+
+- name: Copying clickhouse_exporter-2.0.tar
+ copy:
+ src: 'files/clickhouse_exporter-2.0.tar'
+ dest: '{{ deploy_dir }}/clickhouse/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Copying ClickHouse monitor files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ force: true
+ with_items:
+ - { src: 'standalone/ck_monitor.sh.j2', dest: '{{ deploy_dir }}/clickhouse/monitor/ck_monitor.sh', mode: '0755' }
+ - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml', mode: '0644' }
+ notify:
+ - Start Exporter Container
+
+- name: Adding a cron -> Check Clickhouse up to node_exporter
+ cron:
+ name: 'Check Clickhouse up to node_exporter'
+ minute: "*/5"
+ job: '{{ deploy_dir }}/clickhouse/monitor/ck_monitor.sh'
+ user: root
+
+- meta: flush_handlers
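+# Note: the exporter tar and ck_monitor.sh land under {{ deploy_dir }}/clickhouse/monitor/,
+# while the exporter docker-compose.yml is rendered to {{ deploy_dir }}/{{ container_name }}/monitor/.
+# These paths only line up if container_name is "clickhouse" (assumed from the role vars,
+# which are not shown in this hunk).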
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/status-check.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/status-check.yml
new file mode 100644
index 0000000..bdd462f
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/status-check.yml
@@ -0,0 +1,14 @@
+- name: Waiting for Clickhouse to start, 30s
+ shell: sleep 30
+
+- name: Check the Clickhouse service status
+ shell: docker exec -it clickhouse clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ clickhouse_default_pin }} --query "SELECT version();" | grep "21.8.13.1.altinitystable" | wc -l
+ register: check_mode
+
+- name: To terminate execution
+ fail:
+ msg: "检测到 {{ inventory_hostname }} 节点Clickhouse未正常启动;请保留日志反馈,路径:{{ deploy_dir }}/clickhouse/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_mode.stdout != '1'
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/uninstall.yml b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/uninstall.yml
new file mode 100644
index 0000000..1b3fb5f
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/tasks/standalone/uninstall.yml
@@ -0,0 +1,50 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'clickhouse_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'clickhouse_exporter'
+ tag: 'v2.0'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Checking whether ZooKeeper has Clickhouse nodes
+ shell: "docker exec zookeeper zkCli.sh ls / | grep clickhouse | wc -l"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ register: has_zknode
+
+ - name: Delete Clickhouse nodes in ZooKeeper
+ shell: "docker exec zookeeper zkCli.sh rmr /clickhouse"
+ run_once: true
+ delegate_to: "{{ groups.zookeeper[0] }}"
+ when: has_zknode.stdout >= '1'
+
+ - name: Checking if the Clickhouse service already exists
+ shell: rpm -qa | grep clickhouse | wc -l
+ register: check_out
+
+ - name: To terminate execution
+ fail:
+ msg: "Uninstalling ClickHouse fails.Please uninstall manually with yum remove"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/ck_monitor.sh.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/ck_monitor.sh.j2
new file mode 100644
index 0000000..9f0d532
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/ck_monitor.sh.j2
@@ -0,0 +1,28 @@
+#!/bin/bash
+
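+# Writes clickhouse_up, ck_processes_count, ck_merges_count and ck_connect_time
+# to the node_exporter textfile-collector file below, assuming node_exporter is
+# configured to read *.prom files from {{ deploy_dir }}/node-exporter/prom/.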
+CK_USER="default"
+CK_PIN="{{ clickhouse_default_pin }}"
+CK_MONITOR_PROM_FILE="{{ deploy_dir }}/node-exporter/prom/ck_monitor.prom"
+CK_STATUS=`ps -ef |grep "clickhouse-server/config.xml" | grep -v grep | wc -l`
+
+if [ $CK_STATUS -eq "1" ];then
+echo "clickhouse_up 1" > $CK_MONITOR_PROM_FILE
+else
+echo "clickhouse_up 0" > $CK_MONITOR_PROM_FILE
+fi
+
+current1=`date "+%Y-%m-%d %H:%M:%S"`
+startDate=`date -d "${current1}" +%s`
+pcount=` clickhouse-client -h 127.0.0.1 --port 9001 -m -u $CK_USER --password $CK_PIN --max_execution_time=100 --query="select count(*) from system.processes" `
+
+current2=`date "+%Y-%m-%d %H:%M:%S"`
+endDate=`date -d "${current2}" +%s`
+diff=`expr $endDate - $startDate`
+
+mcount=` clickhouse-client -h 127.0.0.1 --port 9001 -m -u $CK_USER --password $CK_PIN --max_execution_time=100 --query="select count(*) from system.merges" `
+
+
+echo ck_processes_count $pcount >> $CK_MONITOR_PROM_FILE
+echo ck_merges_count $mcount >> $CK_MONITOR_PROM_FILE
+echo ck_connect_time $diff >> $CK_MONITOR_PROM_FILE
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/clickhouse-server.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/clickhouse-server.j2
new file mode 100644
index 0000000..4b41673
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/clickhouse-server.j2
@@ -0,0 +1,355 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: clickhouse-server
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Required-Start:
+# Required-Stop:
+# Short-Description: Yandex clickhouse-server daemon
+### END INIT INFO
+
+
+CLICKHOUSE_USER=clickhouse
+CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
+SHELL=/bin/bash
+PROGRAM=clickhouse-server
+GENERIC_PROGRAM=clickhouse
+EXTRACT_FROM_CONFIG=${GENERIC_PROGRAM}-extract-from-config
+SYSCONFDIR={{ deploy_dir }}/clickhouse/$PROGRAM
+CLICKHOUSE_LOGDIR={{ deploy_dir }}/clickhouse/clickhouse-server
+CLICKHOUSE_LOGDIR_USER=root
+CLICKHOUSE_DATADIR_OLD={{ deploy_dir }}/clickhouse/clickhouse_old
+LOCALSTATEDIR=/var/lock
+BINDIR=/usr/bin
+CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
+CLICKHOUSE_CONFIG={{ deploy_dir }}/clickhouse/clickhouse-server/config.xml
+LOCKFILE=$LOCALSTATEDIR/$PROGRAM
+RETVAL=0
+CLICKHOUSE_PIDDIR={{ deploy_dir }}/clickhouse/$PROGRAM
+CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
+
+# Some systems lack "flock"
+command -v flock >/dev/null && FLOCK=flock
+
+
+# Override defaults from optional config file
+test -f /etc/default/clickhouse && . /etc/default/clickhouse
+
+# On x86_64, check for required instruction set.
+if uname -mpi | grep -q 'x86_64'; then
+ if ! grep -q 'sse4_2' /proc/cpuinfo; then
+ # On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check.
+ if ! grep -q 'Common KVM processor' /proc/cpuinfo; then
+
+ # Some other VMs also report wrong flags in cpuinfo.
+ # Tricky way to test for instruction set:
+ # create temporary binary and run it;
+ # if it get caught illegal instruction signal,
+ # then required instruction set is not supported really.
+ #
+ # Generated this way:
+ # gcc -xc -Os -static -nostdlib - <<< 'void _start() { __asm__("pcmpgtq %%xmm0, %%xmm1; mov $0x3c, %%rax; xor %%rdi, %%rdi; syscall":::"memory"); }' && strip -R .note.gnu.build-id -R .comment -R .eh_frame -s ./a.out && gzip -c -9 ./a.out | base64 -w0; echo
+
+ if ! (echo -n 'H4sICAwAW1cCA2Eub3V0AKt39XFjYmRkgAEmBjsGEI+H0QHMd4CKGyCUAMUsGJiBJDNQNUiYlQEZOKDQclB9cnD9CmCSBYqJBRxQOvBpSQobGfqIAWn8FuYnPI4fsAGyPQz/87MeZtArziguKSpJTGLQK0mtKGGgGHADMSgoYH6AhTMPNHyE0NQzYuEzYzEXFr6CBPQDANAsXKTwAQAA' | base64 -d | gzip -d > /tmp/clickhouse_test_sse42 && chmod a+x /tmp/clickhouse_test_sse42 && /tmp/clickhouse_test_sse42); then
+ echo 'Warning! SSE 4.2 instruction set is not supported'
+ #exit 3
+ fi
+ fi
+ fi
+fi
+
+
+SUPPORTED_COMMANDS="{start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}"
+is_supported_command()
+{
+ echo "$SUPPORTED_COMMANDS" | grep -E "(\{|\|)$1(\||})" &> /dev/null
+}
+
+
+is_running()
+{
+ [ -r "$CLICKHOUSE_PIDFILE" ] && pgrep -s $(cat "$CLICKHOUSE_PIDFILE") 1> /dev/null 2> /dev/null
+}
+
+
+wait_for_done()
+{
+ while is_running; do
+ sleep 1
+ done
+}
+
+
+die()
+{
+ echo $1 >&2
+ exit 1
+}
+
+
+# Check that configuration file is Ok.
+check_config()
+{
+ if [ -x "$BINDIR/$EXTRACT_FROM_CONFIG" ]; then
+ su -s $SHELL ${CLICKHOUSE_USER} -c "$BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. You may use forcerestart if you are sure.";
+ fi
+}
+
+
+initdb()
+{
+ if [ -d ${SYSCONFDIR} ]; then
+ su -s /bin/sh ${CLICKHOUSE_USER} -c "test -w ${SYSCONFDIR}" || chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${SYSCONFDIR}
+ fi
+
+ if [ -x "$BINDIR/$EXTRACT_FROM_CONFIG" ]; then
+ CLICKHOUSE_DATADIR_FROM_CONFIG=$(su -s $SHELL ${CLICKHOUSE_USER} -c "$BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path")
+ if [ "(" "$?" -ne "0" ")" -o "(" -z "${CLICKHOUSE_DATADIR_FROM_CONFIG}" ")" ]; then
+ die "Cannot obtain value of path from config file: ${CLICKHOUSE_CONFIG}";
+ fi
+ echo "Path to data directory in ${CLICKHOUSE_CONFIG}: ${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ else
+ CLICKHOUSE_DATADIR_FROM_CONFIG="/var/lib/clickhouse"
+ fi
+
+ if ! getent group ${CLICKHOUSE_USER} >/dev/null; then
+ echo "Can't chown to non-existing user ${CLICKHOUSE_USER}"
+ return
+ fi
+ if ! getent passwd ${CLICKHOUSE_GROUP} >/dev/null; then
+ echo "Can't chown to non-existing group ${CLICKHOUSE_GROUP}"
+ return
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -r ${CLICKHOUSE_CONFIG}"); then
+ echo "Warning! clickhouse config [${CLICKHOUSE_CONFIG}] not readable by user [${CLICKHOUSE_USER}]"
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -O \"${CLICKHOUSE_DATADIR_FROM_CONFIG}\" && test -G \"${CLICKHOUSE_DATADIR_FROM_CONFIG}\""); then
+ if [ $(dirname "${CLICKHOUSE_DATADIR_FROM_CONFIG}") == "/" ]; then
+ echo "Directory ${CLICKHOUSE_DATADIR_FROM_CONFIG} seems too dangerous to chown."
+ else
+ if [ ! -e "${CLICKHOUSE_DATADIR_FROM_CONFIG}" ]; then
+ echo "Creating directory ${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ mkdir -p "${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ fi
+
+ echo "Changing owner of [${CLICKHOUSE_DATADIR_FROM_CONFIG}] to [${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP}]"
+ chown -R ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} "${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ fi
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -w ${CLICKHOUSE_LOGDIR}"); then
+ echo "Changing owner of [${CLICKHOUSE_LOGDIR}/*] to [${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP}]"
+ chown -R ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_LOGDIR}/*
+ echo "Changing owner of [${CLICKHOUSE_LOGDIR}] to [${CLICKHOUSE_LOGDIR_USER}:${CLICKHOUSE_GROUP}]"
+ chown ${CLICKHOUSE_LOGDIR_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_LOGDIR}
+ fi
+}
+
+
+start()
+{
+ [ -x $BINDIR/$PROGRAM ] || exit 0
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ echo -n "Start $PROGRAM service: "
+
+ if is_running; then
+ echo -n "already running "
+ EXIT_STATUS=1
+ else
+ ulimit -n 262144
+ mkdir -p $CLICKHOUSE_PIDDIR
+ chown -R $CLICKHOUSE_USER:$CLICKHOUSE_GROUP $CLICKHOUSE_PIDDIR
+ initdb
+ if ! is_running; then
+ # Lock should not be held while running child process, so we release the lock. Note: obviously, there is race condition.
+ # But clickhouse-server has protection from simultaneous runs with same data directory.
+ su -s $SHELL ${CLICKHOUSE_USER} -c "$FLOCK -u 9; exec -a \"$PROGRAM\" \"$BINDIR/$PROGRAM\" --daemon --pid-file=\"$CLICKHOUSE_PIDFILE\" --config-file=\"$CLICKHOUSE_CONFIG\""
+ EXIT_STATUS=$?
+ if [ $EXIT_STATUS -ne 0 ]; then
+ break
+ fi
+ fi
+ fi
+
+ if [ $EXIT_STATUS -eq 0 ]; then
+ echo "DONE"
+ else
+ echo "FAILED"
+ fi
+
+ return $EXIT_STATUS
+}
+
+
+stop()
+{
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ if [ -f $CLICKHOUSE_PIDFILE ]; then
+
+ echo -n "Stop $PROGRAM service: "
+
+ kill -TERM $(cat "$CLICKHOUSE_PIDFILE")
+
+ wait_for_done
+
+ echo "DONE"
+ fi
+ return $EXIT_STATUS
+}
+
+
+restart()
+{
+ check_config
+ stop
+ start
+}
+
+
+forcestop()
+{
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ echo -n "Stop forcefully $PROGRAM service: "
+
+ kill -KILL $(cat "$CLICKHOUSE_PIDFILE")
+
+ wait_for_done
+
+ echo "DONE"
+ return $EXIT_STATUS
+}
+
+
+forcerestart()
+{
+ forcestop
+ start
+}
+
+use_cron()
+{
+ # 1. running systemd
+ if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
+ return 1
+ fi
+ # 2. disabled by config
+ if [ -z "$CLICKHOUSE_CRONFILE" ]; then
+ return 2
+ fi
+ return 0
+}
+
+enable_cron()
+{
+ use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE"
+}
+
+
+disable_cron()
+{
+ use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE"
+}
+
+
+is_cron_disabled()
+{
+ use_cron || return 0
+
+ # Assumes that either no lines are commented or all lines are commented.
+ # Also please note, that currently cron file for ClickHouse has only one line (but some time ago there was more).
+ grep -q -E '^#' "$CLICKHOUSE_CRONFILE";
+}
+
+
+main()
+{
+ # See how we were called.
+ EXIT_STATUS=0
+ case "$1" in
+ start)
+ start && enable_cron
+ ;;
+ stop)
+ disable_cron && stop
+ ;;
+ restart)
+ restart && enable_cron
+ ;;
+ forcestop)
+ disable_cron && forcestop
+ ;;
+ forcerestart)
+ forcerestart && enable_cron
+ ;;
+ reload)
+ restart
+ ;;
+ condstart)
+ is_running || start
+ ;;
+ condstop)
+ is_running && stop
+ ;;
+ condrestart)
+ is_running && restart
+ ;;
+ condreload)
+ is_running && restart
+ ;;
+ initdb)
+ initdb
+ ;;
+ enable_cron)
+ enable_cron
+ ;;
+ disable_cron)
+ disable_cron
+ ;;
+ *)
+ echo "Usage: $0 $SUPPORTED_COMMANDS"
+ exit 2
+ ;;
+ esac
+
+ exit $EXIT_STATUS
+}
+
+
+status()
+{
+ if is_running; then
+ echo "$PROGRAM service is running"
+ else
+ if is_cron_disabled; then
+ echo "$PROGRAM service is stopped";
+ else
+ echo "$PROGRAM: process unexpectedly terminated"
+ fi
+ fi
+}
+
+
+# Running commands without need of locking
+case "$1" in
+status)
+ status
+ exit 0
+ ;;
+esac
+
+
+(
+ if $FLOCK -n 9; then
+ main "$@"
+ else
+ echo "Init script is already running" && exit 1
+ fi
+) 9> $LOCKFILE
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/config.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/config.xml.j2
new file mode 100644
index 0000000..8e294c8
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/config.xml.j2
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<yandex>
+ <logger>
+ <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
+ <level>error</level>
+ <log>{{ deploy_dir }}/clickhouse/logs/clickhouse-server.log</log>
+ <errorlog>{{ deploy_dir }}/clickhouse/logs/clickhouse-server.err.log</errorlog>
+ <size>200M</size>
+ <count>10</count>
+ <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
+ </logger>
+ <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
+ <http_port>8123</http_port>
+ <tcp_port>9001</tcp_port>
+ <max_server_memory_usage>{{ clickhouse.max_server_memory_usage }}</max_server_memory_usage>
+
+ <!-- For HTTPS and SSL over native protocol. -->
+ <!--
+ <https_port>8443</https_port>
+ <tcp_port_secure>9440</tcp_port_secure>
+ -->
+
+ <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
+ <openSSL>
+ <server> <!-- Used for https server AND secure tcp port -->
+ <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
+ <certificateFile>{{ deploy_dir }}/clickhouse/clickhouse-server/server.crt</certificateFile>
+ <privateKeyFile>{{ deploy_dir }}/clickhouse/clickhouse-server/server.key</privateKeyFile>
+ <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
+ <dhParamsFile>{{ deploy_dir }}/clickhouse/clickhouse-server/dhparam.pem</dhParamsFile>
+ <verificationMode>none</verificationMode>
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ </server>
+
+ <client> <!-- Used for connecting to https dictionary source -->
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
+ <invalidCertificateHandler>
+ <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
+ <name>RejectCertificateHandler</name>
+ </invalidCertificateHandler>
+ </client>
+ </openSSL>
+
+ <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
+ <!--
+ <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+ -->
+
+ <!-- Port for communication between replicas. Used for data exchange. -->
+ <interserver_http_port>9009</interserver_http_port>
+
+ <!-- Hostname that is used by other replicas to request this server.
+ If not specified, than it is determined analoguous to 'hostname -f' command.
+ This setting could be used to switch replication to another network interface.
+ -->
+
+ <interserver_http_host>{{ inventory_hostname }}</interserver_http_host>
+
+
+ <!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
+ <listen_host>0.0.0.0</listen_host>
+ <!-- Same for hosts with disabled ipv6: -->
+ <!--<listen_host>0.0.0.0</listen_host>-->
+
+ <!-- Default values - try listen localhost on ipv4 and ipv6: -->
+
+<!--<listen_host>::1</listen_host>-->
+ <!-- <listen_host>127.0.0.1</listen_host>-->
+
+ <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
+ <!-- <listen_try>0</listen_try>-->
+
+ <!-- Allow listen on same address:port -->
+ <!-- <listen_reuse_port>0</listen_reuse_port>-->
+
+ <listen_backlog>64</listen_backlog>
+
+ <max_connections>4096</max_connections>
+ <keep_alive_timeout>600</keep_alive_timeout>
+
+ <!-- Maximum number of concurrent queries. -->
+ <!-- 21.12version 150 change to 500. -->
+ <max_concurrent_queries>500</max_concurrent_queries>
+
+ <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
+ correct maximum value. -->
+ <!-- <max_open_files>262144</max_open_files> -->
+
+ <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
+ Uncompressed cache is advantageous only for very short queries and in rare cases.
+ -->
+ <uncompressed_cache_size>8589934592</uncompressed_cache_size>
+
+ <!-- Approximate size of mark cache, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ You should not lower this value.
+ -->
+ <mark_cache_size>5368709120</mark_cache_size>
+
+
+ <!-- Path to data directory, with trailing slash. -->
+<!-- <path>{{ data_dir }}/clickhouse/</path> -->
+ <path>{{ deploy_dir }}/clickhouse/</path>
+
+ <!-- Path to temporary data for processing hard queries. -->
+<!-- <tmp_path>{{ data_dir }}/clickhouse/tmp/</tmp_path>-->
+ <tmp_path>{{ deploy_dir }}/clickhouse/tmp/</tmp_path>
+
+ <!-- Directory with user provided files that are accessible by 'file' table function. -->
+ <user_files_path>{{ deploy_dir }}/clickhouse/user_files/</user_files_path>
+
+ <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
+ <users_config>users.xml</users_config>
+
+ <!-- Default profile of settings. -->
+ <default_profile>default</default_profile>
+
+ <!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distibuted DDL worker and so on). -->
+ <!-- <system_profile>default</system_profile> -->
+
+ <!-- Default database. -->
+ <default_database>default</default_database>
+
+ <!-- Server time zone could be set here.
+
+ Time zone is used when converting between String and DateTime types,
+ when printing DateTime in text formats and parsing DateTime from text,
+ it is used in date and time related functions, if specific time zone was not passed as an argument.
+
+ Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
+ If not specified, system time zone at server startup is used.
+
+ Please note, that server could display time zone alias instead of specified name.
+ Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
+ -->
+ <!-- <timezone>Europe/Moscow</timezone> -->
+ <timezone>UTC</timezone>
+ <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
+ Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+ -->
+ <!-- <umask>022</umask> -->
+
+ <!-- Configuration of clusters that could be used in Distributed tables.
+ https://clickhouse.yandex/docs/en/table_engines/distributed/
+ -->
+ <remote_servers incl="clickhouse_remote_servers" >
+ <!-- Test only shard config for testing distributed storage
+ <test_shard_localhost>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9000</port>
+ </replica>
+ </shard>
+ </test_shard_localhost>
+ <test_shard_localhost_secure>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9440</port>
+ <secure>1</secure>
+ </replica>
+ </shard>
+ </test_shard_localhost_secure>-->
+ </remote_servers>
+
+
+ <!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
+ By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
+ Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
+ -->
+
+ <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/
+ -->
+ <zookeeper incl="zookeeper-servers" optional="true" />
+
+ <!-- Substitutions for parameters of replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
+ -->
+ <macros incl="macros" optional="true" />
+
+
+ <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
+ <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
+
+
+ <!-- Maximum session timeout, in seconds. Default: 3600. -->
+ <max_session_timeout>21600</max_session_timeout>
+
+ <!-- Default session timeout, in seconds. Default: 60. -->
+ <default_session_timeout>6000</default_session_timeout>
+<max_table_size_to_drop>0</max_table_size_to_drop>
+<max_partition_size_to_drop>0</max_partition_size_to_drop>
+<include_from>{{ deploy_dir }}/clickhouse/clickhouse-server/metrika.xml</include_from>
+ <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
+ <!--
+ interval - send every X second
+ root_path - prefix for keys
+ hostname_in_path - append hostname to root_path (default = true)
+ metrics - send data from table system.metrics
+ events - send data from table system.events
+ asynchronous_metrics - send data from table system.asynchronous_metrics
+ -->
+ <!--
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>60</interval>
+ <root_path>one_min</root_path>
+ <hostname_in_path>true</hostname_in_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>true</asynchronous_metrics>
+ </graphite>
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>1</interval>
+ <root_path>one_sec</root_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>false</asynchronous_metrics>
+ </graphite>
+ -->
+
+
+ <!-- Query log. Used only for queries with setting log_queries = 1. -->
+ <query_log>
+ <!-- What table to insert data. If table is not exist, it will be created.
+ When query log structure is changed after system update,
+ then old table will be renamed and new table will be created automatically.
+ -->
+ <database>system</database>
+ <table>query_log</table>
+ <!--
+ PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
+ Example:
+ event_date
+ toMonday(event_date)
+ toYYYYMM(event_date)
+ toStartOfHour(event_time)
+ -->
+ <partition_by>toYYYYMM(event_date)</partition_by>
+ <!-- Interval of flushing data. -->
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </query_log>
+
+
+ <!-- Uncomment if use part_log
+ <part_log>
+ <database>system</database>
+ <table>part_log</table>
+
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </part_log>
+ -->
+
+
+ <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
+ See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
+ -->
+
+ <!-- Path to file with region hierarchy. -->
+ <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
+
+ <!-- Path to directory with files containing names of regions -->
+ <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
+
+
+ <!-- Configuration of external dictionaries. See:
+ https://clickhouse.yandex/docs/en/dicts/external_dicts/
+ -->
+ <dictionaries_config>*_dictionary.xml</dictionaries_config>
+
+ <!-- Uncomment if you want data to be compressed 30-100% better.
+ Don't do that if you just started using ClickHouse.
+ -->
+ <compression incl="clickhouse_compression">
+ <!--
+ <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
+ <case>
+
+ <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
+ <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
+ <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
+
+ <!- - What compression method to use. - ->
+ <method>zstd</method>
+ </case>
+ -->
+ </compression>
+
+ <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
+ Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
+ <distributed_ddl>
+ <!-- Path in ZooKeeper to queue with DDL queries -->
+ <path>/clickhouse/task_queue/ddl</path>
+
+ <!-- Settings from this profile will be used to execute DDL queries -->
+ <!-- <profile>default</profile> -->
+ </distributed_ddl>
+
+ <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
+ <merge_tree>
+ <max_bytes_to_merge_at_max_space_in_pool>60000000000</max_bytes_to_merge_at_max_space_in_pool>
+ <ttl_only_drop_parts>1</ttl_only_drop_parts>
+ <min_merge_bytes_to_use_direct_io>0</min_merge_bytes_to_use_direct_io>
+ <max_suspicious_broken_parts>100</max_suspicious_broken_parts>
+ </merge_tree>
+
+
+ <!-- Protection from accidental DROP.
+ If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
+ If you want do delete one table and don't want to restart clickhouse-server, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
+ By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
+ The same for max_partition_size_to_drop.
+ Uncomment to disable protection.
+ -->
+ <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
+ <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
+
+ <!-- Example of parameters for GraphiteMergeTree table engine -->
+ <graphite_rollup_example>
+ <pattern>
+ <regexp>click_cost</regexp>
+ <function>any</function>
+ <retention>
+ <age>0</age>
+ <precision>3600</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>60</precision>
+ </retention>
+ </pattern>
+ <default>
+ <function>max</function>
+ <retention>
+ <age>0</age>
+ <precision>60</precision>
+ </retention>
+ <retention>
+ <age>3600</age>
+ <precision>300</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>3600</precision>
+ </retention>
+ </default>
+ </graphite_rollup_example>
+
+ <!-- Directory in <clickhouse-path> containing schema files for various input formats.
+ The directory will be created if it doesn't exist.
+ -->
+ <format_schema_path>{{ deploy_dir }}/clickhouse/format_schemas/</format_schema_path>
+
+ <!--
+ <storage_configuration>
+ <disks>
+ <ssd>
+ <path>if you want wo use this policies, please config the ssd mount path</path>
+ </ssd>
+ </disks>
+
+ <policies>
+ <ssd_to_hdd>
+ <volumes>
+ <hot>
+ <disk>ssd</disk>
+ </hot>
+ <default>
+ <disk>default</disk>
+ </default>
+ </volumes>
+ <move_factor>0.1</move_factor>
+ </ssd_to_hdd>
+ </policies>
+ </storage_configuration>
+ -->
+
+ <!-- Uncomment to disable ClickHouse internal DNS caching. -->
+ <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
+</yandex>
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/docker-compose_exporter.yml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/docker-compose_exporter.yml.j2
new file mode 100644
index 0000000..3f37ef7
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/docker-compose_exporter.yml.j2
@@ -0,0 +1,20 @@
+version: '3.3'
+
+services:
+ clickhouse_exporter:
+ image: clickhouse_exporter:v2.0
+ container_name: clickhouse_exporter
+ ports:
+ - 9904:9116
+ restart: always
+ command:
+ - -scrape_uri=http://{{ inventory_hostname }}:8123/
+ environment:
+ - CLICKHOUSE_USER=default
+ - CLICKHOUSE_PASSWORD={{ clickhouse_default_pin }}
+ networks:
+ olap:
+ ipv4_address: 172.20.88.10
+networks:
+ olap:
+ external: true
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_data.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_data.xml.j2
new file mode 100644
index 0000000..4d88504
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_data.xml.j2
@@ -0,0 +1,47 @@
+<yandex>
+<!-- ClickHouse cluster nodes -->
+<clickhouse_remote_servers>
+
+<ck_cluster>
+ <shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ inventory_hostname }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+ </shard>
+</ck_cluster>
+
+</clickhouse_remote_servers>
+<zookeeper-servers>
+{% for dev_info in groups.zookeeper %}
+<node index="{{ loop.index }}">
+ <host>{{ dev_info }}</host>
+ <port>2181</port>
+</node>
+
+{% endfor %}
+<session_timeout_ms>120000</session_timeout_ms>
+</zookeeper-servers>
+
+<networks>
+<ip>::/0</ip>
+</networks>
+
+<!-- Compression settings -->
+<clickhouse_compression>
+<case>
+<min_part_size>10000000000</min_part_size>
+<min_part_size_ratio>0.01</min_part_size_ratio>
+<method>lz4</method> <!-- lz4 compresses faster than zstd but uses more disk space -->
+</case>
+</clickhouse_compression>
+</yandex>
+
+
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_query.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_query.xml.j2
new file mode 100644
index 0000000..448c767
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/metrika_query.xml.j2
@@ -0,0 +1,96 @@
+<yandex>
+<!-- ClickHouse cluster nodes -->
+<clickhouse_remote_servers>
+
+<!-- clickhouse query cluster nodes -->
+<ck_query>
+{% for dev_info in groups.clickhouse %}
+{% if loop.index <= 2 %}
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ dev_info }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+{% endif %}
+{% endfor %}
+</ck_query>
+
+
+<!-- clickhouse data cluster nodes -->
+<ck_cluster>
+{% for dev_info in groups.clickhouse %}
+{% if loop.index > 2 %}
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ dev_info }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+{% endif %}
+{% endfor %}
+</ck_cluster>
+
+
+<!-- all clickhouse nodes -->
+<ck_all>
+{% for dev_info in groups.clickhouse %}
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ dev_info }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+{% endfor %}
+</ck_all>
+
+</clickhouse_remote_servers>
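+<!-- Worked example (assuming four hosts A, B, C, D in the [clickhouse] group):
+     ck_query = A, B; ck_cluster = C, D; ck_all = A, B, C, D -->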
+
+<zookeeper-servers>
+{% for dev_info in groups.zookeeper %}
+<node index="{{ loop.index }}">
+ <host>{{ dev_info }}</host>
+ <port>2181</port>
+</node>
+
+{% endfor %}
+<session_timeout_ms>120000</session_timeout_ms>
+</zookeeper-servers>
+
+<networks>
+<ip>::/0</ip>
+</networks>
+
+<!-- Compression settings -->
+<clickhouse_compression>
+<case>
+<min_part_size>10000000000</min_part_size>
+<min_part_size_ratio>0.01</min_part_size_ratio>
+<method>lz4</method> <!-- lz4 compresses faster than zstd but uses more disk space -->
+</case>
+</clickhouse_compression>
+</yandex>
+
+
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/ck_monitor.sh.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/ck_monitor.sh.j2
new file mode 100644
index 0000000..755ee73
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/ck_monitor.sh.j2
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+CK_USER="default"
+CK_PIN="{{ clickhouse_default_pin }}"
+CK_MONITOR_PROM_FILE="{{ deploy_dir }}/node-exporter/prom/ck_monitor.prom"
+CK_STATUS=`ps -ef |grep "clickhouse-server/config.xml" | grep -v grep | wc -l`
+
+if [ $CK_STATUS -eq "1" ];then
+echo "clickhouse_up 1" > $CK_MONITOR_PROM_FILE
+else
+echo "clickhouse_up 0" > $CK_MONITOR_PROM_FILE
+fi
+
+current1=`date "+%Y-%m-%d %H:%M:%S"`
+startDate=`date -d "${current1}" +%s`
+pcount=`docker exec -it clickhouse clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u $CK_USER --password $CK_PIN --max_execution_time=100 --query="select count(*) from system.processes" `
+
+current2=`date "+%Y-%m-%d %H:%M:%S"`
+endDate=`date -d "${current2}" +%s`
+diff=`expr $endDate - $startDate`
+
+mcount=`docker exec -it clickhouse clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u $CK_USER --password $CK_PIN --max_execution_time=100 --query="select count(*) from system.merges" `
+
+
+echo ck_processes_count $pcount >> $CK_MONITOR_PROM_FILE
+echo ck_merges_count $mcount >> $CK_MONITOR_PROM_FILE
+echo ck_connect_time $diff >> $CK_MONITOR_PROM_FILE
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/config.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/config.xml.j2
new file mode 100644
index 0000000..7cca85d
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/config.xml.j2
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<yandex>
+ <logger>
+ <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
+ <level>error</level>
+ <log>/var/logs/clickhouse-server.log</log>
+ <errorlog>/var/logs/clickhouse-server.err.log</errorlog>
+ <size>200M</size>
+ <count>10</count>
+ <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
+ </logger>
+ <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
+ <http_port>8123</http_port>
+ <tcp_port>9001</tcp_port>
+ <max_server_memory_usage>{{ clickhouse.max_server_memory_usage }}</max_server_memory_usage>
+
+ <!-- For HTTPS and SSL over native protocol. -->
+ <!--
+ <https_port>8443</https_port>
+ <tcp_port_secure>9440</tcp_port_secure>
+ -->
+
+ <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
+ <openSSL>
+ <server> <!-- Used for https server AND secure tcp port -->
+ <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
+ <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
+ <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
+ <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
+ <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
+ <verificationMode>none</verificationMode>
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ </server>
+
+ <client> <!-- Used for connecting to https dictionary source -->
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
+ <invalidCertificateHandler>
+ <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
+ <name>RejectCertificateHandler</name>
+ </invalidCertificateHandler>
+ </client>
+ </openSSL>
+
+ <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
+ <!--
+ <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+ -->
+
+ <!-- Port for communication between replicas. Used for data exchange. -->
+ <interserver_http_port>9009</interserver_http_port>
+
+ <!-- Hostname that is used by other replicas to request this server.
+ If not specified, than it is determined analoguous to 'hostname -f' command.
+ This setting could be used to switch replication to another network interface.
+ -->
+
+ <interserver_http_host>{{ inventory_hostname }}</interserver_http_host>
+
+
+ <!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
+ <listen_host>0.0.0.0</listen_host>
+ <!-- Same for hosts with disabled ipv6: -->
+ <!--<listen_host>0.0.0.0</listen_host>-->
+
+ <!-- Default values - try listen localhost on ipv4 and ipv6: -->
+
+<!--<listen_host>::1</listen_host>-->
+ <!-- <listen_host>127.0.0.1</listen_host>-->
+
+ <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
+ <!-- <listen_try>0</listen_try>-->
+
+ <!-- Allow listen on same address:port -->
+ <!-- <listen_reuse_port>0</listen_reuse_port>-->
+
+ <listen_backlog>64</listen_backlog>
+
+ <max_connections>4096</max_connections>
+ <keep_alive_timeout>600</keep_alive_timeout>
+
+ <!-- Maximum number of concurrent queries. -->
+ <!-- 21.12version 150 change to 500. -->
+ <max_concurrent_queries>500</max_concurrent_queries>
+
+ <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
+ correct maximum value. -->
+ <!-- <max_open_files>262144</max_open_files> -->
+
+ <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
+ Uncompressed cache is advantageous only for very short queries and in rare cases.
+ -->
+ <uncompressed_cache_size>8589934592</uncompressed_cache_size>
+
+ <!-- Approximate size of mark cache, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ You should not lower this value.
+ -->
+ <mark_cache_size>5368709120</mark_cache_size>
+
+
+ <!-- Path to data directory, with trailing slash. -->
+    <!-- <path>{{ data_dir }}/clickhouse/</path> -->
+ <path>/var/lib/clickhouse/</path>
+
+ <!-- Path to temporary data for processing hard queries. -->
+    <!-- <tmp_path>{{ data_dir }}/clickhouse/tmp/</tmp_path> -->
+ <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
+
+ <!-- Directory with user provided files that are accessible by 'file' table function. -->
+ <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
+
+ <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
+ <users_config>users.xml</users_config>
+
+ <!-- Default profile of settings. -->
+ <default_profile>default</default_profile>
+
+    <!-- System profile of settings. These settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
+ <!-- <system_profile>default</system_profile> -->
+
+ <!-- Default database. -->
+ <default_database>default</default_database>
+
+ <!-- Server time zone could be set here.
+
+ Time zone is used when converting between String and DateTime types,
+ when printing DateTime in text formats and parsing DateTime from text,
+ it is used in date and time related functions, if specific time zone was not passed as an argument.
+
+ Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
+ If not specified, system time zone at server startup is used.
+
+ Please note, that server could display time zone alias instead of specified name.
+ Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
+ -->
+ <!-- <timezone>Europe/Moscow</timezone> -->
+ <timezone>UTC</timezone>
+ <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
+ Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+ -->
+ <!-- <umask>022</umask> -->
+
+ <!-- Configuration of clusters that could be used in Distributed tables.
+ https://clickhouse.yandex/docs/en/table_engines/distributed/
+ -->
+ <remote_servers incl="clickhouse_remote_servers" >
+ <!-- Test only shard config for testing distributed storage
+ <test_shard_localhost>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9000</port>
+ </replica>
+ </shard>
+ </test_shard_localhost>
+ <test_shard_localhost_secure>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9440</port>
+ <secure>1</secure>
+ </replica>
+ </shard>
+ </test_shard_localhost_secure>-->
+ </remote_servers>
+
+
+    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file is used as its value.
+         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed with the 'include_from' element.
+         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
+      -->
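+    <!-- A minimal sketch of how an 'incl' attribute resolves in this deployment (element names
+         taken from the metrika.xml template shipped with this role; shown for illustration only):
+             config.xml:   <remote_servers incl="clickhouse_remote_servers" />
+             metrika.xml:  <yandex>
+                             <clickhouse_remote_servers> ... shard/replica definitions ... </clickhouse_remote_servers>
+                           </yandex>
+    -->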
+
+ <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/
+ -->
+ <zookeeper incl="zookeeper-servers" optional="true" />
+
+ <!-- Substitutions for parameters of replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
+ -->
+ <macros incl="macros" optional="true" />
+
+
+ <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
+ <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
+
+
+ <!-- Maximum session timeout, in seconds. Default: 3600. -->
+ <max_session_timeout>21600</max_session_timeout>
+
+ <!-- Default session timeout, in seconds. Default: 60. -->
+ <default_session_timeout>6000</default_session_timeout>
+    <max_table_size_to_drop>0</max_table_size_to_drop>
+    <max_partition_size_to_drop>0</max_partition_size_to_drop>
+    <include_from>/etc/clickhouse-server/metrika.xml</include_from>
+ <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
+ <!--
+ interval - send every X second
+ root_path - prefix for keys
+ hostname_in_path - append hostname to root_path (default = true)
+ metrics - send data from table system.metrics
+ events - send data from table system.events
+ asynchronous_metrics - send data from table system.asynchronous_metrics
+ -->
+ <!--
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>60</interval>
+ <root_path>one_min</root_path>
+ <hostname_in_path>true</hostname_in_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>true</asynchronous_metrics>
+ </graphite>
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>1</interval>
+ <root_path>one_sec</root_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>false</asynchronous_metrics>
+ </graphite>
+ -->
+
+
+ <!-- Query log. Used only for queries with setting log_queries = 1. -->
+ <query_log>
+        <!-- What table to insert data into. If the table does not exist, it will be created.
+             When the query log structure changes after a system update,
+             the old table will be renamed and a new table will be created automatically.
+ -->
+ <database>system</database>
+ <table>query_log</table>
+ <!--
+ PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
+ Example:
+ event_date
+ toMonday(event_date)
+ toYYYYMM(event_date)
+ toStartOfHour(event_time)
+ -->
+ <partition_by>toYYYYMM(event_date)</partition_by>
+ <!-- Interval of flushing data. -->
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </query_log>
+
+
+ <!-- Uncomment if use part_log
+ <part_log>
+ <database>system</database>
+ <table>part_log</table>
+
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </part_log>
+ -->
+
+
+ <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
+ See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
+ -->
+
+ <!-- Path to file with region hierarchy. -->
+ <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
+
+ <!-- Path to directory with files containing names of regions -->
+ <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
+
+
+ <!-- Configuration of external dictionaries. See:
+ https://clickhouse.yandex/docs/en/dicts/external_dicts/
+ -->
+ <dictionaries_config>*_dictionary.xml</dictionaries_config>
+
+ <!-- Uncomment if you want data to be compressed 30-100% better.
+ Don't do that if you just started using ClickHouse.
+ -->
+ <compression incl="clickhouse_compression">
+ <!--
+ <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
+ <case>
+
+ <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
+ <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
+ <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
+
+ <!- - What compression method to use. - ->
+ <method>zstd</method>
+ </case>
+ -->
+ </compression>
+
+ <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
+ Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
+ <distributed_ddl>
+ <!-- Path in ZooKeeper to queue with DDL queries -->
+ <path>/clickhouse/task_queue/ddl</path>
+
+ <!-- Settings from this profile will be used to execute DDL queries -->
+ <!-- <profile>default</profile> -->
+ </distributed_ddl>
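+    <!-- Example (sketch) of a distributed DDL statement that goes through this queue, using the
+         ck_cluster cluster name defined in metrika.xml:
+             CREATE DATABASE IF NOT EXISTS demo ON CLUSTER ck_cluster;
+    -->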
+
+ <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
+ <merge_tree>
+ <max_bytes_to_merge_at_max_space_in_pool>60000000000</max_bytes_to_merge_at_max_space_in_pool>
+ <ttl_only_drop_parts>1</ttl_only_drop_parts>
+ <min_merge_bytes_to_use_direct_io>0</min_merge_bytes_to_use_direct_io>
+ <max_suspicious_broken_parts>100</max_suspicious_broken_parts>
+ </merge_tree>
+
+
+ <!-- Protection from accidental DROP.
+         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
+         If you want to delete one table and don't want to restart clickhouse-server, you can create the special file <clickhouse-path>/flags/force_drop_table and run the DROP once.
+         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
+         The same applies to max_partition_size_to_drop.
+         Uncomment to disable protection.
+ -->
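+    <!-- Minimal sketch of the force-drop flag (path assumes the default <path> above; run it on the
+         host or inside the container, depending on how this role deploys the server):
+             touch /var/lib/clickhouse/flags/force_drop_table
+         Then issue the DROP query once; the server typically removes the flag after it is used. -->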
+ <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
+ <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
+
+ <!-- Example of parameters for GraphiteMergeTree table engine -->
+ <graphite_rollup_example>
+ <pattern>
+ <regexp>click_cost</regexp>
+ <function>any</function>
+ <retention>
+ <age>0</age>
+ <precision>3600</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>60</precision>
+ </retention>
+ </pattern>
+ <default>
+ <function>max</function>
+ <retention>
+ <age>0</age>
+ <precision>60</precision>
+ </retention>
+ <retention>
+ <age>3600</age>
+ <precision>300</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>3600</precision>
+ </retention>
+ </default>
+ </graphite_rollup_example>
+
+ <!-- Directory in <clickhouse-path> containing schema files for various input formats.
+ The directory will be created if it doesn't exist.
+ -->
+ <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
+
+ <!--
+ <storage_configuration>
+ <disks>
+ <ssd>
+                <path>if you want to use this policy, please configure the SSD mount path here</path>
+ </ssd>
+ </disks>
+
+ <policies>
+ <ssd_to_hdd>
+ <volumes>
+ <hot>
+ <disk>ssd</disk>
+ </hot>
+ <default>
+ <disk>default</disk>
+ </default>
+ </volumes>
+ <move_factor>0.1</move_factor>
+ </ssd_to_hdd>
+ </policies>
+ </storage_configuration>
+ -->
+
+ <!-- Uncomment to disable ClickHouse internal DNS caching. -->
+ <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
+</yandex>
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/docker-compose.yml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/docker-compose.yml.j2
new file mode 100644
index 0000000..8f8616d
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/docker-compose.yml.j2
@@ -0,0 +1,15 @@
+version: '3'
+
+services:
+ clickhouse:
+ image: {{ image_name }}:{{ image_tag }}
+ container_name: {{ container_name }}
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/clickhouse-server/config.xml:/etc/clickhouse-server/config.xml"
+ - "{{ deploy_dir }}/{{ container_name }}/clickhouse-server/users.xml:/etc/clickhouse-server/users.xml"
+ - "{{ deploy_dir }}/{{ container_name }}/clickhouse-server/metrika.xml:/etc/clickhouse-server/metrika.xml"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/var/logs"
+ - "{{ deploy_dir }}/{{ container_name }}/data:/var/lib/clickhouse/data"
+ - "{{ deploy_dir }}/{{ container_name }}/metadata:/var/lib/clickhouse/metadata"
+ restart: always
+ network_mode: "host"
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/metrika_standalone.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/metrika_standalone.xml.j2
new file mode 100644
index 0000000..f9d4d1f
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/standalone/metrika_standalone.xml.j2
@@ -0,0 +1,87 @@
+<yandex>
+<!-- ClickHouse cluster nodes -->
+<clickhouse_remote_servers>
+
+<!-- clickhouse query cluster nodes -->
+<ck_query>
+
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ inventory_hostname }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+</ck_query>
+
+
+<!-- clickhouse cluster nodes -->
+<ck_cluster>
+
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ inventory_hostname }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+</ck_cluster>
+
+
+<!-- all clickhouse nodes -->
+<ck_all>
+
+<shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>{{ inventory_hostname }}</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>{{ clickhouse_default_pin }}</password>
+ </replica>
+</shard>
+
+</ck_all>
+
+</clickhouse_remote_servers>
+
+<zookeeper-servers>
+
+<node index="1">
+ <host>{{ inventory_hostname }}</host>
+ <port>2181</port>
+</node>
+
+<session_timeout_ms>120000</session_timeout_ms>
+</zookeeper-servers>
+
+<networks>
+<ip>::/0</ip>
+</networks>
+
+<!-- Compression-related configuration -->
+<clickhouse_compression>
+<case>
+<min_part_size>10000000000</min_part_size>
+<min_part_size_ratio>0.01</min_part_size_ratio>
+<method>lz4</method> <!-- lz4 compresses faster than zstd but uses more disk space -->
+</case>
+</clickhouse_compression>
+</yandex>
+
+
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/unload_ck.sh.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/unload_ck.sh.j2
new file mode 100644
index 0000000..3f31745
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/unload_ck.sh.j2
@@ -0,0 +1,43 @@
+#!/bin/bash
+source /etc/profile
+
+function killService(){
+keeppath='/etc/init.d/clickhouse-server'
+if [ -x $keeppath ];then
+service clickhouse-server stop
+fi
+}
+
+function killPid(){
+
+livenum=`rpm -qa | grep clickhouse | wc -l`
+if [ $livenum -ne 0 ];then
+service clickhouse-server stop
+rpm -e --noscripts clickhouse-server-21.8.13.1.altinitystable-2.noarch
+rpm -e --noscripts clickhouse-client-21.8.13.1.altinitystable-2.noarch
+rpm -e --noscripts clickhouse-common-static-21.8.13.1.altinitystable-2.x86_64
+fi
+
+}
+
+function drop_folder(){
+FOLDER_NAME=$1
+
+if [ -d "$FOLDER_NAME" ];then
+    rm -rf "$FOLDER_NAME"
+fi
+}
+
+function drop_file(){
+FILE_NAME=$1
+
+if [ -f "$FILE_NAME" ];then
+    rm -f "$FILE_NAME"
+fi
+}
+
+killService
+sleep 15
+killPid
+drop_folder {{ deploy_dir }}/clickhouse
+drop_folder {{ data_dir }}/clickhouse
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/templates/users.xml.j2 b/Clickhouse/21.8.13.1/clickhouse/role/templates/users.xml.j2
new file mode 100644
index 0000000..b01923e
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/templates/users.xml.j2
@@ -0,0 +1,214 @@
+<?xml version="1.0"?>
+<yandex>
+ <!-- Profiles of settings. -->
+ <profiles>
+ <!-- Default settings. -->
+ <default>
+ <!-- Maximum memory usage for processing single query, in bytes. -->
+ <max_memory_usage>{{ clickhouse.max_server_memory_usage }}</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <max_bytes_before_external_group_by>75000000000</max_bytes_before_external_group_by>
+ <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
+ <distributed_product_mode>local</distributed_product_mode>
+ <log_queries>1</log_queries>
+ <cancel_http_readonly_queries_on_client_close>1</cancel_http_readonly_queries_on_client_close>
+ <background_pool_size>{{ clickhouse.background_pool_size }}</background_pool_size>
+ <!-- <enable_http_compression>1</enable_http_compression>-->
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>21600</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <use_uncompressed_cache>1</use_uncompressed_cache>
+ <replace_running_query>1</replace_running_query>
+ <http_receive_timeout>21600</http_receive_timeout>
+ <http_send_timeout>21600</http_send_timeout>
+ <receive_timeout>21600</receive_timeout>
+ <send_timeout>21600</send_timeout>
+ <count_distinct_implementation>uniqCombined</count_distinct_implementation>
+ <!-- How to choose between replicas during distributed query processing.
+ random - choose random replica from set of replicas with minimum number of errors
+ nearest_hostname - from set of replicas with minimum number of errors, choose replica
+                 with the minimum number of different symbols between the replica's hostname and the local hostname
+                 (Hamming distance).
+                 in_order - the first live replica is chosen in the specified order.
+ -->
+ <max_rows_to_group_by>10000000</max_rows_to_group_by>
+ <group_by_overflow_mode>any</group_by_overflow_mode>
+ <timeout_before_checking_execution_speed>3600</timeout_before_checking_execution_speed>
+ <load_balancing>in_order</load_balancing>
+ </default>
+
+ <!-- Profile that allows only read queries. -->
+ <readonly>
+ <max_memory_usage>{{ clickhouse.max_server_memory_usage }}</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <distributed_product_mode>local</distributed_product_mode>
+ <http_receive_timeout>600</http_receive_timeout>
+ <http_send_timeout>600</http_send_timeout>
+ <receive_timeout>600</receive_timeout>
+ <send_timeout>600</send_timeout>
+ <log_queries>1</log_queries>
+ <cancel_http_readonly_queries_on_client_close>1</cancel_http_readonly_queries_on_client_close>
+ <background_pool_size>{{ clickhouse.background_pool_size }}</background_pool_size>
+            <!-- HTTP compression; it does not affect plain HTTP requests, only clients that go through chproxy -->
+ <enable_http_compression>1</enable_http_compression>
+ <replace_running_query>1</replace_running_query>
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>600</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <timeout_before_checking_execution_speed>600</timeout_before_checking_execution_speed>
+ <use_uncompressed_cache>1</use_uncompressed_cache>
+ <count_distinct_implementation>uniqCombined</count_distinct_implementation>
+ <load_balancing>in_order</load_balancing>
+ <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
+ <max_rows_to_group_by>10000000</max_rows_to_group_by>
+ <group_by_overflow_mode>any</group_by_overflow_mode>
+ <readonly>2</readonly>
+
+ </readonly>
+
+ <ckinsert>
+ <max_memory_usage>{{ clickhouse.max_server_memory_usage }}</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <distributed_product_mode>local</distributed_product_mode>
+ <log_queries>1</log_queries>
+ <background_pool_size>{{ clickhouse.background_pool_size }}</background_pool_size>
+
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>300</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <use_uncompressed_cache>0</use_uncompressed_cache>
+ <timeout_before_checking_execution_speed>300</timeout_before_checking_execution_speed>
+ <http_receive_timeout>300</http_receive_timeout>
+ <http_send_timeout>300</http_send_timeout>
+ <receive_timeout>300</receive_timeout>
+ <send_timeout>300</send_timeout>
+ <allow_ddl>0</allow_ddl>
+ <load_balancing>random</load_balancing>
+ </ckinsert>
+ </profiles>
+
+ <!-- Users and ACL. -->
+ <users>
+ <!-- If user name was not specified, 'default' user is used. -->
+ <default>
+ <!-- Password could be specified in plaintext or in SHA256 (in hex format).
+
+ If you want to specify password in plaintext (not recommended), place it in 'password' element.
+ Example: <password>qwerty</password>.
+ Password could be empty.
+
+ If you want to specify SHA256, place it in 'password_sha256_hex' element.
+ Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+
+                 How to generate a decent password:
+                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 The first output line is the password and the second is the corresponding SHA256.
+ -->
+ <password_sha256_hex>{{ root_key_sha }}</password_sha256_hex>
+
+ <!-- List of networks with open access.
+
+ To open access from everywhere, specify:
+ <ip>::/0</ip>
+
+ To open access only from localhost, specify:
+ <ip>::1</ip>
+ <ip>127.0.0.1</ip>
+
+ Each element of list has one of the following forms:
+ <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+ 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+ <host> Hostname. Example: server01.yandex.ru.
+ To check access, DNS query is performed, and all received addresses compared to peer address.
+                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.yandex\.ru$
+                     To check access, DNS PTR query is performed for peer address and then regexp is applied.
+                     Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                     It is strongly recommended that the regexp ends with $
+ All results of DNS requests are cached till server restart.
+ -->
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+
+ <!-- Settings profile for user. -->
+ <profile>default</profile>
+
+ <!-- Quota for user. -->
+ <quota>default</quota>
+ </default>
+
+ <tsg_report>
+ <password_sha256_hex>{{ root_key_sha }}</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>default</profile>
+ <quota>default</quota>
+ </tsg_report>
+
+
+ <tsg_insert>
+ <password_sha256_hex>{{ root_key_sha }}</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>ckinsert</profile>
+ <quota>default</quota>
+ </tsg_insert>
+
+ <!-- Example of user with readonly access. -->
+ <tsg_query>
+ <password_sha256_hex>{{ readonly_key_sha }}</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>readonly</profile>
+ <quota>default</quota>
+ </tsg_query>
+
+
+ <!-- Example of user with readonly access. -->
+ <readonly>
+ <password></password>
+ <networks incl="networks" replace="replace">
+ <ip>::1</ip>
+ <ip>127.0.0.1</ip>
+ </networks>
+ <profile>readonly</profile>
+ <quota>default</quota>
+ </readonly>
+ </users>
+
+ <!-- Quotas. -->
+ <quotas>
+ <!-- Name of quota. -->
+ <default>
+ <!-- Limits for time interval. You could specify many intervals with different limits. -->
+ <interval>
+ <!-- Length of interval. -->
+ <duration>3600</duration>
+ <!-- No limits. Just calculate resource usage for time interval. -->
+ <queries>0</queries>
+ <errors>0</errors>
+ <result_rows>0</result_rows>
+ <read_rows>0</read_rows>
+ <execution_time>0</execution_time>
+ </interval>
+ </default>
+ </quotas>
+</yandex>
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/vars/.main.yml.swp b/Clickhouse/21.8.13.1/clickhouse/role/vars/.main.yml.swp
new file mode 100644
index 0000000..eb0391f
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/vars/.main.yml.swp
Binary files differ
diff --git a/Clickhouse/21.8.13.1/clickhouse/role/vars/main.yml b/Clickhouse/21.8.13.1/clickhouse/role/vars/main.yml
new file mode 100644
index 0000000..f803f79
--- /dev/null
+++ b/Clickhouse/21.8.13.1/clickhouse/role/vars/main.yml
@@ -0,0 +1,12 @@
+#Image name
+image_name: clickhouse
+
+#Image tag
+image_tag: 21.8.13.1.altinitystable
+
+#Container name
+container_name: clickhouse
+
+#Component version
+component_version: clickhouse-21.8.13.1.altinitystable
+
diff --git a/GrootStream/1.0/grootstream/hosts b/GrootStream/1.0/grootstream/hosts
new file mode 100644
index 0000000..829ebb5
--- /dev/null
+++ b/GrootStream/1.0/grootstream/hosts
@@ -0,0 +1,2 @@
+[grootstream]
+192.168.45.102
diff --git a/GrootStream/1.0/grootstream/install.yml b/GrootStream/1.0/grootstream/install.yml
new file mode 100644
index 0000000..b95f26c
--- /dev/null
+++ b/GrootStream/1.0/grootstream/install.yml
@@ -0,0 +1,7 @@
+- hosts: grootstream
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/GrootStream/1.0/grootstream/role/defaults/main.yml b/GrootStream/1.0/grootstream/role/defaults/main.yml
new file mode 100644
index 0000000..6734e27
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/defaults/main.yml
@@ -0,0 +1,9 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+qgw_api: '{{ vrrp_instance.default.virtual_ipaddress }}'
+
+hos_api: '{{ vrrp_instance.oss.virtual_ipaddress }}'
diff --git a/GrootStream/1.0/grootstream/role/tasks/deploy.yml b/GrootStream/1.0/grootstream/role/tasks/deploy.yml
new file mode 100644
index 0000000..f1d4071
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/tasks/deploy.yml
@@ -0,0 +1,32 @@
+- name: Check JDK version
+ shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
+ ignore_errors: false
+ register: jdk_out
+
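+# Note (sketch of the check above): `java -version 2>&1` for an Oracle JDK such as 1.8.0_73
+# typically prints the version string on two lines, so a healthy host yields jdk_out.stdout == '2'.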
+- name: To terminate execution
+ fail:
+ msg: "JDK is not installed in the target cluster, please check!"
+ when: jdk_out.stdout != '2'
+ run_once: true
+ delegate_to: 127.0.0.1
+
+- name: Create groot package path:{{ deploy_dir }}
+ file:
+ state: directory
+ path: '{{ deploy_dir }}'
+
+- name: Unpack {{ groot_stream_version }}.zip to {{ deploy_dir }}/
+ unarchive:
+ src: 'files/{{ groot_stream_version }}.zip'
+ dest: '{{ deploy_dir }}/'
+
+- name: Copying config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: '{{ item.mode }}'
+ backup: false
+ with_items:
+ - { src: 'groot-stream.sh.j2', dest: '/etc/profile.d/groot-stream.sh', mode: '0755' }
+ - { src: 'grootstream.yaml.j2', dest: '{{ deploy_dir }}/{{ groot_stream_version }}/config/grootstream.yaml', mode: '0644' }
+
diff --git a/GrootStream/1.0/grootstream/role/tasks/main.yml b/GrootStream/1.0/grootstream/role/tasks/main.yml
new file mode 100644
index 0000000..0b02eb7
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/tasks/main.yml
@@ -0,0 +1,8 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
diff --git a/GrootStream/1.0/grootstream/role/tasks/uninstall.yml b/GrootStream/1.0/grootstream/role/tasks/uninstall.yml
new file mode 100644
index 0000000..e2ca97b
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/tasks/uninstall.yml
@@ -0,0 +1,10 @@
+- block:
+ - name: Ansible delete old /etc/profile.d/groot-stream.sh
+ file:
+ path: '/etc/profile.d/groot-stream.sh'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ groot_stream_version }}
+ file:
+ path: '{{ deploy_dir }}/{{ groot_stream_version }}'
+ state: absent
diff --git a/GrootStream/1.0/grootstream/role/templates/groot-stream.sh.j2 b/GrootStream/1.0/grootstream/role/templates/groot-stream.sh.j2
new file mode 100644
index 0000000..716813b
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/templates/groot-stream.sh.j2
@@ -0,0 +1,4 @@
+#groot-stream
+export GROOT_HOME={{ deploy_dir }}/{{ groot_stream_version }}
+export PATH=$GROOT_HOME/bin:$PATH
+
diff --git a/GrootStream/1.0/grootstream/role/templates/grootstream.yaml.j2 b/GrootStream/1.0/grootstream/role/templates/grootstream.yaml.j2
new file mode 100644
index 0000000..d8a6c60
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/templates/grootstream.yaml.j2
@@ -0,0 +1,21 @@
+grootstream:
+ knowledge_base:
+ - name: tsg_asnlookup
+ type: asnlookup
+ properties:
+ fs_type: hos
+ fs_default_path: http://{{ qgw_api }}:9999/v1/knowledge_base?kb_id=
+ files:
+ - f9f6bc91-2142-4673-8249-e097c00fe1ea
+ - name: tsg_geoiplookup
+ type: geoiplookup
+ properties:
+ fs_type: hos
+ fs_default_path: http://{{ qgw_api }}:9999/v1/knowledge_base?kb_id=
+ files:
+ - 64af7077-eb9b-4b8f-80cf-2ceebc89bea9
+ - 004390bc-3135-4a6f-a492-3662ecb9e289
+ properties:
+ hos.path: http://{{ hos_api }}:9098/hos
+ hos.bucket.name.traffic_file: traffic_file_bucket
+ hos.bucket.name.troubleshooting_file: troubleshooting_file_bucket
diff --git a/GrootStream/1.0/grootstream/role/vars/main.yml b/GrootStream/1.0/grootstream/role/vars/main.yml
new file mode 100644
index 0000000..7ab3fbb
--- /dev/null
+++ b/GrootStream/1.0/grootstream/role/vars/main.yml
@@ -0,0 +1,5 @@
+#groot-stream version
+groot_stream_version: groot-stream-1.0
+
+#JDK version
+java_version: 1.8.0_73
diff --git a/MariaDB/10.5.3/mariadb/hosts b/MariaDB/10.5.3/mariadb/hosts
new file mode 100644
index 0000000..9ddfaf2
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/hosts
@@ -0,0 +1,2 @@
+[mariadb]
+192.168.45.102
diff --git a/MariaDB/10.5.3/mariadb/install.yml b/MariaDB/10.5.3/mariadb/install.yml
new file mode 100644
index 0000000..4d1be87
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/install.yml
@@ -0,0 +1,7 @@
+- hosts: mariadb
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/MariaDB/10.5.3/mariadb/role/defaults/main.yml b/MariaDB/10.5.3/mariadb/role/defaults/main.yml
new file mode 100644
index 0000000..ac8bc01
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/defaults/main.yml
@@ -0,0 +1,9 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+mariadb:
+ #Used to cache data and index data from tables in the InnoDB storage engine.
+ innodb_buffer_pool_size: 2048
diff --git a/MariaDB/10.5.3/mariadb/role/files/pyMysql.zip b/MariaDB/10.5.3/mariadb/role/files/pyMysql.zip
new file mode 100644
index 0000000..ce72a5b
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/files/pyMysql.zip
Binary files differ
diff --git a/MariaDB/10.5.3/mariadb/role/handlers/main.yml b/MariaDB/10.5.3/mariadb/role/handlers/main.yml
new file mode 100644
index 0000000..5062d63
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/handlers/main.yml
@@ -0,0 +1,29 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Loading Exporter Image
+ docker_image:
+ name: 'mysqld_exporter'
+ tag: 'v1.0'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/mysqld_exporter-v1.0.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Start Exporter Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+
+
diff --git a/MariaDB/10.5.3/mariadb/role/tasks/deploy-cluster.yml b/MariaDB/10.5.3/mariadb/role/tasks/deploy-cluster.yml
new file mode 100644
index 0000000..983b483
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/tasks/deploy-cluster.yml
@@ -0,0 +1,153 @@
+- name: Setting node_nums variable
+ set_fact: node_nums={{ groups.mariadb|length }}
+
+- name: To terminate execution
+ fail:
+ msg: "MariaDB in master-master mode. The value must have 2 nodes,please checking configurations/hosts -> mariadb"
+ when: node_nums != '2'
+
+- name: Creating directory
+ file:
+ state: directory
+ owner: ods
+ group: root
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'config' }
+ - { dir: 'logs' }
+ - { dir: 'monitor' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: 'files/mysqld_exporter-v1.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Copying Mariadb config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'my.cnf.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/my.cnf' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ notify:
+ - Start Container
+
+- meta: flush_handlers
+
+- name: Waiting for MariaDB to start, 20s
+ shell: sleep 20
+
+- name: Creating mariadb readonly user
+ shell: mysql -uroot -h{{ inventory_hostname }} -p{{ mariadb_default_pin }} -e "CREATE USER IF NOT EXISTS '{{ mariadb_query_username}}'@'%' IDENTIFIED BY '{{ mariadb_query_pin }}' WITH MAX_USER_CONNECTIONS 3;GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO '{{ mariadb_query_username}}'@'%';FLUSH PRIVILEGES;"
+
+- name: Change MariaDB remote access privileges
+ shell: mysql -uroot -h{{ inventory_hostname }} -p{{ mariadb_default_pin }} -e"use mysql;grant all privileges on *.* to 'root'@'%' identified by '{{ mariadb_default_pin }}' with grant option;FLUSH PRIVILEGES;"
+
+- name: copy pyMysql.zip
+ unarchive:
+ src: files/pyMysql.zip
+ dest: /tmp/
+ register: copy_info
+
+- name: pip install pyMysql
+ shell: cd /tmp/pyMysql && pip install --ignore-installed *
+ when: copy_info.changed
+
+- name: Get mariadb slave_status
+ mysql_info:
+ login_host: '{{ groups.mariadb[0] }}'
+ login_user: root
+ login_password: '{{ mariadb_default_pin }}'
+ filter: slave_status
+ register: mariadb_info
+
+- name: change_slave_to_master1
+ mysql_replication:
+ login_unix_socket: "{{ deploy_dir }}/{{ container_name }}/data/mysql.sock"
+ login_host: "{{ groups.mariadb[1] }}"
+ login_port: "3306"
+ login_user: root
+ login_password: "{{ mariadb_default_pin }}"
+ master_host: "{{ groups.mariadb[0] }}"
+ master_user: root
+ master_password: "{{ mariadb_default_pin }}"
+ master_port: "3306"
+ mode: changemaster
+ run_once: true
+ delegate_to: '{{ groups.mariadb[1] }}'
+ tags:
+ - change_slave_to_master1
+
+- name: start_slave1
+ mysql_replication:
+ login_unix_socket: "{{ deploy_dir }}/{{ container_name }}/data/mysql.sock"
+ login_user: root
+ login_host: "{{ inventory_hostname }}"
+ login_port: "3306"
+ login_password: "{{ mariadb_default_pin }}"
+ mode: startslave
+ tags:
+ - start_slave1
+ run_once: true
+ delegate_to: '{{ groups.mariadb[1] }}'
+
+- name: change_slave_to_master2
+ mysql_replication:
+ login_unix_socket: "{{ deploy_dir }}/{{ container_name }}/data/mysql.sock"
+ login_host: "{{ groups.mariadb[0] }}"
+ login_port: "3306"
+ login_user: root
+ login_password: "{{ mariadb_default_pin }}"
+ master_host: "{{ groups.mariadb[1] }}"
+ master_user: root
+ master_password: "{{ mariadb_default_pin }}"
+ master_port: "3306"
+ mode: changemaster
+ run_once: true
+ delegate_to: '{{ groups.mariadb[0] }}'
+ tags:
+ - change_slave_to_master2
+
+- name: start_slave2
+ mysql_replication:
+ login_unix_socket: "{{ deploy_dir }}/{{ container_name }}/data/mysql.sock"
+ login_user: root
+ login_host: "{{ inventory_hostname }}"
+ login_port: "3306"
+ login_password: "{{ mariadb_default_pin }}"
+ mode: startslave
+ tags:
+ - start_slave2
+ run_once: true
+ delegate_to: '{{ groups.mariadb[0] }}'
+
+- name: get_slave_info
+ mysql_replication:
+ login_host: 127.0.0.1
+ login_user: root
+ login_port: "3306"
+ login_password: "{{ mariadb_default_pin }}"
+ mode: getslave
+ register: info
+ tags:
+ - get_slave_info
+
+- name: Copying Mariadb config files
+ template:
+ src: 'exporter_docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Start Exporter Container
diff --git a/MariaDB/10.5.3/mariadb/role/tasks/deploy-standalone.yml b/MariaDB/10.5.3/mariadb/role/tasks/deploy-standalone.yml
new file mode 100644
index 0000000..d94eb38
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/tasks/deploy-standalone.yml
@@ -0,0 +1,66 @@
+- name: Setting node_nums variable
+ set_fact: node_nums={{ groups.mariadb|length }}
+
+- name: To terminate execution
+ fail:
+ msg: "MariaDB standalone mode. The value must have 1 nodes,please checking configurations/hosts -> mariadb"
+ when: node_nums != '1'
+
+- name: Creating directory
+ file:
+ state: directory
+ owner: ods
+ group: root
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'config' }
+ - { dir: 'logs' }
+ - { dir: 'monitor' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
+ copy:
+ src: 'files/mysqld_exporter-v1.0.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
+ force: true
+ notify:
+ - Loading Exporter Image
+
+- name: Copying Mariadb config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'my.cnf.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/my.cnf' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ notify:
+ - Start Container
+
+- meta: flush_handlers
+
+- name: Waiting for MariaDB to start, 20s
+ shell: sleep 20
+
+- name: Creating mariadb readonly user
+ shell: mysql -uroot -h{{ inventory_hostname }} -p{{ mariadb_default_pin }} -e "CREATE USER IF NOT EXISTS '{{ mariadb_query_username}}'@'%' IDENTIFIED BY '{{ mariadb_query_pin }}' WITH MAX_USER_CONNECTIONS 3;GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO '{{ mariadb_query_username}}'@'%';FLUSH PRIVILEGES;"
+
+- name: Change MariaDB remote access privileges
+ shell: mysql -uroot -h{{ inventory_hostname }} -p{{ mariadb_default_pin }} -e"use mysql;grant all privileges on *.* to 'root'@'%' identified by '{{ mariadb_default_pin }}' with grant option;FLUSH PRIVILEGES;"
+
+- name: Copying Mariadb config files
+ template:
+ src: 'exporter_docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Start Exporter Container
+
+- meta: flush_handlers
diff --git a/MariaDB/10.5.3/mariadb/role/tasks/main.yml b/MariaDB/10.5.3/mariadb/role/tasks/main.yml
new file mode 100644
index 0000000..a8a33a1
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/tasks/main.yml
@@ -0,0 +1,11 @@
+- block:
+ - include: uninstall.yml
+ - include: "{{ playbook_name }}"
+ vars:
+ playbook_name: "{{ 'deploy-cluster.yml' if groups.mariadb | length > 1 else 'deploy-standalone.yml' }}"
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
diff --git a/MariaDB/10.5.3/mariadb/role/tasks/status-check.yml b/MariaDB/10.5.3/mariadb/role/tasks/status-check.yml
new file mode 100644
index 0000000..60b813e
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/tasks/status-check.yml
@@ -0,0 +1,14 @@
+- name: Check if the MariaDB process is running
+ shell: ps -ef | grep -v mysqld_exporter | grep -v grep | grep mysqld | wc -l
+ register: process_out
+
+- name: Check if the MariaDB port is listening
+ shell: netstat -anlp | egrep "3306" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "MariaDB on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '1'
diff --git a/MariaDB/10.5.3/mariadb/role/tasks/uninstall.yml b/MariaDB/10.5.3/mariadb/role/tasks/uninstall.yml
new file mode 100644
index 0000000..1f9872f
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/tasks/uninstall.yml
@@ -0,0 +1,27 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'mysqld_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'mysqld_exporter'
+ tag: 'v1.0'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/MariaDB/10.5.3/mariadb/role/templates/docker-compose.yml.j2 b/MariaDB/10.5.3/mariadb/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..b752df8
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/docker-compose.yml.j2
@@ -0,0 +1,15 @@
+version: "3.7"
+
+services:
+ mariadb:
+ image: {{ image_name }}:{{ image_tag }}
+ container_name: {{ container_name }}
+ restart: always
+ environment:
+ - MYSQL_ROOT_PASSWORD={{ mariadb_default_pin }}
+ volumes:
+ - {{ deploy_dir }}/{{ container_name }}/data:/opt/mariadb/data
+ - {{ deploy_dir }}/{{ container_name }}/logs:/opt/mariadb/logs
+ - {{ deploy_dir }}/{{ container_name }}/config/my.cnf:/etc/mysql/my.cnf
+ network_mode: "host"
+
diff --git a/MariaDB/10.5.3/mariadb/role/templates/exporter_docker-compose.yml.j2 b/MariaDB/10.5.3/mariadb/role/templates/exporter_docker-compose.yml.j2
new file mode 100644
index 0000000..a2ce0a5
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/exporter_docker-compose.yml.j2
@@ -0,0 +1,17 @@
+version: '3'
+services:
+ mysql_exporter:
+ image: mysqld_exporter:v1.0
+ container_name: mysqld_exporter
+ restart: always
+ ports:
+ - 9911:9104
+ hostname: mysqld_exporter
+ environment:
+ - DATA_SOURCE_NAME=tsg_query:{{ mariadb_query_pin }}@({{ inventory_hostname }}:3306)/
+ networks:
+ olap:
+ ipv4_address: 172.20.88.6
+networks:
+ olap:
+ external: true
diff --git a/MariaDB/10.5.3/mariadb/role/templates/keepalived/check_mariadb.sh.j2 b/MariaDB/10.5.3/mariadb/role/templates/keepalived/check_mariadb.sh.j2
new file mode 100644
index 0000000..3dc80d5
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/keepalived/check_mariadb.sh.j2
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+PORT_CHECK=$(netstat -anlp | grep 3306 | grep LISTEN | grep mysqld | grep -v grep | wc -l)
+
+if [[ $PORT_CHECK -eq 0 ]];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Mariadb port is not listening, service abnormal" >> /etc/keepalived/keepalived_check.log
+ exit 1
+fi
+
+SQL_CHECK=`mysql -h{{inventory_hostname}} -utsg_query -p{{mariadb_query_pin}} -e "SELECT 'ok' AS OK;"`
+
+if [[ $? -ne '0' ]];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Mariadb SQL execution failed, service status abnormal" >> /etc/keepalived/keepalived_check.log
+ exit 1
+fi
diff --git a/MariaDB/10.5.3/mariadb/role/templates/keepalived/keepalived-mariadb.conf.j2 b/MariaDB/10.5.3/mariadb/role/templates/keepalived/keepalived-mariadb.conf.j2
new file mode 100644
index 0000000..4cd1970
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/keepalived/keepalived-mariadb.conf.j2
@@ -0,0 +1,48 @@
+#mariadb Load balancing start
+vrrp_script chk_mariadb {
+ script "/etc/keepalived/check_mariadb.sh"
+    #Check every 2 seconds
+ interval 2
+    #Subtract 20 from the priority each time the check fails
+ weight -20
+}
+
+#VRRP instance definition block
+vrrp_instance VI_MARIADB {
+{% if inventory_hostname == groups.mariadb[0] %}
+#State is either MASTER or BACKUP (uppercase only); MASTER is the active state, BACKUP is the standby state.
+state MASTER
+#Priority; within the same vrrp_instance the MASTER priority must be higher than the BACKUP priority.
+priority 150
+{% elif inventory_hostname == groups.mariadb[1] %}
+#State is either MASTER or BACKUP (uppercase only); MASTER is the active state, BACKUP is the standby state.
+state BACKUP
+#Priority; within the same vrrp_instance the MASTER priority must be higher than the BACKUP priority.
+priority 100
+{% endif %}
+#Network interface name
+interface {{ ansible_default_ipv4.interface }}
+#Virtual router ID; the MASTER and BACKUP of the same vrrp_instance must use the same virtual_router_id.
+virtual_router_id 66
+#Interval, in seconds, between synchronization checks between the MASTER and BACKUP load balancers.
+advert_int 1
+authentication {
+#Authentication block, containing the auth type and password. The main types are PASS and AH; PASS is the one normally used.
+auth_type PASS
+#AH is said to be problematic. The password is plaintext; the MASTER and BACKUP of the same vrrp instance must use the same password to communicate.
+auth_pass 1111
+}
+#Scripts to trigger
+track_script {
+#The check script configured above
+ chk_mariadb
+}
+virtual_ipaddress {
+#Virtual IP addresses; multiple addresses are allowed, one per line, no subnet mask required
+ {{ mariadb_virtual_ipaddress }}
+}
+}
+#mariadb Load balancing end
+
+
+
diff --git a/MariaDB/10.5.3/mariadb/role/templates/keepalived/unload_balancer.sh.j2 b/MariaDB/10.5.3/mariadb/role/templates/keepalived/unload_balancer.sh.j2
new file mode 100644
index 0000000..fa717dc
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/keepalived/unload_balancer.sh.j2
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+function del_ipaddr(){
+keepHostCheck=`ip address show {{ vrrp_instance.default.interface }} | grep "{{ vrrp_instance.default.virtual_ipaddress }}" | wc -l`
+if [ $keepHostCheck -eq "1" ];then
+ ip address del {{ vrrp_instance.default.virtual_ipaddress }} dev {{ vrrp_instance.default.interface }}
+fi
+}
+
+
+if [ -f "/etc/keepalived/conf.d/keepalived-mariadb.conf" ];then
+ rm -rf /etc/keepalived/check_mariadb.sh
+ rm -rf /etc/keepalived/conf.d/keepalived-mariadb.conf
+ service keepalived stop && systemctl daemon-reload && sleep 3 && service keepalived start
+ del_ipaddr
+else
+ del_ipaddr
+fi
diff --git a/MariaDB/10.5.3/mariadb/role/templates/my.cnf.j2 b/MariaDB/10.5.3/mariadb/role/templates/my.cnf.j2
new file mode 100644
index 0000000..9ab16d7
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/templates/my.cnf.j2
@@ -0,0 +1,198 @@
+# Example MariaDB config file for very large systems.
+#
+# This is for a large system with memory of 1G-2G where the system runs mainly
+# MariaDB.
+#
+# MariaDB programs look for option files in a set of
+# locations which depend on the deployment platform.
+# You can copy this option file to one of those
+# locations. For information about these locations, do:
+# 'my_print_defaults --help' and see what is printed under
+# Default options are read from the following files in the given order:
+# More information at: http://dev.mysql.com/doc/mysql/en/option-files.html
+#
+# In this file, you can use all long options that a program supports.
+# If you want to know which options a program supports, run the program
+# with the "--help" option.
+
+# The following options will be passed to all MySQL clients
+[client]
+#password = your_password
+port = 3306
+socket = /opt/mariadb/data/mysql.sock
+
+# Here follows entries for some specific programs
+
+# The MySQL server
+[mysqld]
+default-time_zone='+0:00'
+port = 3306
+datadir = /opt/mariadb/data
+socket = /opt/mariadb/data/mysql.sock
+skip-external-locking
+key_buffer_size = 384M
+max_allowed_packet = 1M
+table_open_cache = 512
+sort_buffer_size = 2M
+read_buffer_size = 2M
+read_rnd_buffer_size = 8M
+myisam_sort_buffer_size = 64M
+thread_cache_size = 8
+query_cache_size = 32M
+# Try number of CPU's*2 for thread_concurrency
+thread_concurrency = 8
+
+innodb_file_per_table = on #Create a separate tablespace file for each table
+skip_name_resolve = on #Skip DNS name resolution
+lower_case_table_names = 1 #Ignore table name case
+character-set-server = utf8mb4 #Database character set
+character-set-client = utf8mb4
+slow_query_log
+long_query_time = 0.1
+#MySQL max connections setting
+max_connections=5000
+log_error=/opt/mariadb/logs/log_error.log
+slow_query_log_file = /opt/mariadb/logs/slow_query.log
+innodb_log_file_size = 512M
+innodb_flush_method = O_DIRECT
+innodb_log_files_in_group = 3
+innodb_buffer_pool_size = {{ mariadb.innodb_buffer_pool_size }}
+innodb_buffer_pool_instances = 4
+socket = /opt/mariadb/data/mysql.sock
+skip-external-locking
+key_buffer_size = 384M
+max_allowed_packet = 128M
+table_open_cache = 1024
+sort_buffer_size = 2M
+read_buffer_size = 2M
+read_rnd_buffer_size = 8M
+myisam_sort_buffer_size = 64M
+thread_cache_size = 8
+query_cache_size = 32M
+thread_concurrency = 16 #Thread count; 2x the number of CPU cores is recommended
+bulk_insert_buffer_size=100M
+
+slave_skip_errors = 1062
+
+# Point the following paths to a dedicated disk
+#tmpdir = /tmp/
+
+# Don't listen on a TCP/IP port at all. This can be a security enhancement,
+# if all processes that need to connect to mysqld run on the same host.
+# All interaction with mysqld must be made via Unix sockets or named pipes.
+# Note that using this option without enabling named pipes on Windows
+# (via the "enable-named-pipe" option) will render mysqld useless!
+#
+#skip-networking
+
+# Replication Master Server (default)
+# binary logging is required for replication
+log-bin=mysql-bin
+
+# required unique id between 1 and 2^32 - 1
+# defaults to 1 if master-host is not set
+# but will not function as a master if omitted
+
+server-id = {{ groups['mariadb'].index(inventory_hostname) +1 }}
+auto_increment_increment = {{ groups.mariadb|length }} #auto_increment step; with n master MySQL servers, set this to n
+auto_increment_offset = {{ groups['mariadb'].index(inventory_hostname) +1 }} #Starting offset; this master's 1-based position in the [mariadb] group
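+# Worked example (sketch) for the 2-node [mariadb] group this role expects in cluster mode:
+#   host 1: server-id = 1, auto_increment_increment = 2, auto_increment_offset = 1 -> generates 1,3,5,...
+#   host 2: server-id = 2, auto_increment_increment = 2, auto_increment_offset = 2 -> generates 2,4,6,...
+# so the two masters never hand out the same auto-increment value.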
+
+log_slave_updates=on
+
+binlog-ignore-db = mysql
+binlog-ignore-db = performance_schema
+binlog-ignore-db = information_schema
+
+# Replication Slave (comment out master section to use this)
+#
+# To configure this host as a replication slave, you can choose between
+# two methods :
+#
+# 1) Use the CHANGE MASTER TO command (fully described in our manual) -
+# the syntax is:
+#
+# CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,
+# MASTER_USER=<user>, MASTER_PASSWORD=<password> ;
+#
+# where you replace <host>, <user>, <password> by quoted strings and
+# <port> by the master's port number (3306 by default).
+#
+# Example:
+#
+# CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,
+# MASTER_USER='joe', MASTER_PASSWORD='secret';
+#
+# OR
+#
+# 2) Set the variables below. However, in case you choose this method, then
+# start replication for the first time (even unsuccessfully, for example
+# if you mistyped the password in master-password and the slave fails to
+# connect), the slave will create a master.info file, and any later
+# change in this file to the variables' values below will be ignored and
+# overridden by the content of the master.info file, unless you shutdown
+# the slave server, delete master.info and restart the slaver server.
+# For that reason, you may want to leave the lines below untouched
+# (commented) and instead use CHANGE MASTER TO (see above)
+#
+# required unique id between 2 and 2^32 - 1
+# (and different from the master)
+# defaults to 2 if master-host is set
+# but will not function as a slave if omitted
+#server-id = 2
+#
+# The replication master for this slave - required
+#master-host = <hostname>
+#
+# The username the slave will use for authentication when connecting
+# to the master - required
+#master-user = <username>
+#
+# The password the slave will authenticate with when connecting to
+#master-password = <password>
+#
+# The port the master is listening on.
+# optional - defaults to 3306
+#master-port = <port>
+#
+# binary logging - not required for slaves, but recommended
+#log-bin=mysql-bin
+#
+# binary logging format - mixed recommended
+#binlog_format=mixed
+
+# Uncomment the following if you are using InnoDB tables
+#innodb_data_home_dir = /usr/local/mysql/data
+#innodb_data_file_path = ibdata1:2000M;ibdata2:10M:autoextend
+#innodb_log_group_home_dir = /usr/local/mysql/data
+# You can set .._buffer_pool_size up to 50 - 80 %
+# of RAM but beware of setting memory usage too high
+#innodb_buffer_pool_size = 384M
+# Set .._log_file_size to 25 % of buffer pool size
+#innodb_log_file_size = 100M
+#innodb_log_buffer_size = 8M
+innodb_flush_log_at_trx_commit = 1
+innodb_lock_wait_timeout = 300
+sync_binlog=1
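+# Note: innodb_flush_log_at_trx_commit = 1 together with sync_binlog = 1 is the fully durable
+# combination: both the InnoDB redo log and the binary log are flushed to disk at every commit,
+# trading some write throughput for crash safety.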
+
+relay-log=relay-bin
+relay-log-index=relay-bin-index
+slave-skip-errors=all
+
+[mysqldump]
+quick
+max_allowed_packet = 256M
+
+[mysql]
+no-auto-rehash
+# Remove the next comment character if you are not familiar with SQL
+#safe-updates
+
+[myisamchk]
+key_buffer_size = 256M
+sort_buffer_size = 256M
+read_buffer = 2M
+write_buffer = 2M
+
+[mysqlhotcopy]
+interactive-timeout
+
diff --git a/MariaDB/10.5.3/mariadb/role/vars/main.yml b/MariaDB/10.5.3/mariadb/role/vars/main.yml
new file mode 100644
index 0000000..5b44b38
--- /dev/null
+++ b/MariaDB/10.5.3/mariadb/role/vars/main.yml
@@ -0,0 +1,9 @@
+#Image name
+image_name: mariadb
+
+#Image tag
+image_tag: 10.5.3
+
+#Container name
+container_name: mariadb
+
diff --git a/Nacos/2.0.2/hosts b/Nacos/2.0.2/hosts
new file mode 100644
index 0000000..c6dced0
--- /dev/null
+++ b/Nacos/2.0.2/hosts
@@ -0,0 +1,5 @@
+[mariadb]
+192.168.45.102
+
+[nacos]
+192.168.45.102
diff --git a/Nacos/2.0.2/install.yml b/Nacos/2.0.2/install.yml
new file mode 100644
index 0000000..edd453f
--- /dev/null
+++ b/Nacos/2.0.2/install.yml
@@ -0,0 +1,7 @@
+- hosts: nacos
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Nacos/2.0.2/role/defaults/main.yml b/Nacos/2.0.2/role/defaults/main.yml
new file mode 100644
index 0000000..efcb197
--- /dev/null
+++ b/Nacos/2.0.2/role/defaults/main.yml
@@ -0,0 +1,18 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
+#The MySQL/MariaDB database connection information
+nacos_mysql_host: '{{ vrrp_instance.default.virtual_ipaddress }}'
+
+#The MySQL/MariaDB username
+nacos_mysql_username: 'root'
+
+#The MySQL/MariaDB password
+nacos_mysql_password: '{{ mariadb_default_pin }}'
+
+nacos:
+ #Running memory of the Nacos.
+ java_opt: '-Xmx1024m -Xms1024m -Xmn256m'
diff --git a/Nacos/2.0.2/role/files/mysql b/Nacos/2.0.2/role/files/mysql
new file mode 100644
index 0000000..dd458ec
--- /dev/null
+++ b/Nacos/2.0.2/role/files/mysql
Binary files differ
diff --git a/Nacos/2.0.2/role/handlers/main.yml b/Nacos/2.0.2/role/handlers/main.yml
new file mode 100644
index 0000000..aa0145c
--- /dev/null
+++ b/Nacos/2.0.2/role/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/Nacos/2.0.2/role/tasks/deploy.yml b/Nacos/2.0.2/role/tasks/deploy.yml
new file mode 100644
index 0000000..766cb5c
--- /dev/null
+++ b/Nacos/2.0.2/role/tasks/deploy.yml
@@ -0,0 +1,50 @@
+- name: copy mysql to /usr/bin/
+ copy:
+ src: 'files/mysql'
+ dest: '/usr/bin/'
+ force: true
+ mode: 0755
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
+ with_items:
+ - { dir: 'conf' }
+ - { dir: 'logs' }
+ - { dir: 'init' }
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying Nacos config files
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ mode: 0644
+ with_items:
+ - { src: 'application.properties.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/application.properties' }
+ - { src: 'cluster.conf.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/cluster.conf' }
+ - { src: 'nacos-logback.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/nacos-logback.xml' }
+ - { src: 'nacos-mysql.sql.j2', dest: '{{ deploy_dir }}/{{ container_name }}/init/nacos-mysql.sql' }
+ - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
+ notify:
+ - Loading Image
+ - Start Container
+
+- block:
+ - name: create nacos database
+ shell: mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "create database if not exists {{ nacos_database }} default character set utf8mb4 collate utf8mb4_general_ci;"
+
+ - name: exec nacos-mysql.sql
+ shell: 'mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} {{ nacos_database }} < {{ deploy_dir }}/{{ container_name }}/init/nacos-mysql.sql'
+ run_once: true
+  delegate_to: '{{ groups.nacos[0] }}'
+
+- meta: flush_handlers
+
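
The two shell-based database steps above could also be expressed with the mysql_db module, which keeps the password off the process command line and is idempotent. This is only a sketch and assumes PyMySQL is available on the delegated host (nothing in this patch installs it):

- name: Create nacos database (sketch, equivalent to the shell task above)
  mysql_db:
    name: '{{ nacos_database }}'
    state: present
    encoding: utf8mb4
    collation: utf8mb4_general_ci
    login_host: '{{ groups.mariadb[0] }}'
    login_user: '{{ nacos_mysql_username }}'
    login_password: '{{ mariadb_default_pin }}'
  run_once: true
  delegate_to: '{{ groups.nacos[0] }}'

- name: Import the Nacos schema (sketch)
  mysql_db:
    name: '{{ nacos_database }}'
    state: import
    target: '{{ deploy_dir }}/{{ container_name }}/init/nacos-mysql.sql'
    login_host: '{{ groups.mariadb[0] }}'
    login_user: '{{ nacos_mysql_username }}'
    login_password: '{{ mariadb_default_pin }}'
  run_once: true
  delegate_to: '{{ groups.nacos[0] }}'
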
diff --git a/Nacos/2.0.2/role/tasks/main.yml b/Nacos/2.0.2/role/tasks/main.yml
new file mode 100644
index 0000000..7d9aec3
--- /dev/null
+++ b/Nacos/2.0.2/role/tasks/main.yml
@@ -0,0 +1,10 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
+
diff --git a/Nacos/2.0.2/role/tasks/status-check.yml b/Nacos/2.0.2/role/tasks/status-check.yml
new file mode 100644
index 0000000..87a05f6
--- /dev/null
+++ b/Nacos/2.0.2/role/tasks/status-check.yml
@@ -0,0 +1,15 @@
+- name: Waiting for the Nacos Server to start, sleep 60s
+ shell: sleep 60
+
+- name: Check the Nacos node status
+ shell: source /etc/profile && curl -s http://{{ inventory_hostname }}:8847/nacos/actuator/health | grep UP | wc -l
+ register: check_nacos
+
+- name: To terminate execution
+ fail:
+ msg: "检测到{{ inventory_hostname }}节点Nacos未正常启动;请保留日志反馈,路径:{{ deploy_dir }}/{{ container_name }}/logs"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_nacos.stdout != '1'
+
+
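The fixed 60-second sleep followed by a single curl could alternatively be written as a polling check; a sketch using the uri module (the retry count and delay are arbitrary choices, not taken from this patch):

- name: Wait until the Nacos health endpoint responds (sketch)
  uri:
    url: 'http://{{ inventory_hostname }}:8847/nacos/actuator/health'
    return_content: yes
    status_code: 200
  register: nacos_health
  # retry until the endpoint answers 200; one could additionally assert 'UP' in the body
  until: (nacos_health.status | default(0)) == 200
  retries: 12
  delay: 10
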
diff --git a/Nacos/2.0.2/role/tasks/uninstall.yml b/Nacos/2.0.2/role/tasks/uninstall.yml
new file mode 100644
index 0000000..9f19295
--- /dev/null
+++ b/Nacos/2.0.2/role/tasks/uninstall.yml
@@ -0,0 +1,21 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
+
+ - name: Drop mariadb nacos database
+ shell: mysql -s -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "DROP DATABASE IF EXISTS {{ nacos_database }};"
+ run_once: true
+ delegate_to: "{{ groups.nacos[0] }}"
diff --git a/Nacos/2.0.2/role/templates/application.properties.j2 b/Nacos/2.0.2/role/templates/application.properties.j2
new file mode 100644
index 0000000..57a1e65
--- /dev/null
+++ b/Nacos/2.0.2/role/templates/application.properties.j2
@@ -0,0 +1,228 @@
+#
+# Copyright 1999-2018 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#*************** Spring Boot Related Configurations ***************#
+### Default web context path:
+server.servlet.contextPath=/nacos
+### Default web server port:
+server.port=8847
+
+#*************** Network Related Configurations ***************#
+### If prefer hostname over ip for Nacos server addresses in cluster.conf:
+# nacos.inetutils.prefer-hostname-over-ip=false
+
+### Specify local server's IP:
+# nacos.inetutils.ip-address=
+
+
+#*************** Config Module Related Configurations ***************#
+### If use MySQL as datasource:
+spring.datasource.platform=mysql
+
+### Count of DB:
+db.num=1
+
+### Connect URL of DB:
+db.url.0=jdbc:mysql://{{ nacos_mysql_host }}:3306/nacos?characterEncoding=utf8&connectTimeout=10000&socketTimeout=30000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&failOverReadOnly=false
+db.user.0={{ nacos_mysql_username }}
+db.password.0={{ nacos_mysql_password }}
+
+### Connection pool configuration: hikariCP
+db.pool.config.connectionTimeout=30000
+db.pool.config.validationTimeout=10000
+db.pool.config.maximumPoolSize=20
+db.pool.config.minimumIdle=2
+
+#*************** Naming Module Related Configurations ***************#
+### Data dispatch task execution period in milliseconds: Will removed on v2.1.X, replace with nacos.core.protocol.distro.data.sync.delayMs
+# nacos.naming.distro.taskDispatchPeriod=200
+
+### Data count of batch sync task: Will removed on v2.1.X. Deprecated
+# nacos.naming.distro.batchSyncKeyCount=1000
+
+### Retry delay in milliseconds if sync task failed: Will removed on v2.1.X, replace with nacos.core.protocol.distro.data.sync.retryDelayMs
+# nacos.naming.distro.syncRetryDelay=5000
+
+### If enable data warmup. If set to false, the server would accept request without local data preparation:
+# nacos.naming.data.warmup=true
+
+### If enable the instance auto expiration, kind like of health check of instance:
+# nacos.naming.expireInstance=true
+
+### will be removed and replaced by `nacos.naming.clean` properties
+nacos.naming.empty-service.auto-clean=true
+nacos.naming.empty-service.clean.initial-delay-ms=50000
+nacos.naming.empty-service.clean.period-time-ms=30000
+
+### Add in 2.0.0
+### The interval to clean empty service, unit: milliseconds.
+# nacos.naming.clean.empty-service.interval=60000
+
+### The expired time to clean empty service, unit: milliseconds.
+# nacos.naming.clean.empty-service.expired-time=60000
+
+### The interval to clean expired metadata, unit: milliseconds.
+# nacos.naming.clean.expired-metadata.interval=5000
+
+### The expired time to clean metadata, unit: milliseconds.
+# nacos.naming.clean.expired-metadata.expired-time=60000
+
+### The delay time before push task to execute from service changed, unit: milliseconds.
+# nacos.naming.push.pushTaskDelay=500
+
+### The timeout for push task execute, unit: milliseconds.
+# nacos.naming.push.pushTaskTimeout=5000
+
+### The delay time for retrying failed push task, unit: milliseconds.
+# nacos.naming.push.pushTaskRetryDelay=1000
+
+#*************** CMDB Module Related Configurations ***************#
+### The interval to dump external CMDB in seconds:
+# nacos.cmdb.dumpTaskInterval=3600
+
+### The interval of polling data change event in seconds:
+# nacos.cmdb.eventTaskInterval=10
+
+### The interval of loading labels in seconds:
+# nacos.cmdb.labelTaskInterval=300
+
+### If turn on data loading task:
+# nacos.cmdb.loadDataAtStart=false
+
+
+#*************** Metrics Related Configurations ***************#
+### Metrics for prometheus
+management.endpoints.web.exposure.include=*
+
+### Metrics for elastic search
+management.metrics.export.elastic.enabled=false
+#management.metrics.export.elastic.host=http://localhost:9200
+
+### Metrics for influx
+management.metrics.export.influx.enabled=false
+#management.metrics.export.influx.db=springboot
+#management.metrics.export.influx.uri=http://localhost:8086
+#management.metrics.export.influx.auto-create-db=true
+#management.metrics.export.influx.consistency=one
+#management.metrics.export.influx.compressed=true
+
+#*************** Access Log Related Configurations ***************#
+### If turn on the access log:
+server.tomcat.accesslog.enabled=false
+
+### The access log pattern:
+server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
+
+### The directory of access log:
+server.tomcat.basedir=
+
+#*************** Access Control Related Configurations ***************#
+### If enable spring security, this option is deprecated in 1.2.0:
+#spring.security.enabled=false
+
+### The ignore urls of auth, is deprecated in 1.2.0:
+nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
+
+### The auth system to use, currently only 'nacos' and 'ldap' is supported:
+nacos.core.auth.system.type=nacos
+
+### If turn on auth system:
+nacos.core.auth.enabled=true
+
+### worked when nacos.core.auth.system.type=ldap,{0} is Placeholder,replace login username
+# nacos.core.auth.ldap.url=ldap://localhost:389
+# nacos.core.auth.ldap.userdn=cn={0},ou=user,dc=company,dc=com
+
+### The token expiration in seconds:
+nacos.core.auth.default.token.expire.seconds=18000
+
+### The default token:
+nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
+
+### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
+nacos.core.auth.caching.enabled=false
+
+### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version.
+nacos.core.auth.enable.userAgentAuthWhite=true
+
+### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false.
+### The two properties is the white list for auth and used by identity the request from other server.
+nacos.core.auth.server.identity.key=serverIdentity
+nacos.core.auth.server.identity.value=security
+
+#*************** Istio Related Configurations ***************#
+### If turn on the MCP server:
+nacos.istio.mcp.server.enabled=false
+
+#*************** Core Related Configurations ***************#
+
+### set the WorkerID manually
+# nacos.core.snowflake.worker-id=
+
+### Member-MetaData
+# nacos.core.member.meta.site=
+# nacos.core.member.meta.adweight=
+# nacos.core.member.meta.weight=
+
+### MemberLookup
+### Addressing pattern category, If set, the priority is highest
+# nacos.core.member.lookup.type=[file,address-server]
+## Set the cluster list with a configuration file or command-line argument
+# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
+## for AddressServerMemberLookup
+# Maximum number of retries to query the address server upon initialization
+# nacos.core.address-server.retry=5
+## Server domain name address of [address-server] mode
+# address.server.domain=jmenv.tbsite.net
+## Server port of [address-server] mode
+# address.server.port=8080
+## Request address of [address-server] mode
+# address.server.url=/nacos/serverlist
+
+#*************** JRaft Related Configurations ***************#
+
+### Sets the Raft cluster election timeout, default value is 5 second
+# nacos.core.protocol.raft.data.election_timeout_ms=5000
+### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
+# nacos.core.protocol.raft.data.snapshot_interval_secs=30
+### raft internal worker threads
+# nacos.core.protocol.raft.data.core_thread_num=8
+### Number of threads required for raft business request processing
+# nacos.core.protocol.raft.data.cli_service_thread_num=4
+### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
+# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
+### rpc request timeout, default 5 seconds
+# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
+
+#*************** Distro Related Configurations ***************#
+
+### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second.
+# nacos.core.protocol.distro.data.sync.delayMs=1000
+
+### Distro data sync timeout for one sync data, default 3 seconds.
+# nacos.core.protocol.distro.data.sync.timeoutMs=3000
+
+### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds.
+# nacos.core.protocol.distro.data.sync.retryDelayMs=3000
+
+### Distro data verify interval time, verify synced data whether expired for a interval. Default 5 seconds.
+# nacos.core.protocol.distro.data.verify.intervalMs=5000
+
+### Distro data verify timeout for one verify, default 3 seconds.
+# nacos.core.protocol.distro.data.verify.timeoutMs=3000
+
+### Distro data load retry delay when load snapshot data failed, default 30 seconds.
+# nacos.core.protocol.distro.data.load.retryDelayMs=30000
diff --git a/Nacos/2.0.2/role/templates/cluster.conf.j2 b/Nacos/2.0.2/role/templates/cluster.conf.j2
new file mode 100644
index 0000000..23f09bc
--- /dev/null
+++ b/Nacos/2.0.2/role/templates/cluster.conf.j2
@@ -0,0 +1,20 @@
+#
+# Copyright 1999-2018 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# cluster member IP list, one per line
+{% for dev_info in groups.nacos %}
+{{dev_info}}
+{% endfor %}
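
For reference, with a hypothetical three-node nacos inventory group (192.168.45.101-103), the loop above renders the member list as:

192.168.45.101
192.168.45.102
192.168.45.103

One member address per line is what Nacos expects in cluster.conf. Note that when no port is given Nacos falls back to its default server port; since application.properties above sets server.port=8847, it is worth verifying whether the entries need an explicit :8847 suffix in this deployment.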
diff --git a/Nacos/2.0.2/role/templates/docker-compose.yml.j2 b/Nacos/2.0.2/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..4ef885b
--- /dev/null
+++ b/Nacos/2.0.2/role/templates/docker-compose.yml.j2
@@ -0,0 +1,14 @@
+version: '3'
+
+services:
+ nacos:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: {{ container_name }}
+ network_mode: "host"
+ environment:
+ MODE: cluster
+ JVM_OPTIONS: {{ nacos.java_opt }}
+ volumes:
+ - "{{ deploy_dir }}/{{ container_name }}/conf:/opt/nacos/conf"
+ - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/nacos/logs"
diff --git a/Nacos/2.0.2/role/templates/nacos-logback.xml.j2 b/Nacos/2.0.2/role/templates/nacos-logback.xml.j2
new file mode 100644
index 0000000..2b76b8d
--- /dev/null
+++ b/Nacos/2.0.2/role/templates/nacos-logback.xml.j2
@@ -0,0 +1,642 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright 1999-2018 Alibaba Group Holding Ltd.
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration scan="true" scanPeriod="10 seconds">
+
+ <springProperty scope="context" name="logPath" source="nacos.logs.path" defaultValue="${nacos.home}/logs"/>
+ <property name="LOG_HOME" value="${logPath}"/>
+
+ <appender name="cmdb-main"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${nacos.home}/logs/cmdb-main.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${nacos.home}/logs/cmdb-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="naming-server"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-server.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="async-naming-server" class="ch.qos.logback.classic.AsyncAppender">
+ <discardingThreshold>0</discardingThreshold>
+ <queueSize>1024</queueSize>
+ <neverBlock>true</neverBlock>
+ <appender-ref ref="naming-server"/>
+ </appender>
+
+ <appender name="naming-raft"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-raft.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="async-naming-raft" class="ch.qos.logback.classic.AsyncAppender">
+ <discardingThreshold>0</discardingThreshold>
+ <queueSize>1024</queueSize>
+ <neverBlock>true</neverBlock>
+ <appender-ref ref="naming-raft"/>
+ </appender>
+
+
+ <appender name="naming-distro"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-distro.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="async-naming-distro" class="ch.qos.logback.classic.AsyncAppender">
+ <discardingThreshold>0</discardingThreshold>
+ <queueSize>1024</queueSize>
+ <neverBlock>true</neverBlock>
+ <appender-ref ref="naming-distro"/>
+ </appender>
+
+ <appender name="naming-event"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-event.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-event.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="async-naming-event" class="ch.qos.logback.classic.AsyncAppender">
+ <discardingThreshold>0</discardingThreshold>
+ <queueSize>1024</queueSize>
+ <neverBlock>true</neverBlock>
+ <appender-ref ref="naming-event"/>
+ </appender>
+
+ <appender name="naming-push"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-push.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-push.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+ <appender name="naming-rt"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-rt.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-rt.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%msg%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="naming-performance"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/naming-performance.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/naming-performance.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <!--config module logback config-->
+ <appender name="dumpFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-dump.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-dump.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+ <appender name="pullFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-pull.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-pull.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>20MB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>128MB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+ <appender name="fatalFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-fatal.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-fatal.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>20MB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>128MB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+ <appender name="memoryFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-memory.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-memory.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>20MB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>128MB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+ <appender name="pullCheckFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-pull-check.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-pull-check.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%msg%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="clientLog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-client-request.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-client-request.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date|%msg%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="traceLog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-trace.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-trace.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date|%msg%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="notifyLog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-notify.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-notify.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>1GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="startLog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/config-server.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/config-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>50MB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>512MB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="rootFile"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/nacos.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/nacos.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>50MB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>512MB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="nacos-address"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/nacos-address.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/nacos-address.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="istio-main"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/istio-main.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/istio-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="core-auth"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/core-auth.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/core-auth.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="protocol-raft"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/protocol-raft.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/protocol-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="protocol-distro"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/protocol-distro.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/protocol-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="nacos-cluster"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/nacos-cluster.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/nacos-cluster.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <appender name="alipay-jraft"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${LOG_HOME}/alipay-jraft.log</file>
+ <append>true</append>
+ <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+ <fileNamePattern>${LOG_HOME}/alipay-jraft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
+ <maxFileSize>2GB</maxFileSize>
+ <maxHistory>7</maxHistory>
+ <totalSizeCap>7GB</totalSizeCap>
+ <cleanHistoryOnStart>true</cleanHistoryOnStart>
+ </rollingPolicy>
+ <encoder>
+ <Pattern>%date %level %msg%n%n</Pattern>
+ <charset>UTF-8</charset>
+ </encoder>
+ </appender>
+
+ <logger name="com.alibaba.nacos.address.main" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="nacos-address"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.cmdb.main" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="cmdb-main"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.naming.main" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="async-naming-server"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.raft" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="async-naming-raft"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.distro" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="async-naming-distro"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.event" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="async-naming-event"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.push" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="naming-push"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.rt" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="naming-rt"/>
+ </logger>
+ <logger name="com.alibaba.nacos.naming.performance" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="naming-performance"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.config.dumpLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="dumpFile"/>
+ </logger>
+ <logger name="com.alibaba.nacos.config.pullLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="pullFile"/>
+ </logger>
+ <logger name="com.alibaba.nacos.config.pullCheckLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="pullCheckFile"/>
+ </logger>
+ <logger name="com.alibaba.nacos.config.fatal" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="fatalFile"/>
+ </logger>
+ <logger name="com.alibaba.nacos.config.monitorLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="memoryFile"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.config.clientLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="clientLog"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.config.notifyLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="notifyLog"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.config.traceLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="traceLog"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.config.startLog" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="startLog"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.istio.main" additivity="false">
+ <level value="WARN"/>
+ <appender-ref ref="istio-main"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.core.auth" additivity="false">
+ <level value="WARN"/>
+ <appender-ref ref="core-auth"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.core.protocol.raft" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="protocol-raft"/>
+ </logger>
+
+ <logger name="com.alipay.sofa.jraft" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="alipay-jraft"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.core.protocol.distro" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="protocol-distro"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.core.cluster" additivity="false">
+ <level value="ERROR"/>
+ <appender-ref ref="nacos-cluster"/>
+ </logger>
+
+ <springProfile name="standalone">
+ <logger name="org.springframework">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="org.apache.catalina.startup.DigesterFactory">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="org.apache.catalina.util.LifecycleBase">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="org.apache.coyote.http11.Http11NioProtocol">
+ <appender-ref ref="CONSOLE"/>
+ <level value="WARN"/>
+ </logger>
+
+ <logger name="org.apache.tomcat.util.net.NioSelectorPool">
+ <appender-ref ref="CONSOLE"/>
+ <level value="WARN"/>
+ </logger>
+ </springProfile>
+
+ <logger name="com.alibaba.nacos.core.listener.StartingApplicationListener">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.common.notify.NotifyCenter">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.sys.file.WatchFileCenter">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <logger name="com.alibaba.nacos.common.executor.ThreadPoolManager">
+ <appender-ref ref="CONSOLE"/>
+ <level value="ERROR"/>
+ </logger>
+
+ <root>
+ <level value="ERROR"/>
+ <appender-ref ref="rootFile"/>
+ </root>
+</configuration>
+
diff --git a/Nacos/2.0.2/role/templates/nacos-mysql.sql.j2 b/Nacos/2.0.2/role/templates/nacos-mysql.sql.j2
new file mode 100644
index 0000000..e9d78b1
--- /dev/null
+++ b/Nacos/2.0.2/role/templates/nacos-mysql.sql.j2
@@ -0,0 +1,220 @@
+/*
+ * Copyright 1999-2018 Alibaba Group Holding Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = config_info */
+/******************************************/
+CREATE TABLE `config_info` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+ `data_id` varchar(255) NOT NULL COMMENT 'data_id',
+ `group_id` varchar(255) DEFAULT NULL,
+ `content` longtext NOT NULL COMMENT 'content',
+ `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
+ `src_user` text COMMENT 'source user',
+ `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
+ `app_name` varchar(128) DEFAULT NULL,
+ `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
+ `c_desc` varchar(256) DEFAULT NULL,
+ `c_use` varchar(64) DEFAULT NULL,
+ `effect` varchar(64) DEFAULT NULL,
+ `type` varchar(64) DEFAULT NULL,
+ `c_schema` text,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = config_info_aggr */
+/******************************************/
+CREATE TABLE `config_info_aggr` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+ `data_id` varchar(255) NOT NULL COMMENT 'data_id',
+ `group_id` varchar(255) NOT NULL COMMENT 'group_id',
+ `datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
+ `content` longtext NOT NULL COMMENT '内容',
+ `gmt_modified` datetime NOT NULL COMMENT '修改时间',
+ `app_name` varchar(128) DEFAULT NULL,
+ `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';
+
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = config_info_beta */
+/******************************************/
+CREATE TABLE `config_info_beta` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+ `data_id` varchar(255) NOT NULL COMMENT 'data_id',
+ `group_id` varchar(128) NOT NULL COMMENT 'group_id',
+ `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
+ `content` longtext NOT NULL COMMENT 'content',
+ `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
+ `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
+ `src_user` text COMMENT 'source user',
+ `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
+ `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = config_info_tag */
+/******************************************/
+CREATE TABLE `config_info_tag` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+ `data_id` varchar(255) NOT NULL COMMENT 'data_id',
+ `group_id` varchar(128) NOT NULL COMMENT 'group_id',
+ `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
+ `tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
+ `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
+ `content` longtext NOT NULL COMMENT 'content',
+ `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
+ `src_user` text COMMENT 'source user',
+ `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = config_tags_relation */
+/******************************************/
+CREATE TABLE `config_tags_relation` (
+ `id` bigint(20) NOT NULL COMMENT 'id',
+ `tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
+ `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
+ `data_id` varchar(255) NOT NULL COMMENT 'data_id',
+ `group_id` varchar(128) NOT NULL COMMENT 'group_id',
+ `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
+ `nid` bigint(20) NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (`nid`),
+ UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
+ KEY `idx_tenant_id` (`tenant_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = group_capacity */
+/******************************************/
+CREATE TABLE `group_capacity` (
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
+ `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群',
+ `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
+ `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
+ `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
+ `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值',
+ `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
+ `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_group_id` (`group_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = his_config_info */
+/******************************************/
+CREATE TABLE `his_config_info` (
+ `id` bigint(64) unsigned NOT NULL,
+ `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `data_id` varchar(255) NOT NULL,
+ `group_id` varchar(128) NOT NULL,
+ `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
+ `content` longtext NOT NULL,
+ `md5` varchar(32) DEFAULT NULL,
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `src_user` text,
+ `src_ip` varchar(50) DEFAULT NULL,
+ `op_type` char(10) DEFAULT NULL,
+ `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
+ PRIMARY KEY (`nid`),
+ KEY `idx_gmt_create` (`gmt_create`),
+ KEY `idx_gmt_modified` (`gmt_modified`),
+ KEY `idx_did` (`data_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';
+
+
+/******************************************/
+/* 数据库全名 = nacos_config */
+/* 表名称 = tenant_capacity */
+/******************************************/
+CREATE TABLE `tenant_capacity` (
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
+ `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
+ `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
+ `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
+ `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
+ `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
+ `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
+ `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
+ `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+ `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_tenant_id` (`tenant_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';
+
+
+CREATE TABLE `tenant_info` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
+ `kp` varchar(128) NOT NULL COMMENT 'kp',
+ `tenant_id` varchar(128) default '' COMMENT 'tenant_id',
+ `tenant_name` varchar(128) default '' COMMENT 'tenant_name',
+ `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
+ `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
+ `gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
+ `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
+ KEY `idx_tenant_id` (`tenant_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
+
+CREATE TABLE `users` (
+ `username` varchar(50) NOT NULL PRIMARY KEY,
+ `password` varchar(500) NOT NULL,
+ `enabled` boolean NOT NULL
+);
+
+CREATE TABLE `roles` (
+ `username` varchar(50) NOT NULL,
+ `role` varchar(50) NOT NULL,
+ UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
+);
+
+CREATE TABLE `permissions` (
+ `role` varchar(50) NOT NULL,
+ `resource` varchar(255) NOT NULL,
+ `action` varchar(8) NOT NULL,
+ UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
+);
+
+INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
+
+INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
+
+
diff --git a/Nacos/2.0.2/role/vars/main.yml b/Nacos/2.0.2/role/vars/main.yml
new file mode 100644
index 0000000..d1b368b
--- /dev/null
+++ b/Nacos/2.0.2/role/vars/main.yml
@@ -0,0 +1,13 @@
+#Image name
+image_name: nacos
+
+#Image tag
+image_tag: 2.0.2-alp
+
+#Container name
+container_name: nacos
+
+#Component version
+component_version: nacos-2.0.2
+
+nacos_database: nacos
diff --git a/Pushgateway/1.4.2/pushgateway/hosts b/Pushgateway/1.4.2/pushgateway/hosts
new file mode 100644
index 0000000..a19852d
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/hosts
@@ -0,0 +1,3 @@
+[pushgateway]
+192.168.45.102
+
diff --git a/Pushgateway/1.4.2/pushgateway/install.yml b/Pushgateway/1.4.2/pushgateway/install.yml
new file mode 100644
index 0000000..8cf4d5d
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/install.yml
@@ -0,0 +1,7 @@
+- hosts: pushgateway
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Pushgateway/1.4.2/pushgateway/role/defaults/main.yml b/Pushgateway/1.4.2/pushgateway/role/defaults/main.yml
new file mode 100644
index 0000000..d636519
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/defaults/main.yml
@@ -0,0 +1,6 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
diff --git a/Pushgateway/1.4.2/pushgateway/role/handlers/main.yml b/Pushgateway/1.4.2/pushgateway/role/handlers/main.yml
new file mode 100644
index 0000000..5d63377
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/handlers/main.yml
@@ -0,0 +1,25 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
diff --git a/Pushgateway/1.4.2/pushgateway/role/tasks/deploy.yml b/Pushgateway/1.4.2/pushgateway/role/tasks/deploy.yml
new file mode 100644
index 0000000..40f58d4
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/tasks/deploy.yml
@@ -0,0 +1,22 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying Pushgateway config files
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Start Container
+
+- meta: flush_handlers
diff --git a/Pushgateway/1.4.2/pushgateway/role/tasks/main.yml b/Pushgateway/1.4.2/pushgateway/role/tasks/main.yml
new file mode 100644
index 0000000..7d9aec3
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/tasks/main.yml
@@ -0,0 +1,10 @@
+- block:
+ - include: uninstall.yml
+ - include: deploy.yml
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
+
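As with the other roles in this commit, the task list is gated on an operation variable that must be supplied at run time; an illustrative invocation from the pushgateway directory would be something like:

ansible-playbook -i hosts install.yml -e operation=install
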
diff --git a/Pushgateway/1.4.2/pushgateway/role/tasks/status-check.yml b/Pushgateway/1.4.2/pushgateway/role/tasks/status-check.yml
new file mode 100644
index 0000000..7542edb
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
+- name: Waiting for Pushgateway to start, 30s
+ shell: sleep 30
+
+- name: Check if the Pushgateway process is running
+ shell: ps -ef | grep pushgateway |grep -v grep | wc -l
+ register: process_out
+
+- name: Check if the Pushgateway port is listening
+ shell: netstat -anlp | egrep "9091" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Kafka on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '1'
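If an HTTP-level probe is preferred over the process/port greps, Pushgateway also exposes readiness endpoints on its listen port; a sketch (assumes the /-/ready endpoint is available in this Pushgateway version, adjust if it differs):

- name: Probe the Pushgateway readiness endpoint (sketch)
  uri:
    url: 'http://{{ inventory_hostname }}:9091/-/ready'
    status_code: 200
  register: pgw_ready
  until: (pgw_ready.status | default(0)) == 200
  retries: 6
  delay: 5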
diff --git a/Pushgateway/1.4.2/pushgateway/role/tasks/uninstall.yml b/Pushgateway/1.4.2/pushgateway/role/tasks/uninstall.yml
new file mode 100644
index 0000000..8a76065
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/tasks/uninstall.yml
@@ -0,0 +1,27 @@
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+ - name: Stopping and removing exporter container
+ docker_container:
+ name: 'kafka_exporter'
+ state: absent
+
+ - name: Removing old exporter image
+ docker_image:
+ name: 'kafka_exporter'
+ tag: 'v2.0'
+ state: absent
+
+ - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/Pushgateway/1.4.2/pushgateway/role/templates/docker-compose.yml.j2 b/Pushgateway/1.4.2/pushgateway/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..ccb96bd
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/templates/docker-compose.yml.j2
@@ -0,0 +1,15 @@
+version: '3'
+
+services:
+ pushgateway:
+ image: {{ image_name }}:{{ image_tag }}
+ container_name: {{ container_name }}
+ ports:
+ - 9091:9091
+ networks:
+ olap:
+ ipv4_address: 172.20.88.12
+networks:
+ olap:
+ external: true
+
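Because the olap network is declared external, it has to exist before docker-compose starts the service; a sketch of ensuring that from Ansible (the subnet is an assumption chosen to contain 172.20.88.12, it is not defined anywhere in this patch):

- name: Ensure the external olap docker network exists (sketch)
  docker_network:
    name: olap
    driver: bridge
    ipam_config:
      - subnet: 172.20.0.0/16
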
diff --git a/Pushgateway/1.4.2/pushgateway/role/vars/.main.yml.swp b/Pushgateway/1.4.2/pushgateway/role/vars/.main.yml.swp
new file mode 100644
index 0000000..edf3460
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/vars/.main.yml.swp
Binary files differ
diff --git a/Pushgateway/1.4.2/pushgateway/role/vars/main.yml b/Pushgateway/1.4.2/pushgateway/role/vars/main.yml
new file mode 100644
index 0000000..a82253d
--- /dev/null
+++ b/Pushgateway/1.4.2/pushgateway/role/vars/main.yml
@@ -0,0 +1,11 @@
+#Image name
+image_name: pushgateway
+
+#Image tag
+image_tag: 1.4.2
+
+#Container name
+container_name: pushgateway
+
+#Component version
+component_version: pushgateway-1.4.2
diff --git a/Redis/6.2.5/redis/hosts b/Redis/6.2.5/redis/hosts
new file mode 100644
index 0000000..93b92b8
--- /dev/null
+++ b/Redis/6.2.5/redis/hosts
@@ -0,0 +1,2 @@
+[redis]
+192.168.45.102
diff --git a/Redis/6.2.5/redis/install.yml b/Redis/6.2.5/redis/install.yml
new file mode 100644
index 0000000..d3fd1db
--- /dev/null
+++ b/Redis/6.2.5/redis/install.yml
@@ -0,0 +1,7 @@
+- hosts: redis
+ remote_user: root
+ roles:
+ - role
+ vars_files:
+ - role/vars/main.yml
+
diff --git a/Redis/6.2.5/redis/role/defaults/main.yml b/Redis/6.2.5/redis/role/defaults/main.yml
new file mode 100644
index 0000000..d636519
--- /dev/null
+++ b/Redis/6.2.5/redis/role/defaults/main.yml
@@ -0,0 +1,6 @@
+#The default installation location
+deploy_dir: /data/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_dir: /data/olap
+
diff --git a/Redis/6.2.5/redis/role/handlers/main.yml b/Redis/6.2.5/redis/role/handlers/main.yml
new file mode 100644
index 0000000..aa0145c
--- /dev/null
+++ b/Redis/6.2.5/redis/role/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Loading Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
+ source: load
+ force_tag: yes
+ force_source: yes
+ timeout: 300
+
+- name: Stop Container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+- name: Start Container
+ docker_compose:
+ project_src: '{{ deploy_dir }}/{{ container_name }}/'
+
+- name: Removing Image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
diff --git a/Redis/6.2.5/redis/role/tasks/deploy-cluster.yml b/Redis/6.2.5/redis/role/tasks/deploy-cluster.yml
new file mode 100644
index 0000000..c2e89fa
--- /dev/null
+++ b/Redis/6.2.5/redis/role/tasks/deploy-cluster.yml
@@ -0,0 +1,48 @@
+- name: Setting node_nums variable
+ set_fact: node_nums={{ groups.redis|length }}
+
+- name: To terminate execution
+ fail:
+ msg: "Redis in master-slave mode. The value must have 2 nodes,please checking configurations/hosts -> redis"
+ when: node_nums != '2'
+
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/conf'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying Redis config files
+ template:
+ src: 'redis-master.conf.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/redis.conf'
+ mode: 0644
+ run_once: true
+ delegate_to: "{{ groups.redis[0] }}"
+
+- name: Copying Redis config files
+ template:
+ src: 'redis-slave.conf.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/redis.conf'
+ mode: 0644
+ run_once: true
+ delegate_to: "{{ groups.redis[1] }}"
+
+- name: Copying Redis config files
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
+
diff --git a/Redis/6.2.5/redis/role/tasks/deploy-standalone.yml b/Redis/6.2.5/redis/role/tasks/deploy-standalone.yml
new file mode 100644
index 0000000..862f892
--- /dev/null
+++ b/Redis/6.2.5/redis/role/tasks/deploy-standalone.yml
@@ -0,0 +1,30 @@
+- name: Creating directory
+ file:
+ state: directory
+ path: '{{ deploy_dir }}/{{ container_name }}/conf'
+
+- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
+ copy:
+ src: 'files/{{ image_name }}-{{ image_tag }}.tar'
+ dest: '{{ deploy_dir }}/{{ container_name }}/'
+ force: true
+ notify:
+ - Loading Image
+
+- name: Copying Redis config files
+ template:
+ src: 'redis-master.conf.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/conf/redis.conf'
+ mode: 0644
+
+- name: Copying Redis config files
+ template:
+ src: 'docker-compose.yml.j2'
+ dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
+ mode: 0644
+ notify:
+ - Loading Image
+ - Start Container
+
+- meta: flush_handlers
+
diff --git a/Redis/6.2.5/redis/role/tasks/main.yml b/Redis/6.2.5/redis/role/tasks/main.yml
new file mode 100644
index 0000000..920efe6
--- /dev/null
+++ b/Redis/6.2.5/redis/role/tasks/main.yml
@@ -0,0 +1,11 @@
+- block:
+ - include: uninstall.yml
+ - include: "{{ playbook_name }}"
+ vars:
+      playbook_name: "{{ 'deploy-cluster.yml' if groups.redis | length > 1 else 'deploy-standalone.yml' }}"
+ - include: status-check.yml
+ when: (operation) == "install"
+
+- block:
+ - include: uninstall.yml
+ when: (operation) == "uninstall"
diff --git a/Redis/6.2.5/redis/role/tasks/status-check.yml b/Redis/6.2.5/redis/role/tasks/status-check.yml
new file mode 100644
index 0000000..f9190a5
--- /dev/null
+++ b/Redis/6.2.5/redis/role/tasks/status-check.yml
@@ -0,0 +1,17 @@
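+# Post-deploy sanity check: exactly one redis-server process is expected, and
+# port 6379 should show two LISTEN entries (presumably one per bind address
+# configured in redis.conf).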
+- name: Waiting for Redis to start, 10s
+ shell: sleep 10
+
+- name: Check if the Redis process is running
+ shell: ps -ef | grep -v grep | grep "redis-server" | wc -l
+ register: process_out
+
+- name: Check if the Redis port is listening
+ shell: netstat -anlp | egrep "6379" | grep LISTEN | wc -l
+ register: port_out
+
+- name: To terminate execution
+ fail:
+ msg: "Kafka on node {{ inventory_hostname }} is not started. Please check"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: process_out.stdout != '1' or port_out.stdout != '2'
diff --git a/Redis/6.2.5/redis/role/tasks/uninstall.yml b/Redis/6.2.5/redis/role/tasks/uninstall.yml
new file mode 100644
index 0000000..5015eb6
--- /dev/null
+++ b/Redis/6.2.5/redis/role/tasks/uninstall.yml
@@ -0,0 +1,16 @@
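+# Cleanup: remove the running container, the loaded image, and the deployment
+# directory.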
+- block:
+ - name: Stopping and removing {{ container_name }} container
+ docker_container:
+ name: '{{ container_name }}'
+ state: absent
+
+ - name: Removing old {{ image_name }} image
+ docker_image:
+ name: '{{ image_name }}'
+ tag: '{{ image_tag }}'
+ state: absent
+
+  - name: Removing old {{ deploy_dir }}/{{ container_name }} directory
+ file:
+ path: '{{ deploy_dir }}/{{ container_name }}'
+ state: absent
diff --git a/Redis/6.2.5/redis/role/templates/docker-compose.yml.j2 b/Redis/6.2.5/redis/role/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..61ac98d
--- /dev/null
+++ b/Redis/6.2.5/redis/role/templates/docker-compose.yml.j2
@@ -0,0 +1,12 @@
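+# Runs Redis with host networking so it listens directly on the node's port
+# 6379; redis.conf and the data directory are bind-mounted from the deploy dir.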
+version: '3'
+
+services:
+ redis:
+ image: {{ image_name }}:{{ image_tag }}
+ restart: always
+ container_name: {{ container_name }}
+ volumes:
+ - {{ deploy_dir }}/{{ container_name }}/conf/redis.conf:/redis.conf
+ - {{ deploy_dir }}/{{ container_name }}/data:/data
+ command: redis-server /redis.conf
+ network_mode: "host"
diff --git a/Redis/6.2.5/redis/role/templates/redis-master.conf.j2 b/Redis/6.2.5/redis/role/templates/redis-master.conf.j2
new file mode 100644
index 0000000..1869cb7
--- /dev/null
+++ b/Redis/6.2.5/redis/role/templates/redis-master.conf.j2
@@ -0,0 +1,2051 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that do not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always be
+# silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6
+# bind * -::* # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT OUT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 0.0.0.0 -::1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /run/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Force network equipment in the middle to consider the connection to be
+# alive.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Normally Redis uses the same certificate for both server functions (accepting
+# connections) and client functions (replicating from a master, establishing
+# cluster bus connections, etc.).
+#
+# Sometimes certificates are issued with attributes that designate them as
+# client-only or server-only certificates. In that case it may be desired to use
+# different certificates for incoming (server) and outgoing (client)
+# connections. To do that, use the following directives:
+#
+# tls-client-cert-file client.crt
+# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the attack surface.
+# You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# When Redis is supervised by upstart or systemd, this parameter has no impact.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# requires "expect stop" in your upstart job config
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# on startup, and updating Redis status on a regular
+# basis.
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ./redis.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title} Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
+# Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port} TCP port listening on, or 0.
+# {tls-port} TLS port listening on, or 0.
+# {unixsocket} Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes>
+#
+# Redis will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+# * After 3600 seconds (an hour) if at least 1 key changed
+# * After 300 seconds (5 minutes) if at least 100 keys changed
+# * After 60 seconds if at least 10000 keys changed
+#
+# You can set these explicitly by uncommenting the three following lines.
+#
+# save 3600 1
+# save 300 100
+# save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# Enables or disables full sanitation checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
+# crash later on while processing commands.
+# Options:
+# no - Never perform full sanitation
+# yes - Always perform full sanitation
+# clients - Perform full sanitation only for user connections.
+# Excludes: RDB files, RESTORE commands received from the master
+# connection, and client connections which have the
+# skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# +------------------+ +---------------+
+# | Master | ---> | Replica |
+# | (receive writes) | | (exact copy) |
+# +------------------+ +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with
+# an error "SYNC with master in progress" to all commands except:
+# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+# HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled" - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing
+# the data directly from the socket. note that this requires
+# sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica. The default
+# value is 60 seconds.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the replica can endure the
+# disconnect and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated if there is at least one replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no connected replicas for some time, the backlog will be
+# freed. The following option configures the amount of seconds that need to
+# elapse, starting from the time the last replica disconnected, for the backlog
+# buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with other replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less or equal than M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica is
+# obtained in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the replica to connect with the master.
+#
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# a radix key indexed by key name, what clients have which keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# check this page to understand more about the feature:
+#
+# https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# skip-sanitize-payload RESTORE dump-payload sanitation is skipped.
+# sanitize-payload RESTORE dump-payload is sanitized (default).
+# +<command> Allow the execution of that command
+# -<command> Disallow the execution of that command
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories are like @admin, @set, @sortedset, ...
+# and so forth, see the full list in the server.c file where
+# the Redis command table is described and defined.
+#                    The special category @all means all the commands, both the
+#                    ones currently present in the server and the ones that will
+#                    be loaded in the future via modules.
+# +<command>|subcommand Allow a specific subcommand of an otherwise
+# disabled command. Note that this form is not
+# allowed as negative like -DEBUG|SEGFAULT, but
+# only additive starting with "+".
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
+# accessed by the user. It is possible to specify multiple channel
+# patterns.
+# allchannels Alias for &*
+# resetchannels Flush the list of allowed channel patterns.
+# ><password> Add this password to the list of valid password for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# <<password> Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user. If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status. After "resetpass" the user has no associated
+# passwords and there is no way to authenticate without adding
+# some password (or setting it as "nopass" later).
+# reset Performs the following actions: resetpass, resetkeys, off,
+# -@all. The user returns to the same state it has immediately
+# after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass is not compatible with the aclfile option and the ACL LOAD
+# command, these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default
+# defaults to the 'allchannels' permission.
+#
+# Future compatibility note: it is very likely that in a future version of Redis
+# the directive's default of 'allchannels' will be changed to 'resetchannels' in
+# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
+# recommended that you explicitly define Pub/Sub permissions for all users
+# rather than rely on implicit default values. Once you've set explicit
+# Pub/Sub for all existing users, you should uncomment the following line.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
+# SORT (due to the STORE argument), and EXEC (if the transaction includes any
+# command that requires memory).
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 Approximates very closely
+# true LRU but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Eviction processing is designed to function well with the default setting.
+# If there is an unusually large amount of write traffic, this value may need to
+# be increased. Decreasing this value may reduce latency at the risk of
+# eviction processing effectiveness
+# 0 = minimum latency, 10 = default, 100 = process without regard to latency
+#
+# maxmemory-eviction-tenacity 10
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict in the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire key". The key space is slowly and interactively scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to add latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate less already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+# in order to make room for new data, without going over the specified
+# memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+# EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+# already exist. For example the RENAME command may delete the old key
+# content when it is replaced with another one. Similarly SUNIONSTORE
+# or SORT with STORE option may delete existing keys. The SET command
+# itself removes any old content of the specified key in order to replace
+# it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# like if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way like if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for the case when to replace the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to determine
+# if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since especially writing is so slow, normally
+# Redis users use pipelining in order to speed up the Redis performances per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speedup two times Redis without resorting
+# to pipelining nor sharding of the instance.
+#
+# By default threading is disabled, we suggest enabling it only in machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads, if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+#         CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports three options:
+#
+# no: Don't make changes to oom-score-adj (default).
+# yes: Alias to "relative", see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+# the server starts and are then clamped to a range of -1000 to 1000.
+# Because typically the initial value is 0, they will often match the
+# absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
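+
+# As a quick sanity check (illustrative, not required), the value actually
+# applied by the kernel to a running instance can be read from /proc:
+#
+# cat /proc/$(pidof redis-server)/oom_score_adj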
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise"
+# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
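+
+# For reference (illustrative shell commands, run as root), the kernel-wide
+# setting can be inspected or changed with:
+#
+# cat /sys/kernel/mm/transparent_hugepage/enabled
+# echo madvise > /sys/kernel/mm/transparent_hugepage/enabled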
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes being lost (depending
+# on the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself while the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data to disk, while others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync no". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file by implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger by more than the specified percentage, the rewrite is triggered.
+# Also you need to specify a minimum size for the AOF file to be rewritten;
+# this is useful to avoid rewriting the AOF file even if the percentage
+# increase is reached but the file is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
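+
+# Worked example with the values above: if the AOF measured 80mb after the
+# last rewrite, a 100% growth triggers the next automatic BGREWRITEAOF at
+# about 160mb; with a base of 20mb, nothing is triggered at 40mb because the
+# 64mb minimum size has not been reached yet.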
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the number of milliseconds a node must be unreachable
+# for it to be considered in a failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# Point "2" can be tuned by the user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with data that is
+# too old to fail over a master, while too small a value may prevent the
+# cluster from being able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to fail over the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that is, masters
+# left without working replicas. This improves the cluster's ability to
+# resist failures, as otherwise an orphaned master can't be failed over
+# if it fails while it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option allows for less automatic cluster configuration.
+# It disables both migration to orphaned masters and migration from masters
+# that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# is no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to fail over
+# their master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage for
+# the entire cluster without this option set; with it set there is only a
+# write outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to setup your cluster make sure to read the documentation
+# available at https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes' address discovery fails because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this purpose:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# the execution time, in microseconds, that a command must exceed in order
+# to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
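+
+# The slow log can then be inspected and cleared at runtime, for example:
+#
+# redis-cli SLOWLOG GET 10
+# redis-cli SLOWLOG LEN
+# redis-cli SLOWLOG RESET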
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the number of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under heavy load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
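+
+# For example (illustrative), to sample events slower than 100 milliseconds
+# at runtime and then read back what was collected:
+#
+# redis-cli CONFIG SET latency-monitor-threshold 100
+# redis-cli LATENCY LATEST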
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of expired keys by subscribing to the channel
+# name __keyevent@0__:expired, use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
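+
+# As an illustration, with "notify-keyspace-events Ex" set, expired keys of
+# database 0 can be observed from another terminal with:
+#
+# redis-cli subscribe __keyevent@0__:expired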
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and both the server and client side implementations are so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler
+# internet, others believe that the mainstream internet became too much
+# controlled, and it's cool to create an alternative space for people that
+# want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS? ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were otherwise illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# jargon), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet at a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However other commands cannot be called before the client
+#    authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
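+
+# These limits can also be changed at runtime; for instance, a stricter
+# (purely illustrative) pub/sub limit could be applied with:
+#
+# redis-cli CONFIG SET client-output-buffer-limit "pubsub 16mb 4mb 30"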
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to prevent a protocol desynchronization (for
+# instance due to a bug in the client) from leading to unbounded memory usage
+# in the query buffer. However you can configure it here if you have very
+# special needs, such as huge multi/exec requests or similar.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here; it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful, for instance, to avoid
+# processing too many clients for each background task invocation, which
+# would cause latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve performance and how the keys' LFU changes over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
+# +--------+------------+------------+------------+------------+------------+
+# | 0 | 104 | 255 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 1 | 18 | 49 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 10 | 10 | 18 | 142 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 100 | 8 | 11 | 49 | 143 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing it to reclaim memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the
+# keys, will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both to pin different Redis threads to different
+# CPUs, and also to make sure that multiple Redis instances running
+# on the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via the Redis configuration directly, both on Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
+
+# In some cases redis will emit warnings and even refuse to start if it detects
+# that the system is in a bad state. It is possible to suppress these warnings
+# by setting the following config, which takes a space delimited list of warnings
+# to suppress.
+#
+# ignore-warnings ARM64-COW-BUG
diff --git a/Redis/6.2.5/redis/role/templates/redis-slave.conf.j2 b/Redis/6.2.5/redis/role/templates/redis-slave.conf.j2
new file mode 100644
index 0000000..8704b32
--- /dev/null
+++ b/Redis/6.2.5/redis/role/templates/redis-slave.conf.j2
@@ -0,0 +1,2052 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that do not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always
+# be silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6
+# bind * -::* # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT OUT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 0.0.0.0 -::1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /run/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Force network equipment in the middle to consider the connection to be
+# alive.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Normally Redis uses the same certificate for both server functions (accepting
+# connections) and client functions (replicating from a master, establishing
+# cluster bus connections, etc.).
+#
+# Sometimes certificates are issued with attributes that designate them as
+# client-only or server-only certificates. In that case it may be desired to use
+# different certificates for incoming (server) and outgoing (client)
+# connections. To do that, use the following directives:
+#
+# tls-client-cert-file client.crt
+# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the attack surface.
+# You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# When Redis is supervised by upstart or systemd, this parameter has no impact.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# requires "expect stop" in your upstart job config
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# on startup, and updating Redis status on a regular
+# basis.
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log to the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ./redis.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
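+
+# For example, a client can operate on database 3 (with the default of 16
+# databases the valid ids are 0-15) with either of:
+#
+# redis-cli -n 3
+# SELECT 3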
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show an
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title} Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
+# Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port} TCP port listening on, or 0.
+# {tls-port} TLS port listening on, or 0.
+# {unixsocket} Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
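+
+# As an illustration, with the template above a standalone instance listening
+# on TCP port 6379 typically shows up in "ps" output roughly as:
+#
+# redis-server *:6379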
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes>
+#
+# Redis will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+# * After 3600 seconds (an hour) if at least 1 key changed
+# * After 300 seconds (5 minutes) if at least 100 keys changed
+# * After 60 seconds if at least 10000 keys changed
+#
+# You can set these explicitly by uncommenting the three following lines.
+#
+# save 3600 1
+# save 300 100
+# save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again, Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# Enables or disables full sanitation checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
+# crash later on while processing commands.
+# Options:
+# no - Never perform full sanitation
+# yes - Always perform full sanitation
+# clients - Perform full sanitation only for user connections.
+# Excludes: RDB files, RESTORE commands received from the master
+# connection, and client connections which have the
+# skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise it is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
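+
+# For example, a deployment might point this at a dedicated data directory;
+# the path below is purely illustrative, not a recommendation:
+#
+# dir /var/lib/redis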
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# +------------------+ +---------------+
+# | Master | ---> | Replica |
+# | (receive writes) | | (exact copy) |
+# +------------------+ +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
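+#
+# A minimal sketch of such a setup, assuming ACLs are in use (the username,
+# password and command set below are illustrative; verify the exact commands
+# replication needs against your Redis version's documentation). The "user"
+# line would be configured on the master, the "masteruser"/"masterauth" lines
+# on the replica:
+#
+# user replica-user on >somepassword +psync +replconf +ping
+# masteruser replica-user
+# masterauth somepassword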
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with
+# an error "SYNC with master in progress" to all commands except:
+# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+# HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, which will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled" - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing
+# the data directly from the socket. Note that this requires
+# sufficient memory; if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica. The default
+# value is 60 seconds.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the replica can endure the
+# disconnect and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated if there is at least one replica connected.
+#
+# repl-backlog-size 1mb
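+#
+# As a rough illustrative sizing (assuming a steady write rate): if replicas
+# may stay disconnected for up to 60 seconds and the master receives about
+# 1 MB/s of write traffic, a backlog of at least 60 MB is needed for a partial
+# resync to remain possible, e.g.:
+#
+# repl-backlog-size 64mb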
+
+# After a master has no connected replicas for some time, the backlog will be
+# freed. The following option configures the amount of seconds that need to
+# elapse, starting from the time the last replica disconnected, for the backlog
+# buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with other replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less or equal than M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica are
+# obtained in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the replica to connect with the master.
+#
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# a radix tree indexed by key name, what clients have which keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# check this page to understand more about the feature:
+#
+# https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
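+#
+# A minimal illustrative redis-cli session using tracking in its default mode
+# (the RESP3 protocol is required, hence the HELLO 3; the key name "foo" is
+# hypothetical):
+#
+#   HELLO 3
+#   CLIENT TRACKING ON
+#   GET foo     <- the key is now tracked; an invalidation message is pushed
+#                  to this connection if "foo" is later modified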
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
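+#
+# One simple way to generate such a password is the ACL GENPASS command, which
+# returns a pseudorandom hex string (shown as an illustrative redis-cli call):
+#
+#   ACL GENPASS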
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in a non-authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# skip-sanitize-payload RESTORE dump-payload sanitation is skipped.
+# sanitize-payload RESTORE dump-payload is sanitized (default).
+# +<command> Allow the execution of that command
+# -<command> Disallow the execution of that command
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories being @admin, @set, @sortedset, ...
+# and so forth; see the full list in the server.c file where
+# the Redis command table is described and defined.
+# The special category @all means all the commands, both those
+# currently present in the server and those that will be loaded
+# in the future via modules.
+# +<command>|subcommand Allow a specific subcommand of an otherwise
+# disabled command. Note that this form is not
+# allowed as negative like -DEBUG|SEGFAULT, but
+# only additive starting with "+".
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
+# accessed by the user. It is possible to specify multiple channel
+# patterns.
+# allchannels Alias for &*
+# resetchannels Flush the list of allowed channel patterns.
+# ><password> Add this password to the list of valid passwords for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# <<password> Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user. If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status. After "resetpass" the user has no associated
+# passwords and there is no way to authenticate without adding
+# some password (or setting it as "nopass" later).
+# reset Performs the following actions: resetpass, resetkeys, off,
+# -@all. The user returns to the same state it has immediately
+# after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass option is not compatible with the aclfile option and the
+# ACL LOAD command; these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default
+# defaults to the 'allchannels' permission.
+#
+# Future compatibility note: it is very likely that in a future version of Redis
+# the directive's default of 'allchannels' will be changed to 'resetchannels' in
+# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
+# recommended that you explicitly define Pub/Sub permissions for all users
+# rather than rely on implicit default values. Once you've set explicit
+# Pub/Sub for all existing users, you should uncomment the following line.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas fills with DELs of evicted keys, triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
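+#
+# For example, to cap the instance at roughly two gigabytes (an illustrative
+# value only; size it for your workload):
+#
+# maxmemory 2gb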
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
+# SORT (due to the STORE argument), and EXEC (if the transaction includes any
+# command that requires memory).
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Eviction processing is designed to function well with the default setting.
+# If there is an unusually large amount of write traffic, this value may need to
+# be increased. Decreasing this value may reduce latency at the risk of
+# eviction processing effectiveness.
+# 0 = minimum latency, 10 = default, 100 = process without regard to latency
+#
+# maxmemory-eviction-tenacity 10
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire key". The key space is slowly and interactively scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to avoid adding latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+# in order to make room for new data, without going over the specified
+# memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+# EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+# already exist. For example the RENAME command may delete the old key
+# content when it is replaced with another one. Similarly SUNIONSTORE
+# or SORT with STORE option may delete existing keys. The SET command
+# itself removes any old content of the specified key in order to replace
+# it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# like if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way like if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for cases where replacing user code DEL calls with
+# UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to determine
+# if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
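+
+# For illustration, the user-controlled non-blocking forms mentioned above can
+# be invoked explicitly from redis-cli (the key name is hypothetical):
+#
+#   UNLINK mybigkey
+#   FLUSHALL ASYNC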
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis client socket reads and writes
+# in different I/O threads. Since writing in particular is slow, normally
+# Redis users use pipelining in order to speed up Redis performance per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speed up Redis by about two times without
+# resorting to pipelining or sharding of the instance.
+#
+# By default threading is disabled, we suggest enabling it only in machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads, and if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
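+#
+# An illustrative benchmark invocation (the parameters are arbitrary examples):
+#
+#   redis-benchmark --threads 4 -t set,get -n 1000000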
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports three options:
+#
+# no: Don't make changes to oom-score-adj (default).
+# yes: Alias to "relative" see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+# the server starts and are then clamped to a range of -1000 to 1000.
+# Because typically the initial value is 0, they will often match the
+# absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise"
+# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
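+
+# To verify the current kernel setting you can inspect the file mentioned
+# above (an illustrative shell command):
+#
+#   cat /sys/kernel/mm/transparent_hugepage/enabled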
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
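+
+# As a worked example with the values above: if the last rewrite left a 100mb
+# AOF, the next automatic BGREWRITEAOF is triggered once the file grows past
+# 200mb (100% growth), and no automatic rewrite happens while the file is
+# still smaller than 64mb.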
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means, that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option allows the use of less automatic cluster configuration.
+# It disables both migration to orphaned masters and migration from masters
+# that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# is no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to failover their
+# master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted if not
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set; with it set there is only a write outage.
+# Without a quorum of masters, slot ownership will not change automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to setup your cluster make sure to read the documentation
+# available at https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster node address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
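+
+# The slow log itself is read at runtime, for example from redis-cli
+# (illustrative commands):
+#
+#   SLOWLOG GET 10      fetch the ten most recent entries
+#   SLOWLOG LEN         report the current number of entries
+#   SLOWLOG RESET       clear the log and reclaim its memory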
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement this
+# support.
+#
+# What do you do with Gopher nowadays? Well, Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet, others
+# believe that the mainstream internet became too controlled, and it's cool
+# to create an alternative space for people that want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were otherwise illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (as a Gopher site is called in
+# Gopher jargon), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet at a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However, other commands cannot be called before the client
+#    authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
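+
+# For example (assuming a small hash named "myhash" exists; the key name is
+# only illustrative), the encoding actually in use can be checked at runtime:
+#
+#   redis-cli OBJECT ENCODING myhash
+#
+# which reports "ziplist" while the hash stays below the thresholds above and
+# "hashtable" once either threshold is exceeded.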
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never completed and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
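+
+# For example (assuming redis-cli access to this instance), per-client output
+# buffer usage can be observed at runtime via the obl, oll and omem fields
+# reported for every connection by:
+#
+#   redis-cli CLIENT LIST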
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to prevent a protocol desynchronization (for
+# instance due to a bug in the client) from leading to unbounded memory usage
+# in the query buffer. However, you can configure it here if you have very
+# special needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However, you can change this limit
+# here; the value must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This helps, for instance, to avoid processing
+# too many clients for each background task invocation, which would otherwise
+# cause latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis offers, and
+# enables by default, the ability to use an adaptive HZ value which will
+# temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after
+# investigating how to improve performance and how the keys' LFU values change
+# over time, which can be inspected via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
+# +--------+------------+------------+------------+------------+------------+
+# | 0 | 104 | 255 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 1 | 18 | 49 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 10 | 10 | 18 | 142 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 100 | 8 | 11 | 49 | 143 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented, if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
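+
+# For example, with the default lfu-log-factor of 10 and an old counter value
+# of 100, the increment probability is P = 1/(100*10+1), i.e. about 0.001, so
+# roughly one access in a thousand bumps the counter at that frequency level.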
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing memory to be reclaimed.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the
+# keys, will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
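+
+# For example (assuming redis-cli access and a build that uses the bundled
+# Jemalloc), current fragmentation can be checked before turning the feature
+# on at runtime:
+#
+#   redis-cli INFO memory | grep mem_fragmentation_ratio
+#   redis-cli CONFIG SET activedefrag yes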
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performances of the server.
+# This is useful both to pin different Redis threads to different CPUs and to
+# make sure that multiple Redis instances running on the same host are pinned
+# to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via the Redis configuration directly, both on Linux and
+# FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
+
+# In some cases Redis will emit warnings and even refuse to start if it detects
+# that the system is in a bad state. It is possible to suppress these warnings
+# by setting the following config, which takes a space-delimited list of
+# warnings to suppress.
+#
+# ignore-warnings ARM64-COW-BUG
+slaveof {{ groups.redis[0] }} 6379
diff --git a/Redis/6.2.5/redis/role/vars/main.yml b/Redis/6.2.5/redis/role/vars/main.yml
new file mode 100644
index 0000000..90253e2
--- /dev/null
+++ b/Redis/6.2.5/redis/role/vars/main.yml
@@ -0,0 +1,19 @@
+#Image name
+image_name: redis
+
+#Image version (tag)
+image_tag: 6.2.5
+
+#Container name
+container_name: redis
+
+#Component version
+component_version: redis-6.2.5
+
+#Backup directory
+backup_path: "{{ deploy_dir }}/backup/platform/{{ old_version }}/{{ container_name }}"
+
+#Files to be backed up
+backup_items:
+ - "{{ deploy_dir }}/{{ container_name }}/redis.conf"
+ - "{{ deploy_dir }}/{{ container_name }}/docker-compose.yml"
diff --git a/full_config.yml b/full_config.yml
new file mode 100644
index 0000000..f712811
--- /dev/null
+++ b/full_config.yml
@@ -0,0 +1,143 @@
+zookeeper:
+ #Running memory of the Zookeeper.
+ java_opts: -Xmx1024m -Xms1024m
+
+mariadb:
+ #Used to cache data and index data from tables in the InnoDB storage engine.
+ innodb_buffer_pool_size: 2048
+
+nacos:
+ #Running memory of the Nacos.
+ java_opt: '-Xmx1024m -Xms1024m -Xmn256m'
+
+druid:
+ broker:
+ #Running memory of the Druid-Broker.
+ java_opts: -Xmx1024m -Xms1024m
+ #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
+ MaxDirectMemorySize: 512m
+ #This specifies a buffer size (less than 2GiB), for the storage of intermediate results
+ druid.processing.buffer.sizeBytes: 50000000
+ #The number of direct memory buffers available for merging query results.
+ druid.processing.numMergeBuffers: 4
+ #The number of processing threads to have available for parallel processing of segments.
+ druid.processing.numThreads: 5
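+    #Worked example using the sizing rule stated above with the values in this
+    #file: (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
+    #  = (5 + 1) * 50000000 bytes = 300000000 bytes (about 286 MiB), which fits
+    #within the 512m MaxDirectMemorySize configured for the Broker.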
+ coordinator:
+ #Running memory of the Druid-Coordinator.
+ java_opts: -Xmx1024m -Xms1024m
+ historical:
+ #Running memory of the Druid-Historical.
+ java_opts: -Xmx1024m -Xms1024m
+ #The size of the process's temporary cache data on disk
+ druid.segmentCache.locations: 300000000000
+ #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
+ MaxDirectMemorySize: 512m
+ #This specifies a buffer size (less than 2GiB), for the storage of intermediate results
+ druid.processing.buffer.sizeBytes: 50000000
+ #The number of direct memory buffers available for merging query results.
+ druid.processing.numMergeBuffers: 4
+ #The number of processing threads to have available for parallel processing of segments.
+ druid.processing.numThreads: 5
+ middlemanager:
+ #Running memory of the Druid-Middlemanager.
+ java_opts: -Xmx1024m -Xms1024m
+ druid.indexer.fork.property.druid.processing.numMergeBuffers: 2
+ druid.indexer.fork.property.druid.processing.buffer.sizeBytes: 20000000
+ druid.indexer.fork.property.druid.processing.numThreads: 1
+
+hadoop:
+ namenode:
+ #Running memory of the Hadoop Namenode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of Namenode RPC server threads that listen to requests from clients.
+ dfs.namenode.handler.count: 30
+ datanode:
+ #Running memory of the Hadoop Datanode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The number of server threads for the datanode.
+ dfs.datanode.handler.count: 40
+ journalnode:
+ #Running memory of the Hadoop JournalNode.
+ java_opt: '-Xmx1024m -Xms1024m'
+ zkfc:
+ #Running memory of the Hadoop DFSZKFailoverController.
+ java_opt: '-Xmx1024m -Xms1024m'
+ yarn:
+ resourcemanager:
+ #Running memory of the Hadoop ResourceManager.
+ java_opt: '-Xmx1024m -Xms1024m'
+ nodemanager:
+ #Running memory of the Hadoop NodeManager.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #Amount of physical memory, in MB, that can be allocated for containers.
+ yarn.nodemanager.resource.memory-mb: 16384
+ #The maximum allocation for every container request at the RM in MBs.
+ yarn.scheduler.maximum-allocation-mb: 16384
+ #Number of vcores that can be allocated for containers. This is used by the RM scheduler when allocating resources for containers.
+ yarn.nodemanager.resource.cpu-vcores: 48
+ #The maximum allocation for every container request at the RM in terms of virtual CPU cores.
+ yarn.scheduler.maximum-allocation-vcores: 48
+
+flink:
+ #Total Process Memory size for the JobManager.
+ jobmanager.memory.process.size: 1024M
+ #Total Process Memory size for the TaskExecutors.
+ taskmanager.memory.process.size: 10240M
+ #This is the size of off-heap memory managed for sorting, hash tables, caching of intermediate results and state backend.
+ taskmanager.memory.managed.size: 512M
+ #Framework Off-Heap Memory size for TaskExecutors. This is the size of off-heap memory reserved for TaskExecutor framework
+ taskmanager.memory.framework.off-heap.size: 128M
+ #JVM Metaspace Size for the TaskExecutors.
+ taskmanager.memory.jvm-metaspace.size: 1024M
+ #Max Network Memory size for TaskExecutors. Network Memory is off-heap memory reserved for ShuffleEnvironment.
+ taskmanager.memory.network.max: 256M
+ #The number of parallel operator or user function instances that a single TaskManager can run.
+ #This value is typically proportional to the number of physical CPU cores that the TaskManager's machine has (e.g., equal to the number of cores, or half the number of cores).
+ taskmanager.numberOfTaskSlots: 1
+
+hbase:
+ common:
+ #The HBase resource isolation function is used to group tables for storage.
+ enable_rsgroup: true
+ hmaster:
+ #Running memory of the HBase HMaster.
+ java_opt: '-Xmx1024m -Xms1024m'
+ regionserver:
+ #Running memory of the HBase HRegionserver.
+ java_opt: '-Xmx1024m -Xms1024m -Xmn128m'
+    #This defines the number of threads the region server keeps open to serve requests to tables. It should generally be set to (number of cores - 1).
+ hbase.regionserver.handler.count: 40
+ #If any one of a column families' HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
+ hbase.hregion.max.filesize: 10737418240
+ #Indicates the memory used by all read caches. The value can be the actual memory value, expressed in MB
+ hbase.bucketcache.size: 100
+
+kafka:
+ #Running memory of the Kafka.
+ java_opt: '-Xmx1024m -Xms1024m'
+ #The minimum age of a log file to be eligible for deletion due to age
+ log.retention.hours: 168
+  #A size-based retention policy for logs, in bytes.
+ log.retention.bytes: 10737418240
+
+clickhouse:
+ #Limit on total memory usage. Zero means Unlimited.
+ max_server_memory_usage: 30000000000
+ #Sets the number of threads performing background merges and mutations for tables with MergeTree engines.
+ background_pool_size: 16
+
+hos:
+  #Running memory of the HOS service.
+ java_opt: '-Xmx1024m -Xms1024m -Xmn512m'
+  #Download files quickly. Used for HBase with memory larger than 20GB. open: 1, close: 0
+ isQuickDownloadFile: 0
+  #Whether to enable SSL. open: 1, close: 0
+ enable_ssl: 0
+  #The Nacos namespace in which the configuration is stored.
+ nacos.config.namespace: prod
+
+ignite:
+  #Running memory of Ignite.
+ java_opt: '-Xmx1024m -Xms1024m'
+  #Set the region max size equal to the physical RAM size (5 GB).
+ maxSize: '#{5L * 1024 * 1024 * 1024}'
diff --git a/full_hosts b/full_hosts
new file mode 100644
index 0000000..8393e20
--- /dev/null
+++ b/full_hosts
@@ -0,0 +1,118 @@
+#==============================================================================
+# Basic Components
+#
+# Orchestration & Coordinator & Configuration & Cold Storage
+#==============================================================================
+
+#Configure the nodes where MariaDB is deployed. Uses master-master replication mode; maximum of 2 servers.
+[mariadb]
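+#Hosts are listed one per line under each group, for example (placeholder
+#addresses, replace them with your own nodes):
+#  192.168.1.11
+#  192.168.1.12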
+
+
+#Apache Zookeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.
+#At least 3 servers, and it is strongly recommended that you have an odd number of servers.
+[zookeeper]
+
+
+#Alibaba Nacos is an easy-to-use dynamic service discovery, configuration and service management platform.
+#At least 3 servers; multi-node HA mode.
+[nacos]
+
+
+#Apache Hadoop HDFS (Hadoop Distributed File System)
+#At least 3 servers. An HDFS cluster consists of two NameNodes and a certain number of DataNode nodes.
+[hdfs]
+
+
+#==============================================================================
+# BigData Processing Components
+#
+# Big data is a term that refers to the massive volume, variety, and velocity of data that is generated from various sources and needs to be stored, processed, and analyzed efficiently.
+# The Big Data processing component is used to provide a platform for fast and efficient processing
+#==============================================================================
+
+#Apache Kafka is a distributed event streaming platform, used for high-performance data pipelines and streaming analytics.
+#At least 3 servers. By default, CMAK (a management tool) is installed on the first server.
+[kafka]
+
+
+#Apache Hadoop Yarn, including the Flink runtime environment.
+#Yarn is the resource management and job scheduling technology in the open source Hadoop distributed processing framework.
+#At least 3 servers. A Yarn cluster consists of two ResourceManager (RM) nodes and a certain number of NodeManager (NM) nodes.
+[yarn]
+
+
+#Groot Stream is a real-time data stream processing platform.
+[grootstream]
+
+#==============================================================================
+# BigData Storage Components
+#
+# These components provide scalable storage, caching and analytical query
+# engines for the data produced by the processing layer.
+#==============================================================================
+
+#Apache HBase hosts very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.
+#At least 3 servers. An HBase cluster consists of three HMaster nodes and a certain number of HRegionServer nodes.
+[hbase]
+
+
+#Apache Druid is a high performance, real-time analytics database that delivers sub-second queries on streaming and batch data at scale and under load.
+#At least 3 servers. A Druid cluster consists of two master/query nodes and a certain number of worker nodes.
+[druid]
+
+
+#Yandex ClickHouse is the fastest and most resource efficient open-source database for real-time apps and analytics.
+#At least 3 servers. A ClickHouse cluster consists of two query nodes and a certain number of data nodes.
+[clickhouse]
+
+
+#ArangoDB is a scalable graph database system to drive value from connected data, faster.
+#Only single-server deployment is supported.
+[arangodb]
+
+#Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker.
+#Supports single-node and master-slave modes
+[redis]
+
+#Apache Ignite is a leading distributed database management system for high-performance computing with in-memory speed
+#Supports single-node and cluster modes
+[ignite]
+
+#==============================================================================
+# OLAP Self-research service
+#
+#==============================================================================
+#The default load balancer includes keepalived/Galaxy-gateway-nginx.
+[loadbalancer]
+
+
+#Chproxy is an HTTP proxy and load balancer for ClickHouse
+[chproxy]
+
+
+#Galaxy-hos-service is a distributed object storage service.
+#Included components: Keepalived/Nginx/galaxy-hos-service
+#At least 2 servers. The keepalived and nginx services are deployed on the first two nodes by default.
+[galaxy_hos_service]
+
+
+#The query gateway. Provides a unified query entry point.
+[galaxy_qgw_service]
+
+
+#A lightweight distributed task scheduling framework.
+#Included components: Galaxy-job-admin/Galaxy-job-executor
+[galaxy_job_service]
+
+#
+[saved_query_scheduler]
+
+
+#==============================================================================
+# Monitoring module
+#
+#==============================================================================
+
+#Receives metrics pushed by programs. Pushgateway then exposes these metrics to Prometheus.
+[pushgateway]
+