Diffstat (limited to 'MSH-PIC')
-rw-r--r--MSH-PIC/clickhouse/clickhouse-server/clickhouse-server355
-rw-r--r--MSH-PIC/clickhouse/clickhouse-server/clickhouse-server.pid1
-rw-r--r--MSH-PIC/clickhouse/clickhouse-server/config.xml403
-rw-r--r--MSH-PIC/clickhouse/clickhouse-server/metrika.xml55
-rw-r--r--MSH-PIC/clickhouse/clickhouse-server/users.xml214
-rw-r--r--MSH-PIC/cmak/docker-compose.yml20
-rw-r--r--MSH-PIC/cmak/kafka_client_jaas.conf5
-rw-r--r--MSH-PIC/deployment configuration/tsg/MSH_config.zipbin0 -> 5271 bytes
-rw-r--r--MSH-PIC/deployment configuration/tsg/components.yml217
-rw-r--r--MSH-PIC/deployment configuration/tsg/config.yml98
-rw-r--r--MSH-PIC/deployment configuration/tsg/hosts101
-rw-r--r--MSH-PIC/deployment configuration/tsg/services.yml39
-rw-r--r--MSH-PIC/flink/bin/bash-java-utils.jarbin0 -> 2010313 bytes
-rw-r--r--MSH-PIC/flink/bin/config.sh560
-rw-r--r--MSH-PIC/flink/bin/find-flink-home.sh28
-rw-r--r--MSH-PIC/flink/bin/flink55
-rw-r--r--MSH-PIC/flink/bin/flink-console.sh111
-rw-r--r--MSH-PIC/flink/bin/flink-daemon.sh194
-rw-r--r--MSH-PIC/flink/bin/historyserver.sh39
-rw-r--r--MSH-PIC/flink/bin/jobmanager.sh64
-rw-r--r--MSH-PIC/flink/bin/kubernetes-jobmanager.sh42
-rw-r--r--MSH-PIC/flink/bin/kubernetes-session.sh39
-rw-r--r--MSH-PIC/flink/bin/kubernetes-taskmanager.sh45
-rw-r--r--MSH-PIC/flink/bin/mesos-appmaster-job.sh23
-rw-r--r--MSH-PIC/flink/bin/mesos-appmaster.sh23
-rw-r--r--MSH-PIC/flink/bin/mesos-jobmanager.sh52
-rw-r--r--MSH-PIC/flink/bin/mesos-taskmanager.sh43
-rw-r--r--MSH-PIC/flink/bin/pyflink-shell.sh84
-rw-r--r--MSH-PIC/flink/bin/set_flink_yarn_env.sh7
-rw-r--r--MSH-PIC/flink/bin/sql-client.sh88
-rw-r--r--MSH-PIC/flink/bin/standalone-job.sh55
-rw-r--r--MSH-PIC/flink/bin/start-cluster.sh53
-rw-r--r--MSH-PIC/flink/bin/start-zookeeper-quorum.sh46
-rw-r--r--MSH-PIC/flink/bin/stop-cluster.sh47
-rw-r--r--MSH-PIC/flink/bin/stop-zookeeper-quorum.sh46
-rw-r--r--MSH-PIC/flink/bin/taskmanager.sh80
-rw-r--r--MSH-PIC/flink/bin/yarn-session.sh38
-rw-r--r--MSH-PIC/flink/bin/zookeeper.sh68
-rw-r--r--MSH-PIC/flink/conf/core-site.xml58
-rw-r--r--MSH-PIC/flink/conf/flink-conf.yaml207
-rw-r--r--MSH-PIC/flink/conf/hdfs-site.xml142
-rw-r--r--MSH-PIC/flink/conf/log4j-cli.properties67
-rw-r--r--MSH-PIC/flink/conf/log4j-console.properties66
-rw-r--r--MSH-PIC/flink/conf/log4j-session.properties40
-rw-r--r--MSH-PIC/flink/conf/log4j.properties59
-rw-r--r--MSH-PIC/flink/conf/log4j2.component.properties2
-rw-r--r--MSH-PIC/flink/conf/logback-console.xml64
-rw-r--r--MSH-PIC/flink/conf/logback-session.xml39
-rw-r--r--MSH-PIC/flink/conf/logback.xml58
-rw-r--r--MSH-PIC/flink/conf/masters2
-rw-r--r--MSH-PIC/flink/conf/workers1
-rw-r--r--MSH-PIC/flink/conf/yarn-site.xml224
-rw-r--r--MSH-PIC/flink/conf/zoo.cfg36
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS78
-rw-r--r--MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD78
-rw-r--r--MSH-PIC/flink/topology/completion/service_flow_config.properties78
-rw-r--r--MSH-PIC/flink/topology/completion/start.sh67
-rw-r--r--MSH-PIC/flink/topology/completion/stop.sh34
-rw-r--r--MSH-PIC/flink/topology/data/asn_v4.mmdbbin0 -> 5873392 bytes
-rw-r--r--MSH-PIC/flink/topology/data/asn_v6.mmdbbin0 -> 3011336 bytes
-rw-r--r--MSH-PIC/flink/topology/data/ip_v4_built_in.mmdbbin0 -> 28626992 bytes
-rw-r--r--MSH-PIC/flink/topology/data/ip_v4_user_defined.mmdbbin0 -> 621 bytes
-rw-r--r--MSH-PIC/flink/topology/data/ip_v6_built_in.mmdbbin0 -> 7560407 bytes
-rw-r--r--MSH-PIC/flink/topology/data/ip_v6_user_defined.mmdbbin0 -> 1197 bytes
-rw-r--r--MSH-PIC/flink/topology/data/keystore.jksbin0 -> 787 bytes
-rw-r--r--MSH-PIC/flink/topology/data/truststore.jksbin0 -> 583 bytes
-rw-r--r--MSH-PIC/flink/topology/relationship-gtpc-user/config/RELATIONSHIP-GTPC-USER33
-rw-r--r--MSH-PIC/flink/topology/relationship-gtpc-user/service_flow_config.properties33
-rw-r--r--MSH-PIC/flink/topology/relationship-gtpc-user/start.sh67
-rw-r--r--MSH-PIC/flink/topology/relationship-gtpc-user/stop.sh34
-rw-r--r--MSH-PIC/flink/topology/relationship-radius-account/config/RELATIONSHIP-RADIUS-ACCOUNT28
-rw-r--r--MSH-PIC/flink/topology/relationship-radius-account/service_flow_config.properties28
-rw-r--r--MSH-PIC/flink/topology/relationship-radius-account/start.sh67
-rw-r--r--MSH-PIC/flink/topology/relationship-radius-account/stop.sh34
-rw-r--r--MSH-PIC/galaxy-hos-nginx/conf/nginx.conf85
-rw-r--r--MSH-PIC/galaxy-hos-nginx/conf/self-sign.crt13
-rw-r--r--MSH-PIC/galaxy-hos-nginx/conf/self-sign.key8
-rw-r--r--MSH-PIC/galaxy-hos-nginx/docker-compose.yml16
-rw-r--r--MSH-PIC/galaxy-hos-service/config/application.yml23
-rw-r--r--MSH-PIC/galaxy-hos-service/config/log4j2-dev.xml56
-rw-r--r--MSH-PIC/galaxy-hos-service/docker-compose.yml15
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_active_defence_event_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_dos_event_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_gtpc_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_interim_session_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_proxy_event_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_onff_tsgv3.sh52
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_security_event_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_session_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_sys_packet_capture_event_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_transaction_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_voip_record_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_xxxxxx_tsgv3.sh51
-rw-r--r--MSH-PIC/gohangout/check_status.sh11
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_active_defence_event_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_dos_event_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_gtpc_record_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_interim_session_record_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_proxy_event_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_radius_onff_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_radius_record_tsgv3.yml31
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_security_event_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_session_record_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_sys_packet_capture_event_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_transaction_record_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/conf/k2ck_voip_record_tsgv3.yml30
-rw-r--r--MSH-PIC/gohangout/docker-compose.yml15
-rw-r--r--MSH-PIC/gohangout/start_all.sh17
-rw-r--r--MSH-PIC/hadoop/bin/container-executorbin0 -> 160127 bytes
-rw-r--r--MSH-PIC/hadoop/bin/hadoop169
-rw-r--r--MSH-PIC/hadoop/bin/hadoop.cmd272
-rw-r--r--MSH-PIC/hadoop/bin/hdfs308
-rw-r--r--MSH-PIC/hadoop/bin/hdfs.cmd234
-rw-r--r--MSH-PIC/hadoop/bin/ini_hdfs.sh46
-rw-r--r--MSH-PIC/hadoop/bin/mapred172
-rw-r--r--MSH-PIC/hadoop/bin/mapred.cmd216
-rw-r--r--MSH-PIC/hadoop/bin/rcc61
-rw-r--r--MSH-PIC/hadoop/bin/set_hdfs_env.sh71
-rw-r--r--MSH-PIC/hadoop/bin/set_yarn_env.sh58
-rw-r--r--MSH-PIC/hadoop/bin/test-container-executorbin0 -> 204075 bytes
-rw-r--r--MSH-PIC/hadoop/bin/yarn330
-rw-r--r--MSH-PIC/hadoop/bin/yarn.cmd332
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/capacity-scheduler.xml134
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/configuration.xsl40
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/container-executor.cfg4
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/core-site.xml58
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hadoop-env.cmd81
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hadoop-env.sh105
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hadoop-metrics.properties75
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties68
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hadoop-policy.xml226
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/hdfs-site.xml142
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/httpfs-env.sh53
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/httpfs-log4j.properties35
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/httpfs-signature.secret1
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/httpfs-site.xml17
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/kms-acls.xml135
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/kms-env.sh55
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/kms-log4j.properties38
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/kms-site.xml173
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/log4j.properties268
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/log4j.properties_bak268
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/mapred-env.cmd20
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/mapred-env.sh27
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/mapred-queues.xml.template92
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/mapred-site.xml33
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/slaves6
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/ssl-client.xml.example80
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/ssl-server.xml.example78
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/yarn-env.cmd60
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/yarn-env.sh127
-rw-r--r--MSH-PIC/hadoop/etc/hadoop/yarn-site.xml224
-rw-r--r--MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh42
-rw-r--r--MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh53
-rw-r--r--MSH-PIC/hadoop/sbin/dae-hdfsworker.sh47
-rw-r--r--MSH-PIC/hadoop/sbin/dae-yarnhistory.sh41
-rw-r--r--MSH-PIC/hadoop/sbin/dae-yarnmaster.sh41
-rw-r--r--MSH-PIC/hadoop/sbin/dae-yarnworker.sh41
-rw-r--r--MSH-PIC/hadoop/sbin/distribute-exclude.sh81
-rw-r--r--MSH-PIC/hadoop/sbin/hadoop-daemon.sh214
-rw-r--r--MSH-PIC/hadoop/sbin/hadoop-daemons.sh36
-rw-r--r--MSH-PIC/hadoop/sbin/hdfs-config.cmd43
-rw-r--r--MSH-PIC/hadoop/sbin/hdfs-config.sh36
-rw-r--r--MSH-PIC/hadoop/sbin/httpfs.sh65
-rw-r--r--MSH-PIC/hadoop/sbin/kms.sh83
-rw-r--r--MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh147
-rw-r--r--MSH-PIC/hadoop/sbin/refresh-namenodes.sh48
-rw-r--r--MSH-PIC/hadoop/sbin/slaves.sh67
-rw-r--r--MSH-PIC/hadoop/sbin/start-all.cmd52
-rw-r--r--MSH-PIC/hadoop/sbin/start-all.sh38
-rw-r--r--MSH-PIC/hadoop/sbin/start-balancer.sh27
-rw-r--r--MSH-PIC/hadoop/sbin/start-dfs.cmd41
-rw-r--r--MSH-PIC/hadoop/sbin/start-dfs.sh118
-rw-r--r--MSH-PIC/hadoop/sbin/start-secure-dns.sh33
-rw-r--r--MSH-PIC/hadoop/sbin/start-yarn.cmd47
-rw-r--r--MSH-PIC/hadoop/sbin/start-yarn.sh35
-rw-r--r--MSH-PIC/hadoop/sbin/stop-all.cmd52
-rw-r--r--MSH-PIC/hadoop/sbin/stop-all.sh38
-rw-r--r--MSH-PIC/hadoop/sbin/stop-balancer.sh28
-rw-r--r--MSH-PIC/hadoop/sbin/stop-dfs.cmd41
-rw-r--r--MSH-PIC/hadoop/sbin/stop-dfs.sh89
-rw-r--r--MSH-PIC/hadoop/sbin/stop-secure-dns.sh33
-rw-r--r--MSH-PIC/hadoop/sbin/stop-yarn.cmd47
-rw-r--r--MSH-PIC/hadoop/sbin/stop-yarn.sh35
-rw-r--r--MSH-PIC/hadoop/sbin/yarn-daemon.sh161
-rw-r--r--MSH-PIC/hadoop/sbin/yarn-daemons.sh38
-rw-r--r--MSH-PIC/hbase/bin/alter-hbase-table.sh19
-rw-r--r--MSH-PIC/hbase/bin/considerAsDead.sh61
-rw-r--r--MSH-PIC/hbase/bin/create-hbase-table.sh23
-rw-r--r--MSH-PIC/hbase/bin/create-phoenix-table.sh394
-rw-r--r--MSH-PIC/hbase/bin/dae-hmaster.sh41
-rw-r--r--MSH-PIC/hbase/bin/dae-hregion.sh40
-rw-r--r--MSH-PIC/hbase/bin/draining_servers.rb156
-rw-r--r--MSH-PIC/hbase/bin/get-active-master.rb38
-rw-r--r--MSH-PIC/hbase/bin/graceful_stop.sh186
-rw-r--r--MSH-PIC/hbase/bin/hbase687
-rw-r--r--MSH-PIC/hbase/bin/hbase-cleanup.sh147
-rw-r--r--MSH-PIC/hbase/bin/hbase-common.sh41
-rw-r--r--MSH-PIC/hbase/bin/hbase-config.cmd78
-rw-r--r--MSH-PIC/hbase/bin/hbase-config.sh170
-rw-r--r--MSH-PIC/hbase/bin/hbase-daemon.sh371
-rw-r--r--MSH-PIC/hbase/bin/hbase-daemons.sh62
-rw-r--r--MSH-PIC/hbase/bin/hbase-jruby22
-rw-r--r--MSH-PIC/hbase/bin/hbase.cmd469
-rw-r--r--MSH-PIC/hbase/bin/hirb.rb264
-rw-r--r--MSH-PIC/hbase/bin/local-master-backup.sh65
-rw-r--r--MSH-PIC/hbase/bin/local-regionservers.sh74
-rw-r--r--MSH-PIC/hbase/bin/master-backup.sh74
-rw-r--r--MSH-PIC/hbase/bin/region_mover.rb24
-rw-r--r--MSH-PIC/hbase/bin/region_status.rb150
-rw-r--r--MSH-PIC/hbase/bin/regionservers.sh83
-rw-r--r--MSH-PIC/hbase/bin/replication/copy_tables_desc.rb104
-rw-r--r--MSH-PIC/hbase/bin/rolling-restart.sh227
-rw-r--r--MSH-PIC/hbase/bin/rsgroup.sh23
-rw-r--r--MSH-PIC/hbase/bin/set_hbase_env.sh29
-rw-r--r--MSH-PIC/hbase/bin/shutdown_regionserver.rb56
-rw-r--r--MSH-PIC/hbase/bin/start-hbase.cmd61
-rw-r--r--MSH-PIC/hbase/bin/start-hbase.sh65
-rw-r--r--MSH-PIC/hbase/bin/stop-hbase.cmd54
-rw-r--r--MSH-PIC/hbase/bin/stop-hbase.sh68
-rw-r--r--MSH-PIC/hbase/bin/test/process_based_cluster.sh110
-rw-r--r--MSH-PIC/hbase/bin/zookeepers.sh59
-rw-r--r--MSH-PIC/hbase/conf/backup-masters2
-rw-r--r--MSH-PIC/hbase/conf/core-site.xml58
-rw-r--r--MSH-PIC/hbase/conf/hadoop-metrics2-hbase.properties44
-rw-r--r--MSH-PIC/hbase/conf/hbase-env.cmd83
-rw-r--r--MSH-PIC/hbase/conf/hbase-env.sh143
-rw-r--r--MSH-PIC/hbase/conf/hbase-policy.xml53
-rw-r--r--MSH-PIC/hbase/conf/hbase-site.xml205
-rw-r--r--MSH-PIC/hbase/conf/hdfs-site.xml142
-rw-r--r--MSH-PIC/hbase/conf/log4j-hbtop.properties27
-rw-r--r--MSH-PIC/hbase/conf/log4j.properties124
-rw-r--r--MSH-PIC/hbase/conf/regionservers3
-rw-r--r--MSH-PIC/hbase/conf/yarn-site.xml224
-rw-r--r--MSH-PIC/kafka/bin/connect-distributed.sh41
-rw-r--r--MSH-PIC/kafka/bin/connect-standalone.sh41
-rw-r--r--MSH-PIC/kafka/bin/create_topic.sh73
-rw-r--r--MSH-PIC/kafka/bin/dae-kafka.sh49
-rw-r--r--MSH-PIC/kafka/bin/kafka-acls.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-broker-api-versions.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-configs.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-console-consumer.sh22
-rw-r--r--MSH-PIC/kafka/bin/kafka-console-producer.sh21
-rw-r--r--MSH-PIC/kafka/bin/kafka-consumer-groups.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-consumer-perf-test.sh20
-rw-r--r--MSH-PIC/kafka/bin/kafka-delete-records.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-log-dirs.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-mirror-maker.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-operation.sh40
-rw-r--r--MSH-PIC/kafka/bin/kafka-preferred-replica-election.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-producer-perf-test.sh20
-rw-r--r--MSH-PIC/kafka/bin/kafka-reassign-partitions.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-replay-log-producer.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-replica-verification.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-run-class.sh272
-rw-r--r--MSH-PIC/kafka/bin/kafka-server-start.sh46
-rw-r--r--MSH-PIC/kafka/bin/kafka-server-start.sh.bak44
-rw-r--r--MSH-PIC/kafka/bin/kafka-server-stop.sh24
-rw-r--r--MSH-PIC/kafka/bin/kafka-simple-consumer-shell.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-streams-application-reset.sh21
-rw-r--r--MSH-PIC/kafka/bin/kafka-topics.sh17
-rw-r--r--MSH-PIC/kafka/bin/kafka-verifiable-consumer.sh20
-rw-r--r--MSH-PIC/kafka/bin/kafka-verifiable-producer.sh20
-rw-r--r--MSH-PIC/kafka/bin/kflogdelete.sh11
-rw-r--r--MSH-PIC/kafka/bin/set_kafka_env.sh17
-rw-r--r--MSH-PIC/kafka/bin/trogdor.sh50
-rw-r--r--MSH-PIC/kafka/bin/windows/connect-distributed.bat34
-rw-r--r--MSH-PIC/kafka/bin/windows/connect-standalone.bat34
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-acls.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-broker-api-versions.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-configs.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-console-consumer.bat20
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-console-producer.bat20
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-consumer-groups.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-consumer-offset-checker.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-consumer-perf-test.bat20
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-mirror-maker.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-preferred-replica-election.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-producer-perf-test.bat20
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-reassign-partitions.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-replay-log-producer.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-replica-verification.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-run-class.bat191
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-server-start.bat38
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-server-stop.bat18
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-simple-consumer-shell.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/kafka-topics.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/zookeeper-server-start.bat30
-rw-r--r--MSH-PIC/kafka/bin/windows/zookeeper-server-stop.bat17
-rw-r--r--MSH-PIC/kafka/bin/windows/zookeeper-shell.bat22
-rw-r--r--MSH-PIC/kafka/bin/zookeeper-security-migration.sh17
-rw-r--r--MSH-PIC/kafka/bin/zookeeper-server-start.sh44
-rw-r--r--MSH-PIC/kafka/bin/zookeeper-server-stop.sh24
-rw-r--r--MSH-PIC/kafka/bin/zookeeper-shell.sh23
-rw-r--r--MSH-PIC/kafka/config/client-ssl.properties6
-rw-r--r--MSH-PIC/kafka/config/connect-console-sink.properties19
-rw-r--r--MSH-PIC/kafka/config/connect-console-source.properties19
-rw-r--r--MSH-PIC/kafka/config/connect-distributed.properties93
-rw-r--r--MSH-PIC/kafka/config/connect-file-sink.properties20
-rw-r--r--MSH-PIC/kafka/config/connect-file-source.properties20
-rw-r--r--MSH-PIC/kafka/config/connect-log4j.properties25
-rw-r--r--MSH-PIC/kafka/config/connect-standalone.properties48
-rw-r--r--MSH-PIC/kafka/config/consumer.properties26
-rw-r--r--MSH-PIC/kafka/config/kafka_client_jaas.conf5
-rw-r--r--MSH-PIC/kafka/config/kafka_server_jaas.conf7
-rw-r--r--MSH-PIC/kafka/config/keystore.jksbin0 -> 787 bytes
-rw-r--r--MSH-PIC/kafka/config/log4j.properties92
-rw-r--r--MSH-PIC/kafka/config/log4j.properties_bak92
-rw-r--r--MSH-PIC/kafka/config/producer.properties45
-rw-r--r--MSH-PIC/kafka/config/sasl-config.properties7
-rw-r--r--MSH-PIC/kafka/config/server.properties171
-rw-r--r--MSH-PIC/kafka/config/server.properties.bak136
-rw-r--r--MSH-PIC/kafka/config/tools-log4j.properties21
-rw-r--r--MSH-PIC/kafka/config/truststore.jksbin0 -> 583 bytes
-rw-r--r--MSH-PIC/kafka/config/zookeeper.properties20
-rw-r--r--MSH-PIC/packet_dump/docker-compose.yml54
-rw-r--r--MSH-PIC/packet_dump/firewall/config/packet_dump.yml34
-rw-r--r--MSH-PIC/packet_dump/firewall/logs/log8
-rw-r--r--MSH-PIC/packet_dump/firewall/logs/log.20230707000063
-rw-r--r--MSH-PIC/packet_dump/firewall/logs/log.2023070800008
-rw-r--r--MSH-PIC/packet_dump/firewall/status0
-rw-r--r--MSH-PIC/packet_dump/rtp/config/packet_dump.yml37
-rw-r--r--MSH-PIC/packet_dump/rtp/logs/log8
-rw-r--r--MSH-PIC/packet_dump/rtp/logs/log.20230707000063
-rw-r--r--MSH-PIC/packet_dump/rtp/logs/log.2023070800008
-rw-r--r--MSH-PIC/packet_dump/rtp/status0
-rw-r--r--MSH-PIC/packet_dump/troubleshooting/config/packet_dump.yml35
-rw-r--r--MSH-PIC/packet_dump/troubleshooting/status0
-rw-r--r--MSH-PIC/phoenix-hbase/bin/argparse-1.4.0/argparse.py2392
-rw-r--r--MSH-PIC/phoenix-hbase/bin/core-site.xml58
-rw-r--r--MSH-PIC/phoenix-hbase/bin/daemon.py999
-rw-r--r--MSH-PIC/phoenix-hbase/bin/end2endTest.py47
-rw-r--r--MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-hbase.properties36
-rw-r--r--MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-phoenix.properties70
-rw-r--r--MSH-PIC/phoenix-hbase/bin/hbase-omid-client-config.yml33
-rw-r--r--MSH-PIC/phoenix-hbase/bin/hbase-site.xml205
-rw-r--r--MSH-PIC/phoenix-hbase/bin/hdfs-site.xml142
-rw-r--r--MSH-PIC/phoenix-hbase/bin/log4j.properties76
-rw-r--r--MSH-PIC/phoenix-hbase/bin/performance.py147
-rw-r--r--MSH-PIC/phoenix-hbase/bin/pherf-standalone.py72
-rw-r--r--MSH-PIC/phoenix-hbase/bin/phoenix_utils.py218
-rw-r--r--MSH-PIC/phoenix-hbase/bin/psql.py73
-rw-r--r--MSH-PIC/phoenix-hbase/bin/readme.txt50
-rw-r--r--MSH-PIC/phoenix-hbase/bin/sqlline.py120
-rw-r--r--MSH-PIC/phoenix-hbase/bin/startsql.sh12
-rw-r--r--MSH-PIC/phoenix-hbase/bin/traceserver.py195
-rw-r--r--MSH-PIC/zookeeper/bin/README.txt6
-rw-r--r--MSH-PIC/zookeeper/bin/change_myid.sh14
-rw-r--r--MSH-PIC/zookeeper/bin/create_cmak_node.sh7
-rw-r--r--MSH-PIC/zookeeper/bin/dae-zookeeper.sh39
-rw-r--r--MSH-PIC/zookeeper/bin/old/zkEnv.sh115
-rw-r--r--MSH-PIC/zookeeper/bin/old/zkServer.sh225
-rw-r--r--MSH-PIC/zookeeper/bin/set_zk_env.sh16
-rw-r--r--MSH-PIC/zookeeper/bin/zkCleanup.sh51
-rw-r--r--MSH-PIC/zookeeper/bin/zkCli.cmd24
-rw-r--r--MSH-PIC/zookeeper/bin/zkCli.sh41
-rw-r--r--MSH-PIC/zookeeper/bin/zkEnv.cmd49
-rw-r--r--MSH-PIC/zookeeper/bin/zkEnv.sh116
-rw-r--r--MSH-PIC/zookeeper/bin/zkServer.cmd24
-rw-r--r--MSH-PIC/zookeeper/bin/zkServer.sh225
-rw-r--r--MSH-PIC/zookeeper/bin/zklogdelete.sh18
-rw-r--r--MSH-PIC/zookeeper/conf/configuration.xsl24
-rw-r--r--MSH-PIC/zookeeper/conf/java.env6
-rw-r--r--MSH-PIC/zookeeper/conf/log4j.properties63
-rw-r--r--MSH-PIC/zookeeper/conf/zoo.cfg51
-rw-r--r--MSH-PIC/zookeeper/conf/zoo_sample.cfg28
383 files changed, 29553 insertions, 0 deletions
diff --git a/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server b/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server
new file mode 100644
index 0000000..7fe5acd
--- /dev/null
+++ b/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server
@@ -0,0 +1,355 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: clickhouse-server
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Required-Start:
+# Required-Stop:
+# Short-Description: Yandex clickhouse-server daemon
+### END INIT INFO
+
+
+CLICKHOUSE_USER=clickhouse
+CLICKHOUSE_GROUP=${CLICKHOUSE_USER}
+SHELL=/bin/bash
+PROGRAM=clickhouse-server
+GENERIC_PROGRAM=clickhouse
+EXTRACT_FROM_CONFIG=${GENERIC_PROGRAM}-extract-from-config
+SYSCONFDIR=/data/tsg/olap/clickhouse/$PROGRAM
+CLICKHOUSE_LOGDIR=/data/tsg/olap/clickhouse/clickhouse-server
+CLICKHOUSE_LOGDIR_USER=root
+CLICKHOUSE_DATADIR_OLD=/data/tsg/olap/clickhouse/clickhouse_old
+LOCALSTATEDIR=/var/lock
+BINDIR=/usr/bin
+CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
+CLICKHOUSE_CONFIG=/data/tsg/olap/clickhouse/clickhouse-server/config.xml
+LOCKFILE=$LOCALSTATEDIR/$PROGRAM
+RETVAL=0
+CLICKHOUSE_PIDDIR=/data/tsg/olap/clickhouse/$PROGRAM
+CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
+
+# Some systems lack "flock"
+command -v flock >/dev/null && FLOCK=flock
+
+
+# Override defaults from optional config file
+test -f /etc/default/clickhouse && . /etc/default/clickhouse
+
+# On x86_64, check for required instruction set.
+if uname -mpi | grep -q 'x86_64'; then
+ if ! grep -q 'sse4_2' /proc/cpuinfo; then
+ # On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check.
+ if ! grep -q 'Common KVM processor' /proc/cpuinfo; then
+
+ # Some other VMs also report wrong flags in cpuinfo.
+ # Tricky way to test for instruction set:
+ # create temporary binary and run it;
+            #  if it gets an illegal instruction signal,
+            #  then the required instruction set is really not supported.
+ #
+ # Generated this way:
+ # gcc -xc -Os -static -nostdlib - <<< 'void _start() { __asm__("pcmpgtq %%xmm0, %%xmm1; mov $0x3c, %%rax; xor %%rdi, %%rdi; syscall":::"memory"); }' && strip -R .note.gnu.build-id -R .comment -R .eh_frame -s ./a.out && gzip -c -9 ./a.out | base64 -w0; echo
+
+ if ! (echo -n 'H4sICAwAW1cCA2Eub3V0AKt39XFjYmRkgAEmBjsGEI+H0QHMd4CKGyCUAMUsGJiBJDNQNUiYlQEZOKDQclB9cnD9CmCSBYqJBRxQOvBpSQobGfqIAWn8FuYnPI4fsAGyPQz/87MeZtArziguKSpJTGLQK0mtKGGgGHADMSgoYH6AhTMPNHyE0NQzYuEzYzEXFr6CBPQDANAsXKTwAQAA' | base64 -d | gzip -d > /tmp/clickhouse_test_sse42 && chmod a+x /tmp/clickhouse_test_sse42 && /tmp/clickhouse_test_sse42); then
+ echo 'Warning! SSE 4.2 instruction set is not supported'
+ #exit 3
+ fi
+ fi
+ fi
+fi
+
+
+SUPPORTED_COMMANDS="{start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}"
+is_supported_command()
+{
+ echo "$SUPPORTED_COMMANDS" | grep -E "(\{|\|)$1(\||})" &> /dev/null
+}
+
+
+is_running()
+{
+ [ -r "$CLICKHOUSE_PIDFILE" ] && pgrep -s $(cat "$CLICKHOUSE_PIDFILE") 1> /dev/null 2> /dev/null
+}
+
+
+wait_for_done()
+{
+ while is_running; do
+ sleep 1
+ done
+}
+
+
+die()
+{
+ echo $1 >&2
+ exit 1
+}
+
+
+# Check that configuration file is Ok.
+check_config()
+{
+ if [ -x "$BINDIR/$EXTRACT_FROM_CONFIG" ]; then
+ su -s $SHELL ${CLICKHOUSE_USER} -c "$BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. You may use forcerestart if you are sure.";
+ fi
+}
+
+
+initdb()
+{
+ if [ -d ${SYSCONFDIR} ]; then
+ su -s /bin/sh ${CLICKHOUSE_USER} -c "test -w ${SYSCONFDIR}" || chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${SYSCONFDIR}
+ fi
+
+ if [ -x "$BINDIR/$EXTRACT_FROM_CONFIG" ]; then
+ CLICKHOUSE_DATADIR_FROM_CONFIG=$(su -s $SHELL ${CLICKHOUSE_USER} -c "$BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path")
+ if [ "(" "$?" -ne "0" ")" -o "(" -z "${CLICKHOUSE_DATADIR_FROM_CONFIG}" ")" ]; then
+ die "Cannot obtain value of path from config file: ${CLICKHOUSE_CONFIG}";
+ fi
+ echo "Path to data directory in ${CLICKHOUSE_CONFIG}: ${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ else
+ CLICKHOUSE_DATADIR_FROM_CONFIG="/var/lib/clickhouse"
+ fi
+
+    if ! getent passwd ${CLICKHOUSE_USER} >/dev/null; then
+        echo "Can't chown to non-existing user ${CLICKHOUSE_USER}"
+        return
+    fi
+    if ! getent group ${CLICKHOUSE_GROUP} >/dev/null; then
+        echo "Can't chown to non-existing group ${CLICKHOUSE_GROUP}"
+ return
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -r ${CLICKHOUSE_CONFIG}"); then
+ echo "Warning! clickhouse config [${CLICKHOUSE_CONFIG}] not readable by user [${CLICKHOUSE_USER}]"
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -O \"${CLICKHOUSE_DATADIR_FROM_CONFIG}\" && test -G \"${CLICKHOUSE_DATADIR_FROM_CONFIG}\""); then
+ if [ $(dirname "${CLICKHOUSE_DATADIR_FROM_CONFIG}") == "/" ]; then
+ echo "Directory ${CLICKHOUSE_DATADIR_FROM_CONFIG} seems too dangerous to chown."
+ else
+ if [ ! -e "${CLICKHOUSE_DATADIR_FROM_CONFIG}" ]; then
+ echo "Creating directory ${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ mkdir -p "${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ fi
+
+ echo "Changing owner of [${CLICKHOUSE_DATADIR_FROM_CONFIG}] to [${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP}]"
+ chown -R ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} "${CLICKHOUSE_DATADIR_FROM_CONFIG}"
+ fi
+ fi
+
+ if ! $(su -s $SHELL ${CLICKHOUSE_USER} -c "test -w ${CLICKHOUSE_LOGDIR}"); then
+ echo "Changing owner of [${CLICKHOUSE_LOGDIR}/*] to [${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP}]"
+ chown -R ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_LOGDIR}/*
+ echo "Changing owner of [${CLICKHOUSE_LOGDIR}] to [${CLICKHOUSE_LOGDIR_USER}:${CLICKHOUSE_GROUP}]"
+ chown ${CLICKHOUSE_LOGDIR_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_LOGDIR}
+ fi
+}
+
+
+start()
+{
+ [ -x $BINDIR/$PROGRAM ] || exit 0
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ echo -n "Start $PROGRAM service: "
+
+ if is_running; then
+ echo -n "already running "
+ EXIT_STATUS=1
+ else
+ ulimit -n 262144
+ mkdir -p $CLICKHOUSE_PIDDIR
+ chown -R $CLICKHOUSE_USER:$CLICKHOUSE_GROUP $CLICKHOUSE_PIDDIR
+ initdb
+ if ! is_running; then
+ # Lock should not be held while running child process, so we release the lock. Note: obviously, there is race condition.
+ # But clickhouse-server has protection from simultaneous runs with same data directory.
+ su -s $SHELL ${CLICKHOUSE_USER} -c "$FLOCK -u 9; exec -a \"$PROGRAM\" \"$BINDIR/$PROGRAM\" --daemon --pid-file=\"$CLICKHOUSE_PIDFILE\" --config-file=\"$CLICKHOUSE_CONFIG\""
+ EXIT_STATUS=$?
+ if [ $EXIT_STATUS -ne 0 ]; then
+ break
+ fi
+ fi
+ fi
+
+ if [ $EXIT_STATUS -eq 0 ]; then
+ echo "DONE"
+ else
+ echo "FAILED"
+ fi
+
+ return $EXIT_STATUS
+}
+
+
+stop()
+{
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ if [ -f $CLICKHOUSE_PIDFILE ]; then
+
+ echo -n "Stop $PROGRAM service: "
+
+ kill -TERM $(cat "$CLICKHOUSE_PIDFILE")
+
+ wait_for_done
+
+ echo "DONE"
+ fi
+ return $EXIT_STATUS
+}
+
+
+restart()
+{
+ check_config
+ stop
+ start
+}
+
+
+forcestop()
+{
+ local EXIT_STATUS
+ EXIT_STATUS=0
+
+ echo -n "Stop forcefully $PROGRAM service: "
+
+ kill -KILL $(cat "$CLICKHOUSE_PIDFILE")
+
+ wait_for_done
+
+ echo "DONE"
+ return $EXIT_STATUS
+}
+
+
+forcerestart()
+{
+ forcestop
+ start
+}
+
+use_cron()
+{
+ # 1. running systemd
+ if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
+ return 1
+ fi
+ # 2. disabled by config
+ if [ -z "$CLICKHOUSE_CRONFILE" ]; then
+ return 2
+ fi
+ return 0
+}
+
+enable_cron()
+{
+ use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE"
+}
+
+
+disable_cron()
+{
+ use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE"
+}
+
+
+is_cron_disabled()
+{
+ use_cron || return 0
+
+ # Assumes that either no lines are commented or all lines are commented.
+    # Also note that the cron file for ClickHouse currently has only one line (some time ago there were more).
+ grep -q -E '^#' "$CLICKHOUSE_CRONFILE";
+}
+
+
+main()
+{
+ # See how we were called.
+ EXIT_STATUS=0
+ case "$1" in
+ start)
+ start && enable_cron
+ ;;
+ stop)
+ disable_cron && stop
+ ;;
+ restart)
+ restart && enable_cron
+ ;;
+ forcestop)
+ disable_cron && forcestop
+ ;;
+ forcerestart)
+ forcerestart && enable_cron
+ ;;
+ reload)
+ restart
+ ;;
+ condstart)
+ is_running || start
+ ;;
+ condstop)
+ is_running && stop
+ ;;
+ condrestart)
+ is_running && restart
+ ;;
+ condreload)
+ is_running && restart
+ ;;
+ initdb)
+ initdb
+ ;;
+ enable_cron)
+ enable_cron
+ ;;
+ disable_cron)
+ disable_cron
+ ;;
+ *)
+ echo "Usage: $0 $SUPPORTED_COMMANDS"
+ exit 2
+ ;;
+ esac
+
+ exit $EXIT_STATUS
+}
+
+
+status()
+{
+ if is_running; then
+ echo "$PROGRAM service is running"
+ else
+ if is_cron_disabled; then
+ echo "$PROGRAM service is stopped";
+ else
+ echo "$PROGRAM: process unexpectedly terminated"
+ fi
+ fi
+}
+
+
+# Running commands without need of locking
+case "$1" in
+status)
+ status
+ exit 0
+ ;;
+esac
+
+
+(
+ if $FLOCK -n 9; then
+ main "$@"
+ else
+ echo "Init script is already running" && exit 1
+ fi
+) 9> $LOCKFILE
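
For reference, a minimal usage sketch of the init script above, assuming it is installed with execute permission at a path such as /etc/init.d/clickhouse-server (the install path is an assumption; the repository only stores the file under MSH-PIC/clickhouse/clickhouse-server):

    # Start the service, then query its state; 'status' runs without taking the lock file
    /etc/init.d/clickhouse-server start
    /etc/init.d/clickhouse-server status

    # 'condrestart' restarts only when the server is already running, so it is safe in automation
    /etc/init.d/clickhouse-server condrestart
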
diff --git a/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server.pid b/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server.pid
new file mode 100644
index 0000000..8d9f099
--- /dev/null
+++ b/MSH-PIC/clickhouse/clickhouse-server/clickhouse-server.pid
@@ -0,0 +1 @@
+56515
\ No newline at end of file
diff --git a/MSH-PIC/clickhouse/clickhouse-server/config.xml b/MSH-PIC/clickhouse/clickhouse-server/config.xml
new file mode 100644
index 0000000..de391a7
--- /dev/null
+++ b/MSH-PIC/clickhouse/clickhouse-server/config.xml
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<yandex>
+ <logger>
+ <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
+ <level>error</level>
+ <log>/data/tsg/olap/clickhouse/logs/clickhouse-server.log</log>
+ <errorlog>/data/tsg/olap/clickhouse/logs/clickhouse-server.err.log</errorlog>
+ <size>200M</size>
+ <count>10</count>
+ <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
+ </logger>
+ <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
+ <http_port>8123</http_port>
+ <tcp_port>9001</tcp_port>
+ <max_server_memory_usage>150000000000</max_server_memory_usage>
+
+ <!-- For HTTPS and SSL over native protocol. -->
+ <!--
+ <https_port>8443</https_port>
+ <tcp_port_secure>9440</tcp_port_secure>
+ -->
+
+ <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
+ <openSSL>
+ <server> <!-- Used for https server AND secure tcp port -->
+ <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
+ <certificateFile>/data/tsg/olap/clickhouse/clickhouse-server/server.crt</certificateFile>
+ <privateKeyFile>/data/tsg/olap/clickhouse/clickhouse-server/server.key</privateKeyFile>
+ <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
+ <dhParamsFile>/data/tsg/olap/clickhouse/clickhouse-server/dhparam.pem</dhParamsFile>
+ <verificationMode>none</verificationMode>
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ </server>
+
+ <client> <!-- Used for connecting to https dictionary source -->
+ <loadDefaultCAFile>true</loadDefaultCAFile>
+ <cacheSessions>true</cacheSessions>
+ <disableProtocols>sslv2,sslv3</disableProtocols>
+ <preferServerCiphers>true</preferServerCiphers>
+ <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
+ <invalidCertificateHandler>
+ <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
+ <name>RejectCertificateHandler</name>
+ </invalidCertificateHandler>
+ </client>
+ </openSSL>
+
+ <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
+ <!--
+ <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+ -->
+
+ <!-- Port for communication between replicas. Used for data exchange. -->
+ <interserver_http_port>9009</interserver_http_port>
+
+ <!-- Hostname that is used by other replicas to request this server.
+         If not specified, it is determined analogously to the 'hostname -f' command.
+ This setting could be used to switch replication to another network interface.
+ -->
+
+ <interserver_http_host>192.168.20.193</interserver_http_host>
+
+
+ <!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
+ <listen_host>0.0.0.0</listen_host>
+ <!-- Same for hosts with disabled ipv6: -->
+ <!--<listen_host>0.0.0.0</listen_host>-->
+
+ <!-- Default values - try listen localhost on ipv4 and ipv6: -->
+
+<!--<listen_host>::1</listen_host>-->
+ <!-- <listen_host>127.0.0.1</listen_host>-->
+
+ <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
+ <!-- <listen_try>0</listen_try>-->
+
+ <!-- Allow listen on same address:port -->
+ <!-- <listen_reuse_port>0</listen_reuse_port>-->
+
+ <listen_backlog>64</listen_backlog>
+
+ <max_connections>4096</max_connections>
+ <keep_alive_timeout>600</keep_alive_timeout>
+
+ <!-- Maximum number of concurrent queries. -->
+    <!-- In version 21.12, changed from 150 to 500. -->
+ <max_concurrent_queries>500</max_concurrent_queries>
+
+ <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
+ correct maximum value. -->
+ <!-- <max_open_files>262144</max_open_files> -->
+
+ <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
+ Uncompressed cache is advantageous only for very short queries and in rare cases.
+ -->
+ <uncompressed_cache_size>8589934592</uncompressed_cache_size>
+
+ <!-- Approximate size of mark cache, used in tables of MergeTree family.
+ In bytes. Cache is single for server. Memory is allocated only on demand.
+ You should not lower this value.
+ -->
+ <mark_cache_size>5368709120</mark_cache_size>
+
+
+ <!-- Path to data directory, with trailing slash. -->
+<!-- <path>/data/tsg/olap/clickhouse/</path> -->
+ <path>/data/tsg/olap/clickhouse/</path>
+
+ <!-- Path to temporary data for processing hard queries. -->
+<!-- <tmp_path>/data/tsg/olap/clickhouse/tmp/</tmp_path>-->
+ <tmp_path>/data/tsg/olap/clickhouse/tmp/</tmp_path>
+
+ <!-- Directory with user provided files that are accessible by 'file' table function. -->
+ <user_files_path>/data/tsg/olap/clickhouse/user_files/</user_files_path>
+
+ <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
+ <users_config>users.xml</users_config>
+
+ <!-- Default profile of settings. -->
+ <default_profile>default</default_profile>
+
+ <!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distibuted DDL worker and so on). -->
+ <!-- <system_profile>default</system_profile> -->
+
+ <!-- Default database. -->
+ <default_database>default</default_database>
+
+ <!-- Server time zone could be set here.
+
+ Time zone is used when converting between String and DateTime types,
+ when printing DateTime in text formats and parsing DateTime from text,
+ it is used in date and time related functions, if specific time zone was not passed as an argument.
+
+ Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
+ If not specified, system time zone at server startup is used.
+
+ Please note, that server could display time zone alias instead of specified name.
+ Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
+ -->
+ <!-- <timezone>Europe/Moscow</timezone> -->
+ <timezone>UTC</timezone>
+ <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
+ Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+ -->
+ <!-- <umask>022</umask> -->
+
+ <!-- Configuration of clusters that could be used in Distributed tables.
+ https://clickhouse.yandex/docs/en/table_engines/distributed/
+ -->
+ <remote_servers incl="clickhouse_remote_servers" >
+ <!-- Test only shard config for testing distributed storage
+ <test_shard_localhost>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9000</port>
+ </replica>
+ </shard>
+ </test_shard_localhost>
+ <test_shard_localhost_secure>
+ <shard>
+ <replica>
+ <host>localhost</host>
+ <port>9440</port>
+ <secure>1</secure>
+ </replica>
+ </shard>
+ </test_shard_localhost_secure>-->
+ </remote_servers>
+
+
+    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file is used as its value.
+         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed via the 'include_from' element in the config.
+         Values for substitutions are specified in /yandex/name_of_substitution
+         elements in that file.
+ -->
+
+ <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/
+ -->
+ <zookeeper incl="zookeeper-servers" optional="true" />
+
+ <!-- Substitutions for parameters of replicated tables.
+ Optional. If you don't use replicated tables, you could omit that.
+
+ See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
+ -->
+ <macros incl="macros" optional="true" />
+
+
+ <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
+ <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
+
+
+ <!-- Maximum session timeout, in seconds. Default: 3600. -->
+ <max_session_timeout>21600</max_session_timeout>
+
+ <!-- Default session timeout, in seconds. Default: 60. -->
+ <default_session_timeout>6000</default_session_timeout>
+<max_table_size_to_drop>0</max_table_size_to_drop>
+<max_partition_size_to_drop>0</max_partition_size_to_drop>
+<include_from>/data/tsg/olap/clickhouse/clickhouse-server/metrika.xml</include_from>
+ <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
+ <!--
+ interval - send every X second
+ root_path - prefix for keys
+ hostname_in_path - append hostname to root_path (default = true)
+ metrics - send data from table system.metrics
+ events - send data from table system.events
+ asynchronous_metrics - send data from table system.asynchronous_metrics
+ -->
+ <!--
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>60</interval>
+ <root_path>one_min</root_path>
+ <hostname_in_path>true</hostname_in_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>true</asynchronous_metrics>
+ </graphite>
+ <graphite>
+ <host>localhost</host>
+ <port>42000</port>
+ <timeout>0.1</timeout>
+ <interval>1</interval>
+ <root_path>one_sec</root_path>
+
+ <metrics>true</metrics>
+ <events>true</events>
+ <asynchronous_metrics>false</asynchronous_metrics>
+ </graphite>
+ -->
+
+
+ <!-- Query log. Used only for queries with setting log_queries = 1. -->
+ <query_log>
+        <!-- Which table to insert data into. If the table does not exist, it will be created.
+             When the query log structure changes after a system update,
+             the old table will be renamed and a new table will be created automatically.
+ -->
+ <database>system</database>
+ <table>query_log</table>
+ <!--
+ PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
+ Example:
+ event_date
+ toMonday(event_date)
+ toYYYYMM(event_date)
+ toStartOfHour(event_time)
+ -->
+ <partition_by>toYYYYMM(event_date)</partition_by>
+ <!-- Interval of flushing data. -->
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </query_log>
+
+
+ <!-- Uncomment if use part_log
+ <part_log>
+ <database>system</database>
+ <table>part_log</table>
+
+ <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+ </part_log>
+ -->
+
+
+ <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
+ See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
+ -->
+
+ <!-- Path to file with region hierarchy. -->
+ <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
+
+ <!-- Path to directory with files containing names of regions -->
+ <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
+
+
+ <!-- Configuration of external dictionaries. See:
+ https://clickhouse.yandex/docs/en/dicts/external_dicts/
+ -->
+ <dictionaries_config>*_dictionary.xml</dictionaries_config>
+
+ <!-- Uncomment if you want data to be compressed 30-100% better.
+ Don't do that if you just started using ClickHouse.
+ -->
+ <compression incl="clickhouse_compression">
+ <!--
+ <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
+ <case>
+
+ <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
+ <min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
+ <min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
+
+ <!- - What compression method to use. - ->
+ <method>zstd</method>
+ </case>
+ -->
+ </compression>
+
+ <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
+ Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
+ <distributed_ddl>
+ <!-- Path in ZooKeeper to queue with DDL queries -->
+ <path>/clickhouse/task_queue/ddl</path>
+
+ <!-- Settings from this profile will be used to execute DDL queries -->
+ <!-- <profile>default</profile> -->
+ </distributed_ddl>
+
+ <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
+ <merge_tree>
+ <max_bytes_to_merge_at_max_space_in_pool>60000000000</max_bytes_to_merge_at_max_space_in_pool>
+ <ttl_only_drop_parts>1</ttl_only_drop_parts>
+ <min_merge_bytes_to_use_direct_io>0</min_merge_bytes_to_use_direct_io>
+ <max_suspicious_broken_parts>100</max_suspicious_broken_parts>
+ </merge_tree>
+
+
+ <!-- Protection from accidental DROP.
+         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
+         If you want to delete one table without restarting clickhouse-server, you can create the special file <clickhouse-path>/flags/force_drop_table and issue the DROP once.
+         By default max_table_size_to_drop is 50 GB; max_table_size_to_drop=0 allows any table to be dropped.
+ The same for max_partition_size_to_drop.
+ Uncomment to disable protection.
+ -->
+ <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
+ <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
+
+ <!-- Example of parameters for GraphiteMergeTree table engine -->
+ <graphite_rollup_example>
+ <pattern>
+ <regexp>click_cost</regexp>
+ <function>any</function>
+ <retention>
+ <age>0</age>
+ <precision>3600</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>60</precision>
+ </retention>
+ </pattern>
+ <default>
+ <function>max</function>
+ <retention>
+ <age>0</age>
+ <precision>60</precision>
+ </retention>
+ <retention>
+ <age>3600</age>
+ <precision>300</precision>
+ </retention>
+ <retention>
+ <age>86400</age>
+ <precision>3600</precision>
+ </retention>
+ </default>
+ </graphite_rollup_example>
+
+ <!-- Directory in <clickhouse-path> containing schema files for various input formats.
+ The directory will be created if it doesn't exist.
+ -->
+ <format_schema_path>/data/tsg/olap/clickhouse/format_schemas/</format_schema_path>
+
+ <!--
+ <storage_configuration>
+ <disks>
+ <ssd>
+                <path>if you want to use this policy, configure the SSD mount path here</path>
+ </ssd>
+ </disks>
+
+ <policies>
+ <ssd_to_hdd>
+ <volumes>
+ <hot>
+ <disk>ssd</disk>
+ </hot>
+ <default>
+ <disk>default</disk>
+ </default>
+ </volumes>
+ <move_factor>0.1</move_factor>
+ </ssd_to_hdd>
+ </policies>
+ </storage_configuration>
+ -->
+
+ <!-- Uncomment to disable ClickHouse internal DNS caching. -->
+ <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
+</yandex>
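
A quick smoke test against the ports declared in this config.xml (http_port 8123, tcp_port 9001) could look like the sketch below; it assumes the server is running on the local host and that clickhouse-client is installed.

    # The HTTP interface replies "Ok." on /ping
    curl -s http://127.0.0.1:8123/ping

    # The native protocol listens on the non-default tcp_port 9001 configured above
    clickhouse-client --host 127.0.0.1 --port 9001 --query "SELECT version()"
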
diff --git a/MSH-PIC/clickhouse/clickhouse-server/metrika.xml b/MSH-PIC/clickhouse/clickhouse-server/metrika.xml
new file mode 100644
index 0000000..f5faeb6
--- /dev/null
+++ b/MSH-PIC/clickhouse/clickhouse-server/metrika.xml
@@ -0,0 +1,55 @@
+<yandex>
+<!-- ClickHouse cluster nodes -->
+<clickhouse_remote_servers>
+
+<ck_cluster>
+ <shard>
+ <!-- Optional. Shard weight when writing data. Default: 1. -->
+ <weight>1</weight>
+ <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
+ <internal_replication>false</internal_replication>
+ <replica>
+ <host>192.168.20.193</host>
+ <port>9001</port>
+ <user>default</user>
+ <password>galaxy2019</password>
+ </replica>
+ </shard>
+</ck_cluster>
+
+</clickhouse_remote_servers>
+<zookeeper-servers>
+<node index="1">
+<host>192.168.20.221</host>
+<port>2181</port>
+</node>
+
+<node index="2">
+<host>192.168.20.222</host>
+<port>2181</port>
+</node>
+
+<node index="3">
+<host>192.168.20.223</host>
+<port>2181</port>
+</node>
+
+<session_timeout_ms>120000</session_timeout_ms>
+</zookeeper-servers>
+
+<networks>
+<ip>::/0</ip>
+</networks>
+
+<!-- Compression settings -->
+<clickhouse_compression>
+<case>
+<min_part_size>10000000000</min_part_size>
+<min_part_size_ratio>0.01</min_part_size_ratio>
+<method>lz4</method> <!-- lz4 compresses faster than zstd but uses more disk space -->
+</case>
+</clickhouse_compression>
+</yandex>
+
+
+
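
To confirm that the substitutions in metrika.xml are picked up via the include_from element in config.xml, one option is to inspect system.clusters; the sketch below assumes local access with the default user and the replica password shown above.

    # The ck_cluster definition from clickhouse_remote_servers should appear here
    clickhouse-client --port 9001 --user default --password 'galaxy2019' \
        --query "SELECT cluster, shard_num, replica_num, host_address, port FROM system.clusters WHERE cluster = 'ck_cluster'"
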
diff --git a/MSH-PIC/clickhouse/clickhouse-server/users.xml b/MSH-PIC/clickhouse/clickhouse-server/users.xml
new file mode 100644
index 0000000..990135b
--- /dev/null
+++ b/MSH-PIC/clickhouse/clickhouse-server/users.xml
@@ -0,0 +1,214 @@
+<?xml version="1.0"?>
+<yandex>
+ <!-- Profiles of settings. -->
+ <profiles>
+ <!-- Default settings. -->
+ <default>
+ <!-- Maximum memory usage for processing single query, in bytes. -->
+ <max_memory_usage>150000000000</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <max_bytes_before_external_group_by>75000000000</max_bytes_before_external_group_by>
+ <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
+ <distributed_product_mode>local</distributed_product_mode>
+ <log_queries>1</log_queries>
+ <cancel_http_readonly_queries_on_client_close>1</cancel_http_readonly_queries_on_client_close>
+ <background_pool_size>16</background_pool_size>
+ <!-- <enable_http_compression>1</enable_http_compression>-->
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>21600</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <use_uncompressed_cache>1</use_uncompressed_cache>
+ <replace_running_query>1</replace_running_query>
+ <http_receive_timeout>21600</http_receive_timeout>
+ <http_send_timeout>21600</http_send_timeout>
+ <receive_timeout>21600</receive_timeout>
+ <send_timeout>21600</send_timeout>
+ <count_distinct_implementation>uniqCombined</count_distinct_implementation>
+ <!-- How to choose between replicas during distributed query processing.
+ random - choose random replica from set of replicas with minimum number of errors
+ nearest_hostname - from set of replicas with minimum number of errors, choose replica
+               with minimum number of different symbols between the replica's hostname and the local hostname
+ (Hamming distance).
+             in_order - first live replica is chosen in the specified order.
+ -->
+ <max_rows_to_group_by>10000000</max_rows_to_group_by>
+ <group_by_overflow_mode>any</group_by_overflow_mode>
+ <timeout_before_checking_execution_speed>3600</timeout_before_checking_execution_speed>
+ <load_balancing>in_order</load_balancing>
+ </default>
+
+ <!-- Profile that allows only read queries. -->
+ <readonly>
+ <max_memory_usage>150000000000</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <distributed_product_mode>local</distributed_product_mode>
+ <http_receive_timeout>600</http_receive_timeout>
+ <http_send_timeout>600</http_send_timeout>
+ <receive_timeout>600</receive_timeout>
+ <send_timeout>600</send_timeout>
+ <log_queries>1</log_queries>
+ <cancel_http_readonly_queries_on_client_close>1</cancel_http_readonly_queries_on_client_close>
+ <background_pool_size>16</background_pool_size>
+            <!-- HTTP compression: does not affect plain HTTP requests, only clients that use chproxy -->
+ <enable_http_compression>1</enable_http_compression>
+ <replace_running_query>1</replace_running_query>
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>600</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <timeout_before_checking_execution_speed>600</timeout_before_checking_execution_speed>
+ <use_uncompressed_cache>1</use_uncompressed_cache>
+ <count_distinct_implementation>uniqCombined</count_distinct_implementation>
+ <load_balancing>in_order</load_balancing>
+ <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
+ <max_rows_to_group_by>10000000</max_rows_to_group_by>
+ <group_by_overflow_mode>any</group_by_overflow_mode>
+ <readonly>2</readonly>
+
+ </readonly>
+
+ <ckinsert>
+ <max_memory_usage>150000000000</max_memory_usage>
+ <!-- <max_memory_usage_for_all_queries>200000000000</max_memory_usage_for_all_queries> -->
+ <default_database_engine>Ordinary</default_database_engine>
+ <optimize_on_insert>0</optimize_on_insert>
+ <async_socket_for_remote>0</async_socket_for_remote>
+ <distributed_ddl_task_timeout>0</distributed_ddl_task_timeout>
+ <distributed_product_mode>local</distributed_product_mode>
+ <log_queries>1</log_queries>
+ <background_pool_size>16</background_pool_size>
+
+ <replication_alter_columns_timeout>60</replication_alter_columns_timeout>
+ <skip_unavailable_shards>1</skip_unavailable_shards>
+ <max_execution_time>300</max_execution_time>
+ <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
+ <use_uncompressed_cache>0</use_uncompressed_cache>
+ <timeout_before_checking_execution_speed>300</timeout_before_checking_execution_speed>
+ <http_receive_timeout>300</http_receive_timeout>
+ <http_send_timeout>300</http_send_timeout>
+ <receive_timeout>300</receive_timeout>
+ <send_timeout>300</send_timeout>
+ <allow_ddl>0</allow_ddl>
+ <load_balancing>random</load_balancing>
+ </ckinsert>
+ </profiles>
+
+ <!-- Users and ACL. -->
+ <users>
+ <!-- If user name was not specified, 'default' user is used. -->
+ <default>
+ <!-- Password could be specified in plaintext or in SHA256 (in hex format).
+
+ If you want to specify password in plaintext (not recommended), place it in 'password' element.
+ Example: <password>qwerty</password>.
+ Password could be empty.
+
+ If you want to specify SHA256, place it in 'password_sha256_hex' element.
+ Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
+
+ How to generate decent password:
+ Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+                 The first line will be the password and the second the corresponding SHA256.
+ -->
+ <password_sha256_hex>d24247a535fe6794275904f9b72e7fcf14a8a45628874d2eb1fd147020a403f7</password_sha256_hex>
+
+ <!-- List of networks with open access.
+
+ To open access from everywhere, specify:
+ <ip>::/0</ip>
+
+ To open access only from localhost, specify:
+ <ip>::1</ip>
+ <ip>127.0.0.1</ip>
+
+ Each element of list has one of the following forms:
+ <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
+ 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
+ <host> Hostname. Example: server01.yandex.ru.
+ To check access, DNS query is performed, and all received addresses compared to peer address.
+ <host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
+ To check access, DNS PTR query is performed for peer address and then regexp is applied.
+ Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
+                     It is strongly recommended that the regexp ends with $.
+ All results of DNS requests are cached till server restart.
+ -->
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+
+ <!-- Settings profile for user. -->
+ <profile>default</profile>
+
+ <!-- Quota for user. -->
+ <quota>default</quota>
+ </default>
+
+ <tsg_report>
+ <password_sha256_hex>d24247a535fe6794275904f9b72e7fcf14a8a45628874d2eb1fd147020a403f7</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>default</profile>
+ <quota>default</quota>
+ </tsg_report>
+
+
+ <tsg_insert>
+ <password_sha256_hex>d24247a535fe6794275904f9b72e7fcf14a8a45628874d2eb1fd147020a403f7</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>ckinsert</profile>
+ <quota>default</quota>
+ </tsg_insert>
+
+ <!-- Example of user with readonly access. -->
+ <tsg_query>
+ <password_sha256_hex>bce24719d7fef9c9569e710a344bf24d4a1d6a8f19c9ec1f4c4b7884a9d31121</password_sha256_hex>
+ <networks incl="networks" replace="replace">
+ <ip>::/0</ip>
+ </networks>
+ <profile>readonly</profile>
+ <quota>default</quota>
+ </tsg_query>
+
+
+ <!-- Example of user with readonly access. -->
+ <readonly>
+ <password></password>
+ <networks incl="networks" replace="replace">
+ <ip>::1</ip>
+ <ip>127.0.0.1</ip>
+ </networks>
+ <profile>readonly</profile>
+ <quota>default</quota>
+ </readonly>
+ </users>
+
+ <!-- Quotas. -->
+ <quotas>
+ <!-- Name of quota. -->
+ <default>
+ <!-- Limits for time interval. You could specify many intervals with different limits. -->
+ <interval>
+ <!-- Length of interval. -->
+ <duration>3600</duration>
+ <!-- No limits. Just calculate resource usage for time interval. -->
+ <queries>0</queries>
+ <errors>0</errors>
+ <result_rows>0</result_rows>
+ <read_rows>0</read_rows>
+ <execution_time>0</execution_time>
+ </interval>
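+            <!-- Illustrative only (not used in this deployment): to actually enforce limits,
+                 non-zero values can be set instead, e.g.
+                 <interval><duration>3600</duration><queries>1000</queries><errors>100</errors></interval> -->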
+ </default>
+ </quotas>
+</yandex>
diff --git a/MSH-PIC/cmak/docker-compose.yml b/MSH-PIC/cmak/docker-compose.yml
new file mode 100644
index 0000000..f3f9a15
--- /dev/null
+++ b/MSH-PIC/cmak/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3.6'
+
+services:
+ cmak:
+ image: cmak:3.0.0.6
+ ports:
+ - "9998:9000"
+ restart: always
+ container_name: cmak
+ command:
+ - "-Dhttp.port=9998"
+ - "-Dcmak.zkhosts=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181"
+ - "-DbasicAuthentication.enabled=true"
+ - "-DbasicAuthentication.username=galaxy"
+ - "-DbasicAuthentication.password=galaxy2019"
+ - "-Djava.security.auth.login.config=/cmak/conf/kafka_client_jaas.conf"
+ volumes:
+ - "/home/tsg/olap/galaxy/volumes/cmak/kafka_client_jaas.conf:/cmak/conf/kafka_client_jaas.conf"
+ - "/home/tsg/olap/galaxy/volumes/cmak/logs:/cmak/logs"
+ network_mode: "host"
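+
+# Illustrative usage note (not part of the original file): with this compose file in place,
+# CMAK can typically be started with "docker-compose up -d" and reached at
+# http://<host>:9998 using the basic-auth credentials configured above.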
diff --git a/MSH-PIC/cmak/kafka_client_jaas.conf b/MSH-PIC/cmak/kafka_client_jaas.conf
new file mode 100644
index 0000000..5f8cde7
--- /dev/null
+++ b/MSH-PIC/cmak/kafka_client_jaas.conf
@@ -0,0 +1,5 @@
+KafkaClient {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ username="admin"
+ password="galaxy2019";
+};
diff --git a/MSH-PIC/deployment configuration/tsg/MSH_config.zip b/MSH-PIC/deployment configuration/tsg/MSH_config.zip
new file mode 100644
index 0000000..ff5b492
--- /dev/null
+++ b/MSH-PIC/deployment configuration/tsg/MSH_config.zip
Binary files differ
diff --git a/MSH-PIC/deployment configuration/tsg/components.yml b/MSH-PIC/deployment configuration/tsg/components.yml
new file mode 100644
index 0000000..7c4d1ba
--- /dev/null
+++ b/MSH-PIC/deployment configuration/tsg/components.yml
@@ -0,0 +1,217 @@
+#===========================Apache Zookeeper configuration===============================#
+#The ZooKeeper JVM heap size in MB. The -Xmx value must be greater than or equal to 1024, considering the running capacity.
+zookeeper_java_opt: '-Xmx4096m -Xms1024m'
+
+#===========================Apache Druid configuration===============================#
+#Druid's MariaDB database name
+mariadb_druid_database: druid
+
+#The maximum size of segment data, in bytes.
+#The sum of available disk space across these locations is set as the default value for the property
+#druid.server.maxSize, which controls the total size of segment data that can be assigned by the Coordinator to a Historical.
+server_disk_maxsize: 5000000000000
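+#(For reference, 5000000000000 bytes is roughly 5 TB of segment storage per Historical.)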
+
+#The maximum size of cached segment data, in bytes.
+#druid.segmentCache.locations specifies locations where segment data can be stored on the Historical.
+#A greater proportion of segments can be kept in memory, allowing for better query performance.
+segmentCache_max_size: 300000000000
+
+#========historical configuration========
+#JVM heap size,MB
+historical_mem: 512
+
+#The maximum size of direct memory,MB
+historical_MaxDirectMemorySize: 512
+
+#Buffer size, in bytes.
+#druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+historical_buffer_sizeBytes: 50000000
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+historical_numMergeBuffers: 4
+
+#druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+historical_numThreads: 5
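+#(Worked example: on a 6-core Historical host, 6 - 1 = 5, which matches the value above.)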
+
+#========middlemanager configuration========
+#A string of -X Java options to pass to the peon's (worker) JVM.
+#druid.indexer.runner.javaOpts: JVM configuration for each task execution.
+middlemanager_runner_javaOpts: '-Xms512m -Xmx512m -XX:MaxDirectMemorySize=512m'
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+middlemanager_numMergeBuffers: 2
+
+#Buffer size, in bytes.
+#druid.indexer.fork.property.druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+middlemanager_buffer_sizeBytes: 20000000
+
+#druid.indexer.fork.property.druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+middlemanager_numThreads: 1
+
+#========coordinator configuration========
+#coordinator-overlord JVM heap size,MB
+coordinator_mem: 512
+
+#========broker configuration========
+#JVM heap size,MB
+broker_mem: 512
+
+#The maximum size of direct memory,MB
+broker_MaxDirectMemorySize: 1024
+
+#Buffer size, in bytes.
+#druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+broker_sizeBytes: 50000000
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+broker_numMergeBuffers: 6
+
+#druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+broker_numThreads: 1
+
+#===========================Hadoop configuration===============================#
+#---------------------------------HDFS config----------------------------#
+#namenode JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+namenode_java_opt: '-Xmx10240m -Xms10240m'
+
+#datanode JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+datanode_java_opt: '-Xmx5120m -Xms5120m'
+
+#journalnode JVM heap size MB
+#The -Xmx value must be greater than or equal to 1024 considering the running capacity.
+journal_java_opt: '-Xmx1024m -Xms1024m'
+
+#zkfc JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+zkfc_java_opt: '-Xmx1024m -Xms1024m'
+
+#The number of server threads for the namenode.
+#dfs.namenode.handler.count
+#It should generally be set to 20*log2(cluster size)
+namenode_handlers: 30
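+#(Worked example: for the 3-node Hadoop cluster in this deployment, 20*log2(3) is about 32, so a value around 30 follows this guideline.)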
+
+#The number of server threads for the datanode
+#dfs.datanode.handler.count
+datanode_handlers: 40
+
+#---------------------------------Yarn config----------------------------#
+#ResourceManager JVM heap size,Unit in megabytes(MB).
+resource_manager_java_opt: '-Xmx2048m -Xms1024m'
+
+#NodeManager JVM heap size,Unit in megabytes(MB).
+node_manager_java_opt: '-Xmx2048m -Xms1024m'
+
+#The NodeManager maximum amount of memory that can be allocated for a single container request. Unit in megabytes(MB).
+nodemanager_mem: 61440
+
+#The maximum number of virtual CPU cores that can be allocated for a single container request.
+#It's generally equal to the number of CPU cores.
+nodemanager_cores: 48
+
+#This parameter specifies the maximum percentage of cluster resources available for applications.
+#It limits the resource percentage that can be used by the ApplicationMaster (AM).
+resource_scheduler_capacity_percent: 0.5
+
+#===========================HBase Configuration===============================#
+#Hmaster JVM heap size,MB
+hmaster_java_opt: '-Xmx2048m -Xms2048m'
+
+#Hregionserver JVM heap size,MB
+hregion_java_opt: '-Xmx20480m -Xms20480m -Xmn128m'
+
+#The number of server threads for the regionserver
+#It should generally be set to (number of cores - 1)
+#hbase.regionserver.handler.count
+regionserverhandlers: 40
+
+#Maximum HStoreFile size.
+#If any one of a column family's HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
+hregion_max_filesize: 10737418240
+
+#The HBase resource isolation function is used to group tables for storage.
+#enabled: 1, disabled: 0
+hbase_enable_rsgroup: 0
+
+#===========================Apache Kafka Configuration===============================#
+#Kafka JVM heap size,MB
+kafka_java_opt: '-Xmx16384m -Xms4096m'
+
+#The minimum age of a log file to be eligible for deletion due to age
+#In hours; the default is 168 hours (7 days).
+log_reten_hours: 168
+
+#A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below topic_max_bytes. Functions independently of log_reten_hours.
+topic_max_bytes: 10737418240
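+#(10737418240 bytes = 10 GiB.)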
+
+#Record topic partition count. The default value is the number of servers in the cluster.
+#A value greater than the default can be used as the record topic partition count, provided it is an integer multiple of the default.
+record_topic_partition: 24
+#===========================Mariadb configuration===============================#
+#Buffer pool size,MB
+mariadb_innodb_buffer_pool_size: 2048
+
+#mariadb port
+galaxy_mariadb_port: 3306
+
+#Mariadb username
+galaxy_mariadb_username: root
+
+#===========================Spark configuration===============================#
+#spark worker JVM heap size,MB
+spark_worker_mem: 1024
+
+#The number of processing threads of worker
+spark_worker_cores: 30
+
+#===========================Nacos Configuration===============================#
+#A string of -X Java options passed to Nacos
+nacos_java_opt: '-Xmx1024m -Xms1024m -Xmn256m'
+
+#Nacos's MariaDB database name
+mariadb_nacos_database: nacos
+
+#===========================Flink Configuration================================#
+#JobManager JVM heap size,MB
+jobmanager_memory_size: 1024
+
+#taskmanager Network buffer size,MB
+taskmanager_memory_network_min: 64
+taskmanager_memory_network_max: 128
+
+#Taskmanager managed memory,MB
+taskmanager_memory_managed_size: 10
+
+#TaskManager JVM heap size,MB
+taskmanager_memory_size: 1024
+
+#Taskmanager JVM metaspace size,MB
+taskmanager_jvm_metaspace_size: 384
+
+#Taskmanager Framework Off-Heap,MB
+taskmanager_memory_framework_offheap_size: 128
+
+#The number of slots for taskmanager
+taskmanager_numberOfTaskSlots: 1
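+
+#(Rough sizing sketch based on the values above: each TaskManager JVM needs on the order of
+# 1024 MB heap + 10 MB managed + 64-128 MB network + 384 MB metaspace + 128 MB framework off-heap,
+# i.e. roughly 1.6-1.7 GB before other JVM overhead.)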
+
+#===========================Clickhouse Configuration================================#
+#Clickhouse node max memory use,KB
+clickhouse_max_memory: 150000000000
+
+#Number of threads performing background operations (such as merges and mutations) in MergeTree table engines
+clickhouse_background_pool_size: 16
+
diff --git a/MSH-PIC/deployment configuration/tsg/config.yml b/MSH-PIC/deployment configuration/tsg/config.yml
new file mode 100644
index 0000000..184b5a1
--- /dev/null
+++ b/MSH-PIC/deployment configuration/tsg/config.yml
@@ -0,0 +1,98 @@
+#============================Galaxy OLAP Configuration===============================#
+#The default installation location
+install_path: /home/tsg/olap
+
+#The default data storage location, used for storing application data, logs and configuration files
+data_path: /home/tsg/olap
+
+#Whether existing components are allowed to be uninstalled
+#Note: used to repeatedly install or remove old-version components. Only OLAP components are supported.
+allowed_unload: "yes"
+
+#Use commas (,) to separate the network segments that the firewall allows to access
+firewall_allow_network_segment: 192.168.10.0/24,192.168.20.0/24,192.168.30.0/24
+
+#Name of the currently deployed data center.
+#This parameter is used to configure the UI navigation domain name of the OLAP component.
+data_center_name: MSH
+
+#============================Central Management Configuration===============================#
+#Central Management MariaDB IP,used for services of galaxy-qgw-service and galaxy-report-service.
+cm_mariadb_host: 127.0.0.1
+
+#Central Management MariaDB Port
+cm_mariadb_port: 3306
+
+#Central Management MariaDB database name
+cm_mariadb_database: tsg-bifang
+
+#CM mariadb password
+#Sensitive data has been encrypted since version 22.01, and the clear-text pin no longer appears in configuration files.
+#This is the default pin. If the pin changes, contact the administrator.
+cm_mariadb_pin: ENC(iW8ekP1SZC6v/7cfJKAqXXrjApJox+cH)
+
+#============================Token Configuration===============================#
+#Central Management Server Token. Each environment's Token is different.
+#Requires communication with CM developers.
+#Used by the DOS-DETECTION-APPLICATION and IP-Locate-Library services.
+cm_server_token: aa2bdec5518ad131f71944b13ce5c298&1&
+
+#Galaxy-hos-service Server Token. Each environment's Token is different.
+#For the Token generation method, see STEP4 in OLAP Cluster Installation.
+#Used by the galaxy-hos-service, galaxy-job-service and packet_dump services.
+hos_server_token: f5c5186ba4874182b33b9b2b2b6e3f77
+
+#Encrypted Galaxy-hos-service Server Token. Each environment's Token is different.
+#For the Token generation method, see STEP4 in OLAP Cluster Installation.
+hos_servrt_token_encrypt: M8BbPaTywYw1/NyRY6TAVnqPzx7Nae92BVBcHoYi3pL9/o6kunHqpW3E50LO/XEL
+
+#============================Keepalived Configuration===============================#
+#=======Gateway Keepalived Configuration=======#
+#Specify the interface virtual IP address
+#It is used to configure high availability for the OLAP query engine
+#Only one IP address can be configured on the same network segment
+gateway_keepalive_host: 192.168.20.252
+
+#Specify the network interface to which the virtual address is assigned
+gateway_keepalive_interface: em1
+
+#The virtual router ID must be unique to each VRRP instance that you define
+#No special requirements, this value does not need to be modified
+gateway_keepalive_router_id: 61
+
+#=======HOS Keepalived Configuration=======#
+#Specify the interface virtual IP address
+#It is used to configure high availability for the HOS service
+#Only one IP address can be configured on the same network segment
+hos_keepalive_host: 192.168.20.251
+
+#Specify the network interface to which the virtual address is assigned
+hos_keepalive_interface: em1
+
+#The virtual router ID must be unique to each VRRP instance that you define
+#No special requirements, this value does not need to be modified
+hos_keepalive_router_id: 62
+
+#=======Mariadb Keepalived Configuration=======#
+#Specify the interface virtual IP address
+#It is used to configure high availability for the Mariadb service
+#Only one IP address can be configured on the same network segment
+mariadb_keepalive_host: 127.0.0.1
+
+#Specify the network interface to which the virtual address is assigned
+mariadb_keepalive_interface: em1
+
+#The virtual router ID must be unique to each VRRP instance that you define
+#No special requirements, this value does not need to be modified
+mariadb_keepalive_router_id: 63
+
+#============================Kafka Configuration===============================#
+#Kafka source brokers,kafkaip1:9094,kafkaip2:9094,kafkaip3:9094.....
+#The default source and sink servers are the same
+#It is used for getting data out of Kafka
+kafka_source_servers: 192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#Kafka sink brokers,kafkaip1:9094,kafkaip2:9094,kafkaip3:9094.....
+#The default source and sink servers are the same
+#It is used for sending data into kafka
+kafka_sink_servers: 192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
diff --git a/MSH-PIC/deployment configuration/tsg/hosts b/MSH-PIC/deployment configuration/tsg/hosts
new file mode 100644
index 0000000..fac59ab
--- /dev/null
+++ b/MSH-PIC/deployment configuration/tsg/hosts
@@ -0,0 +1,101 @@
+#=================================Galaxy OLAP deployment checklist======================================
+#Mariadb,master to master.
+#Maximum 2 servers.
+[mariadb]
+
+#Include components: galaxy-job-service
+#At least 2 servers
+[job]
+
+#Include components: galaxy-qgw-service
+#At least 2 servers
+[qgw]
+
+#Include components: galaxy-report-service
+#At least 2 servers
+[report]
+
+#galaxy-hos-service,galaxy-hos-keepalive
+#At least 2 servers
+[hos]
+192.168.20.193
+192.168.20.194
+
+#Include components:galaxy-gateway-keepalive,galaxy-gateway-nginx,chproxy
+#At least 2 servers. If the cluster has more than 2 servers, galaxy-gateway-keepalive runs on the first and second servers.
+[gateway_load_balancer]
+
+#Include components:galaxy-hos-keepalive and galaxy-hos-nginx
+#At least 2 servers. If the cluster has more than 2 servers, galaxy-hos-keepalive runs on the first and second servers.
+[hos_load_balancer]
+192.168.20.193
+192.168.20.194
+
+#ArangoDB,only support single server deployment
+[arangodb]
+
+#Alibaba Nacos
+#At least 3 servers
+[nacos]
+
+#Apache Zookeeper
+#At least 3 servers,and it is strongly recommended that you have an odd number of servers.
+[zookeeper]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache Kafka
+#By default, Kafka-Manager is installed on the first server.
+[kafka]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache Flink
+#By default, the master process is installed on the first and second servers. All servers install the worker process.
+[flink]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache Hadoop
+[hadoop]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache HBase
+[hbase]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache Spark
+#By default, when the cluster has <=3 servers, all servers install the worker process; when it has >3 servers, only the first server installs the master process.
+[spark]
+
+#Galaxy-Gohangout
+[gohangout]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#Apache Druid
+#By default, when the cluster has >4 servers, the Query process is installed only on the first and second servers.
+[druid]
+
+#Clickhouse
+#By default, the Query process is installed on the first and second servers.
+[clickhouse]
+192.168.20.193
+192.168.20.194
+192.168.20.195
+
+#DPI packet_dump
+[packet_dump]
+192.168.20.193
+
+#Superset,only support single server deployment
+[superset]
+
diff --git a/MSH-PIC/deployment configuration/tsg/services.yml b/MSH-PIC/deployment configuration/tsg/services.yml
new file mode 100644
index 0000000..aae93d1
--- /dev/null
+++ b/MSH-PIC/deployment configuration/tsg/services.yml
@@ -0,0 +1,39 @@
+#=================Galaxy-hos-service Configuration=================#
+#JVM heap size
+hos_java_opts: "-Xmx10240m -Xms10240m -Xmn512m"
+
+#Whether to enable SSL.
+#enabled: 1, disabled: 0
+hos_enable_ssl: 0
+
+#Download files quickly. Used when HBase has a memory larger than 20 GB.
+#enabled: 1, disabled: 0
+hos_quick_download: 0
+
+#Nacos namespace
+hos_config_namespace: MSH
+
+#=================Galaxy-qgw-service Configuration=================#
+#JVM heap size
+qgw_java_opts: "-Xmx5120m -Xms1024m"
+
+#Nacos namespace
+qgw_config_namespace: MSH
+
+#=================Galaxy-job-service Configuration=================#
+#JVM heap size
+job_java_opts: "-Xmx2048m -Xms1024m"
+
+#Galaxy-job-service's MariaDB database name
+mariadb_job_database: xxl_job
+
+#Nacos namespace
+job_config_namespace: MSH
+
+#================Galaxy-report-service Configuration=================#
+#JVM heap size
+report_java_opts: "-Xmx1024m -Xms1024m"
+
+#Nacos namespace
+report_config_namespace: MSH
+
diff --git a/MSH-PIC/flink/bin/bash-java-utils.jar b/MSH-PIC/flink/bin/bash-java-utils.jar
new file mode 100644
index 0000000..5b2e369
--- /dev/null
+++ b/MSH-PIC/flink/bin/bash-java-utils.jar
Binary files differ
diff --git a/MSH-PIC/flink/bin/config.sh b/MSH-PIC/flink/bin/config.sh
new file mode 100644
index 0000000..208b2d1
--- /dev/null
+++ b/MSH-PIC/flink/bin/config.sh
@@ -0,0 +1,560 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+constructFlinkClassPath() {
+ local FLINK_DIST
+ local FLINK_CLASSPATH
+
+ while read -d '' -r jarfile ; do
+ if [[ "$jarfile" =~ .*/flink-dist[^/]*.jar$ ]]; then
+ FLINK_DIST="$FLINK_DIST":"$jarfile"
+ elif [[ "$FLINK_CLASSPATH" == "" ]]; then
+ FLINK_CLASSPATH="$jarfile";
+ else
+ FLINK_CLASSPATH="$FLINK_CLASSPATH":"$jarfile"
+ fi
+ done < <(find "$FLINK_LIB_DIR" ! -type d -name '*.jar' -print0 | sort -z)
+
+ if [[ "$FLINK_DIST" == "" ]]; then
+ # write error message to stderr since stdout is stored as the classpath
+ (>&2 echo "[ERROR] Flink distribution jar not found in $FLINK_LIB_DIR.")
+
+ # exit function with empty classpath to force process failure
+ exit 1
+ fi
+
+ echo "$FLINK_CLASSPATH""$FLINK_DIST"
+}
+
+findFlinkDistJar() {
+ local FLINK_DIST="`find "$FLINK_LIB_DIR" -name 'flink-dist*.jar'`"
+
+ if [[ "$FLINK_DIST" == "" ]]; then
+ # write error message to stderr since stdout is stored as the classpath
+ (>&2 echo "[ERROR] Flink distribution jar not found in $FLINK_LIB_DIR.")
+
+ # exit function with empty classpath to force process failure
+ exit 1
+ fi
+
+ echo "$FLINK_DIST"
+}
+
+# These are used to mangle paths that are passed to java when using
+# cygwin. Cygwin paths are like linux paths, i.e. /path/to/somewhere
+# but the windows java version expects them in Windows Format, i.e. C:\bla\blub.
+# "cygpath" can do the conversion.
+manglePath() {
+ UNAME=$(uname -s)
+ if [ "${UNAME:0:6}" == "CYGWIN" ]; then
+ echo `cygpath -w "$1"`
+ else
+ echo $1
+ fi
+}
+
+manglePathList() {
+ UNAME=$(uname -s)
+ # a path list, for example a java classpath
+ if [ "${UNAME:0:6}" == "CYGWIN" ]; then
+ echo `cygpath -wp "$1"`
+ else
+ echo $1
+ fi
+}
+
+# Looks up a config value by key from a simple YAML-style key-value map.
+# $1: key to look up
+# $2: default value to return if key does not exist
+# $3: config file to read from
+readFromConfig() {
+ local key=$1
+ local defaultValue=$2
+ local configFile=$3
+
+ # first extract the value with the given key (1st sed), then trim the result (2nd sed)
+ # if a key exists multiple times, take the "last" one (tail)
+ local value=`sed -n "s/^[ ]*${key}[ ]*: \([^#]*\).*$/\1/p" "${configFile}" | sed "s/^ *//;s/ *$//" | tail -n 1`
+
+ [ -z "$value" ] && echo "$defaultValue" || echo "$value"
+}
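+
+# Illustrative usage (not called at this point; key and default are examples only):
+#   rest_port=$(readFromConfig "rest.port" "8081" "${YAML_CONF}")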
+
+########################################################################################################################
+# DEFAULT CONFIG VALUES: These values will be used when nothing has been specified in conf/flink-conf.yaml
+# -or- the respective environment variables are not set.
+########################################################################################################################
+
+
+# WARNING !!! These values are only used if nothing else is specified in
+# conf/flink-conf.yaml
+
+DEFAULT_ENV_PID_DIR="$(cd "`dirname "$0"`"/..; pwd)/tmp" # Directory to store *.pid files to
+DEFAULT_ENV_LOG_MAX=10 # Maximum number of old log files to keep
+DEFAULT_ENV_JAVA_OPTS="" # Optional JVM args
+DEFAULT_ENV_JAVA_OPTS_JM="" # Optional JVM args (JobManager)
+DEFAULT_ENV_JAVA_OPTS_TM="" # Optional JVM args (TaskManager)
+DEFAULT_ENV_JAVA_OPTS_HS="" # Optional JVM args (HistoryServer)
+DEFAULT_ENV_JAVA_OPTS_CLI="" # Optional JVM args (Client)
+DEFAULT_ENV_SSH_OPTS="" # Optional SSH parameters running in cluster mode
+DEFAULT_YARN_CONF_DIR="" # YARN Configuration Directory, if necessary
+DEFAULT_HADOOP_CONF_DIR="" # Hadoop Configuration Directory, if necessary
+DEFAULT_HBASE_CONF_DIR="" # HBase Configuration Directory, if necessary
+
+########################################################################################################################
+# CONFIG KEYS: The default values can be overwritten by the following keys in conf/flink-conf.yaml
+########################################################################################################################
+
+KEY_TASKM_COMPUTE_NUMA="taskmanager.compute.numa"
+
+KEY_ENV_PID_DIR="env.pid.dir"
+KEY_ENV_LOG_DIR="env.log.dir"
+KEY_ENV_LOG_MAX="env.log.max"
+KEY_ENV_YARN_CONF_DIR="env.yarn.conf.dir"
+KEY_ENV_HADOOP_CONF_DIR="env.hadoop.conf.dir"
+KEY_ENV_HBASE_CONF_DIR="env.hbase.conf.dir"
+KEY_ENV_JAVA_HOME="env.java.home"
+KEY_ENV_JAVA_OPTS="env.java.opts"
+KEY_ENV_JAVA_OPTS_JM="env.java.opts.jobmanager"
+KEY_ENV_JAVA_OPTS_TM="env.java.opts.taskmanager"
+KEY_ENV_JAVA_OPTS_HS="env.java.opts.historyserver"
+KEY_ENV_JAVA_OPTS_CLI="env.java.opts.client"
+KEY_ENV_SSH_OPTS="env.ssh.opts"
+KEY_HIGH_AVAILABILITY="high-availability"
+KEY_ZK_HEAP_MB="zookeeper.heap.mb"
+
+########################################################################################################################
+# PATHS AND CONFIG
+########################################################################################################################
+
+target="$0"
+# In case the executable has been directly symlinked, figure out
+# the correct bin path by following its symlink up to an upper bound.
+# Note: we can't use the readlink utility here if we want to be POSIX
+# compatible.
+iteration=0
+while [ -L "$target" ]; do
+ if [ "$iteration" -gt 100 ]; then
+ echo "Cannot resolve path: You have a cyclic symlink in $target."
+ break
+ fi
+ ls=`ls -ld -- "$target"`
+ target=`expr "$ls" : '.* -> \(.*\)$'`
+ iteration=$((iteration + 1))
+done
+
+# Convert relative path to absolute path and resolve directory symlinks
+bin=`dirname "$target"`
+SYMLINK_RESOLVED_BIN=`cd "$bin"; pwd -P`
+
+# Define the main directory of the flink installation
+# If config.sh is called by pyflink-shell.sh from the Python bin directory (pip installed), there is no need to set FLINK_HOME here.
+if [ -z "$_FLINK_HOME_DETERMINED" ]; then
+ FLINK_HOME=`dirname "$SYMLINK_RESOLVED_BIN"`
+fi
+if [ -z "$FLINK_LIB_DIR" ]; then FLINK_LIB_DIR=$FLINK_HOME/lib; fi
+if [ -z "$FLINK_PLUGINS_DIR" ]; then FLINK_PLUGINS_DIR=$FLINK_HOME/plugins; fi
+if [ -z "$FLINK_OPT_DIR" ]; then FLINK_OPT_DIR=$FLINK_HOME/opt; fi
+
+
+# These need to be mangled because they are directly passed to java.
+# The above lib path is used by the shell script to retrieve jars in a
+# directory, so it needs to be unmangled.
+FLINK_HOME_DIR_MANGLED=`manglePath "$FLINK_HOME"`
+if [ -z "$FLINK_CONF_DIR" ]; then FLINK_CONF_DIR=$FLINK_HOME_DIR_MANGLED/conf; fi
+FLINK_BIN_DIR=$FLINK_HOME_DIR_MANGLED/bin
+DEFAULT_FLINK_LOG_DIR=$FLINK_HOME_DIR_MANGLED/log
+FLINK_CONF_FILE="flink-conf.yaml"
+YAML_CONF=${FLINK_CONF_DIR}/${FLINK_CONF_FILE}
+
+### Exported environment variables ###
+export FLINK_CONF_DIR
+export FLINK_BIN_DIR
+export FLINK_PLUGINS_DIR
+# export /lib dir to access it during deployment of the Yarn staging files
+export FLINK_LIB_DIR
+# export /opt dir to access it for the SQL client
+export FLINK_OPT_DIR
+
+########################################################################################################################
+# ENVIRONMENT VARIABLES
+########################################################################################################################
+
+# read JAVA_HOME from config with no default value
+MY_JAVA_HOME=$(readFromConfig ${KEY_ENV_JAVA_HOME} "" "${YAML_CONF}")
+# check if config specified JAVA_HOME
+if [ -z "${MY_JAVA_HOME}" ]; then
+ # config did not specify JAVA_HOME. Use system JAVA_HOME
+ MY_JAVA_HOME="${JAVA_HOME}"
+fi
+# check if we have a valid JAVA_HOME and if java is not available
+if [ -z "${MY_JAVA_HOME}" ] && ! type java > /dev/null 2> /dev/null; then
+ echo "Please specify JAVA_HOME. Either in Flink config ./conf/flink-conf.yaml or as system-wide JAVA_HOME."
+ exit 1
+else
+ JAVA_HOME="${MY_JAVA_HOME}"
+fi
+
+UNAME=$(uname -s)
+if [ "${UNAME:0:6}" == "CYGWIN" ]; then
+ JAVA_RUN=java
+else
+ if [[ -d "$JAVA_HOME" ]]; then
+ JAVA_RUN="$JAVA_HOME"/bin/java
+ else
+ JAVA_RUN=java
+ fi
+fi
+
+# Define HOSTNAME if it is not already set
+if [ -z "${HOSTNAME}" ]; then
+ HOSTNAME=`hostname`
+fi
+
+IS_NUMBER="^[0-9]+$"
+
+# Verify that NUMA tooling is available
+command -v numactl >/dev/null 2>&1
+if [[ $? -ne 0 ]]; then
+ FLINK_TM_COMPUTE_NUMA="false"
+else
+ # Define FLINK_TM_COMPUTE_NUMA if it is not already set
+ if [ -z "${FLINK_TM_COMPUTE_NUMA}" ]; then
+ FLINK_TM_COMPUTE_NUMA=$(readFromConfig ${KEY_TASKM_COMPUTE_NUMA} "false" "${YAML_CONF}")
+ fi
+fi
+
+if [ -z "${MAX_LOG_FILE_NUMBER}" ]; then
+ MAX_LOG_FILE_NUMBER=$(readFromConfig ${KEY_ENV_LOG_MAX} ${DEFAULT_ENV_LOG_MAX} "${YAML_CONF}")
+ export MAX_LOG_FILE_NUMBER
+fi
+
+if [ -z "${FLINK_LOG_DIR}" ]; then
+ FLINK_LOG_DIR=$(readFromConfig ${KEY_ENV_LOG_DIR} "${DEFAULT_FLINK_LOG_DIR}" "${YAML_CONF}")
+fi
+
+if [ -z "${YARN_CONF_DIR}" ]; then
+ YARN_CONF_DIR=$(readFromConfig ${KEY_ENV_YARN_CONF_DIR} "${DEFAULT_YARN_CONF_DIR}" "${YAML_CONF}")
+fi
+
+if [ -z "${HADOOP_CONF_DIR}" ]; then
+ HADOOP_CONF_DIR=$(readFromConfig ${KEY_ENV_HADOOP_CONF_DIR} "${DEFAULT_HADOOP_CONF_DIR}" "${YAML_CONF}")
+fi
+
+if [ -z "${HBASE_CONF_DIR}" ]; then
+ HBASE_CONF_DIR=$(readFromConfig ${KEY_ENV_HBASE_CONF_DIR} "${DEFAULT_HBASE_CONF_DIR}" "${YAML_CONF}")
+fi
+
+if [ -z "${FLINK_PID_DIR}" ]; then
+ FLINK_PID_DIR=$(readFromConfig ${KEY_ENV_PID_DIR} "${DEFAULT_ENV_PID_DIR}" "${YAML_CONF}")
+fi
+
+if [ -z "${FLINK_ENV_JAVA_OPTS}" ]; then
+ FLINK_ENV_JAVA_OPTS=$(readFromConfig ${KEY_ENV_JAVA_OPTS} "${DEFAULT_ENV_JAVA_OPTS}" "${YAML_CONF}")
+
+ # Remove leading and ending double quotes (if present) of value
+ FLINK_ENV_JAVA_OPTS="$( echo "${FLINK_ENV_JAVA_OPTS}" | sed -e 's/^"//' -e 's/"$//' )"
+fi
+
+if [ -z "${FLINK_ENV_JAVA_OPTS_JM}" ]; then
+ FLINK_ENV_JAVA_OPTS_JM=$(readFromConfig ${KEY_ENV_JAVA_OPTS_JM} "${DEFAULT_ENV_JAVA_OPTS_JM}" "${YAML_CONF}")
+ # Remove leading and ending double quotes (if present) of value
+ FLINK_ENV_JAVA_OPTS_JM="$( echo "${FLINK_ENV_JAVA_OPTS_JM}" | sed -e 's/^"//' -e 's/"$//' )"
+fi
+
+if [ -z "${FLINK_ENV_JAVA_OPTS_TM}" ]; then
+ FLINK_ENV_JAVA_OPTS_TM=$(readFromConfig ${KEY_ENV_JAVA_OPTS_TM} "${DEFAULT_ENV_JAVA_OPTS_TM}" "${YAML_CONF}")
+ # Remove leading and ending double quotes (if present) of value
+ FLINK_ENV_JAVA_OPTS_TM="$( echo "${FLINK_ENV_JAVA_OPTS_TM}" | sed -e 's/^"//' -e 's/"$//' )"
+fi
+
+if [ -z "${FLINK_ENV_JAVA_OPTS_HS}" ]; then
+ FLINK_ENV_JAVA_OPTS_HS=$(readFromConfig ${KEY_ENV_JAVA_OPTS_HS} "${DEFAULT_ENV_JAVA_OPTS_HS}" "${YAML_CONF}")
+ # Remove leading and ending double quotes (if present) of value
+ FLINK_ENV_JAVA_OPTS_HS="$( echo "${FLINK_ENV_JAVA_OPTS_HS}" | sed -e 's/^"//' -e 's/"$//' )"
+fi
+
+if [ -z "${FLINK_ENV_JAVA_OPTS_CLI}" ]; then
+ FLINK_ENV_JAVA_OPTS_CLI=$(readFromConfig ${KEY_ENV_JAVA_OPTS_CLI} "${DEFAULT_ENV_JAVA_OPTS_CLI}" "${YAML_CONF}")
+ # Remove leading and ending double quotes (if present) of value
+ FLINK_ENV_JAVA_OPTS_CLI="$( echo "${FLINK_ENV_JAVA_OPTS_CLI}" | sed -e 's/^"//' -e 's/"$//' )"
+fi
+
+if [ -z "${FLINK_SSH_OPTS}" ]; then
+ FLINK_SSH_OPTS=$(readFromConfig ${KEY_ENV_SSH_OPTS} "${DEFAULT_ENV_SSH_OPTS}" "${YAML_CONF}")
+fi
+
+# Define ZK_HEAP if it is not already set
+if [ -z "${ZK_HEAP}" ]; then
+ ZK_HEAP=$(readFromConfig ${KEY_ZK_HEAP_MB} 0 "${YAML_CONF}")
+fi
+
+# High availability
+if [ -z "${HIGH_AVAILABILITY}" ]; then
+ HIGH_AVAILABILITY=$(readFromConfig ${KEY_HIGH_AVAILABILITY} "" "${YAML_CONF}")
+ if [ -z "${HIGH_AVAILABILITY}" ]; then
+ # Try deprecated value
+ DEPRECATED_HA=$(readFromConfig "recovery.mode" "" "${YAML_CONF}")
+ if [ -z "${DEPRECATED_HA}" ]; then
+ HIGH_AVAILABILITY="none"
+ elif [ ${DEPRECATED_HA} == "standalone" ]; then
+ # Standalone is now 'none'
+ HIGH_AVAILABILITY="none"
+ else
+ HIGH_AVAILABILITY=${DEPRECATED_HA}
+ fi
+ fi
+fi
+
+# Arguments for the JVM. Used for job and task manager JVMs.
+# DO NOT USE FOR MEMORY SETTINGS! Use conf/flink-conf.yaml with keys
+# JobManagerOptions#TOTAL_PROCESS_MEMORY and TaskManagerOptions#TOTAL_PROCESS_MEMORY for that!
+if [ -z "${JVM_ARGS}" ]; then
+ JVM_ARGS=""
+fi
+
+# Check if deprecated HADOOP_HOME is set, and specify config path to HADOOP_CONF_DIR if it's empty.
+if [ -z "$HADOOP_CONF_DIR" ]; then
+ if [ -n "$HADOOP_HOME" ]; then
+    # HADOOP_HOME is set. Check if it's a Hadoop 1.x or 2.x HADOOP_HOME path
+ if [ -d "$HADOOP_HOME/conf" ]; then
+ # It's Hadoop 1.x
+ HADOOP_CONF_DIR="$HADOOP_HOME/conf"
+ fi
+ if [ -d "$HADOOP_HOME/etc/hadoop" ]; then
+ # It's Hadoop 2.2+
+ HADOOP_CONF_DIR="$HADOOP_HOME/etc/hadoop"
+ fi
+ fi
+fi
+
+# if neither HADOOP_CONF_DIR nor HADOOP_CLASSPATH are set, use some common default (if available)
+if [ -z "$HADOOP_CONF_DIR" ] && [ -z "$HADOOP_CLASSPATH" ]; then
+ if [ -d "/etc/hadoop/conf" ]; then
+ echo "Setting HADOOP_CONF_DIR=/etc/hadoop/conf because no HADOOP_CONF_DIR or HADOOP_CLASSPATH was set."
+ HADOOP_CONF_DIR="/etc/hadoop/conf"
+ fi
+fi
+
+# Check if deprecated HBASE_HOME is set, and specify config path to HBASE_CONF_DIR if it's empty.
+if [ -z "$HBASE_CONF_DIR" ]; then
+ if [ -n "$HBASE_HOME" ]; then
+ # HBASE_HOME is set.
+ if [ -d "$HBASE_HOME/conf" ]; then
+ HBASE_CONF_DIR="$HBASE_HOME/conf"
+ fi
+ fi
+fi
+
+# try and set HBASE_CONF_DIR to some common default if it's not set
+if [ -z "$HBASE_CONF_DIR" ]; then
+ if [ -d "/etc/hbase/conf" ]; then
+ echo "Setting HBASE_CONF_DIR=/etc/hbase/conf because no HBASE_CONF_DIR was set."
+ HBASE_CONF_DIR="/etc/hbase/conf"
+ fi
+fi
+
+INTERNAL_HADOOP_CLASSPATHS="${HADOOP_CLASSPATH}:${HADOOP_CONF_DIR}:${YARN_CONF_DIR}"
+
+if [ -n "${HBASE_CONF_DIR}" ]; then
+ INTERNAL_HADOOP_CLASSPATHS="${INTERNAL_HADOOP_CLASSPATHS}:${HBASE_CONF_DIR}"
+fi
+
+# Auxiliary function which extracts the name of host from a line which
+# also potentially includes topology information and the taskManager type
+extractHostName() {
+ # handle comments: extract first part of string (before first # character)
+ WORKER=`echo $1 | cut -d'#' -f 1`
+
+ # Extract the hostname from the network hierarchy
+ if [[ "$WORKER" =~ ^.*/([0-9a-zA-Z.-]+)$ ]]; then
+ WORKER=${BASH_REMATCH[1]}
+ fi
+
+ echo $WORKER
+}
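+
+# For example (illustrative): a workers/masters entry such as "rack1/worker-01  # comment"
+# resolves to "worker-01"; the trailing comment and the topology prefix are stripped.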
+
+readMasters() {
+ MASTERS_FILE="${FLINK_CONF_DIR}/masters"
+
+ if [[ ! -f "${MASTERS_FILE}" ]]; then
+ echo "No masters file. Please specify masters in 'conf/masters'."
+ exit 1
+ fi
+
+ MASTERS=()
+ WEBUIPORTS=()
+
+ MASTERS_ALL_LOCALHOST=true
+ GOON=true
+ while $GOON; do
+ read line || GOON=false
+ HOSTWEBUIPORT=$( extractHostName $line)
+
+ if [ -n "$HOSTWEBUIPORT" ]; then
+ HOST=$(echo $HOSTWEBUIPORT | cut -f1 -d:)
+ WEBUIPORT=$(echo $HOSTWEBUIPORT | cut -s -f2 -d:)
+ MASTERS+=(${HOST})
+
+ if [ -z "$WEBUIPORT" ]; then
+ WEBUIPORTS+=(0)
+ else
+ WEBUIPORTS+=(${WEBUIPORT})
+ fi
+
+ if [ "${HOST}" != "localhost" ] && [ "${HOST}" != "127.0.0.1" ] ; then
+ MASTERS_ALL_LOCALHOST=false
+ fi
+ fi
+ done < "$MASTERS_FILE"
+}
+
+readWorkers() {
+ WORKERS_FILE="${FLINK_CONF_DIR}/workers"
+
+ if [[ ! -f "$WORKERS_FILE" ]]; then
+ echo "No workers file. Please specify workers in 'conf/workers'."
+ exit 1
+ fi
+
+ WORKERS=()
+
+ WORKERS_ALL_LOCALHOST=true
+ GOON=true
+ while $GOON; do
+ read line || GOON=false
+ HOST=$( extractHostName $line)
+ if [ -n "$HOST" ] ; then
+ WORKERS+=(${HOST})
+ if [ "${HOST}" != "localhost" ] && [ "${HOST}" != "127.0.0.1" ] ; then
+ WORKERS_ALL_LOCALHOST=false
+ fi
+ fi
+ done < "$WORKERS_FILE"
+}
+
+# starts or stops TMs on all workers
+# TMWorkers start|stop
+TMWorkers() {
+ CMD=$1
+
+ readWorkers
+
+ if [ ${WORKERS_ALL_LOCALHOST} = true ] ; then
+ # all-local setup
+ for worker in ${WORKERS[@]}; do
+ "${FLINK_BIN_DIR}"/taskmanager.sh "${CMD}"
+ done
+ else
+ # non-local setup
+ # start/stop TaskManager instance(s) using pdsh (Parallel Distributed Shell) when available
+ command -v pdsh >/dev/null 2>&1
+ if [[ $? -ne 0 ]]; then
+ for worker in ${WORKERS[@]}; do
+ ssh -n $FLINK_SSH_OPTS $worker -- "nohup /bin/bash -l \"${FLINK_BIN_DIR}/taskmanager.sh\" \"${CMD}\" &"
+ done
+ else
+ PDSH_SSH_ARGS="" PDSH_SSH_ARGS_APPEND=$FLINK_SSH_OPTS pdsh -w $(IFS=, ; echo "${WORKERS[*]}") \
+ "nohup /bin/bash -l \"${FLINK_BIN_DIR}/taskmanager.sh\" \"${CMD}\""
+ fi
+ fi
+}
+
+runBashJavaUtilsCmd() {
+ local cmd=$1
+ local conf_dir=$2
+ local class_path=$3
+ local dynamic_args=${@:4}
+ class_path=`manglePathList "${class_path}"`
+
+ local output=`"${JAVA_RUN}" -classpath "${class_path}" org.apache.flink.runtime.util.bash.BashJavaUtils ${cmd} --configDir "${conf_dir}" $dynamic_args 2>&1 | tail -n 1000`
+ if [[ $? -ne 0 ]]; then
+ echo "[ERROR] Cannot run BashJavaUtils to execute command ${cmd}." 1>&2
+ # Print the output in case the user redirect the log to console.
+ echo "$output" 1>&2
+ exit 1
+ fi
+
+ echo "$output"
+}
+
+extractExecutionResults() {
+ local output="$1"
+ local expected_lines="$2"
+ local EXECUTION_PREFIX="BASH_JAVA_UTILS_EXEC_RESULT:"
+ local execution_results
+ local num_lines
+
+ execution_results=$(echo "${output}" | grep ${EXECUTION_PREFIX})
+ num_lines=$(echo "${execution_results}" | wc -l)
+    # explicit check for empty result, because if execution_results is empty, then wc returns 1
+ if [[ -z ${execution_results} ]]; then
+ echo "[ERROR] The execution result is empty." 1>&2
+ exit 1
+ fi
+ if [[ ${num_lines} -ne ${expected_lines} ]]; then
+        echo "[ERROR] The execution result has an unexpected number of lines, expected: ${expected_lines}, actual: ${num_lines}." 1>&2
+ echo "[ERROR] An execution result line is expected following the prefix '${EXECUTION_PREFIX}'" 1>&2
+ echo "$output" 1>&2
+ exit 1
+ fi
+
+ echo "${execution_results//${EXECUTION_PREFIX}/}"
+}
+
+extractLoggingOutputs() {
+ local output="$1"
+ local EXECUTION_PREFIX="BASH_JAVA_UTILS_EXEC_RESULT:"
+
+ echo "${output}" | grep -v ${EXECUTION_PREFIX}
+}
+
+parseResourceParamsAndExportLogs() {
+ local cmd=$1
+ java_utils_output=$(runBashJavaUtilsCmd ${cmd} "${FLINK_CONF_DIR}" "${FLINK_BIN_DIR}/bash-java-utils.jar:$(findFlinkDistJar)" "$@")
+ logging_output=$(extractLoggingOutputs "${java_utils_output}")
+ params_output=$(extractExecutionResults "${java_utils_output}" 2)
+
+ if [[ $? -ne 0 ]]; then
+ echo "[ERROR] Could not get JVM parameters and dynamic configurations properly."
+ echo "[ERROR] Raw output from BashJavaUtils:"
+ echo "$java_utils_output"
+ exit 1
+ fi
+
+ jvm_params=$(echo "${params_output}" | head -n1)
+ export JVM_ARGS="${JVM_ARGS} ${jvm_params}"
+ export DYNAMIC_PARAMETERS=$(IFS=" " echo "${params_output}" | tail -n1)
+
+ export FLINK_INHERITED_LOGS="
+$FLINK_INHERITED_LOGS
+
+RESOURCE_PARAMS extraction logs:
+jvm_params: $jvm_params
+dynamic_configs: $DYNAMIC_PARAMETERS
+logs: $logging_output
+"
+}
+
+parseJmArgsAndExportLogs() {
+ parseResourceParamsAndExportLogs GET_JM_RESOURCE_PARAMS
+}
+
+parseTmArgsAndExportLogs() {
+ parseResourceParamsAndExportLogs GET_TM_RESOURCE_PARAMS
+}
diff --git a/MSH-PIC/flink/bin/find-flink-home.sh b/MSH-PIC/flink/bin/find-flink-home.sh
new file mode 100644
index 0000000..e0fe95f
--- /dev/null
+++ b/MSH-PIC/flink/bin/find-flink-home.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+CURRENT_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
+FIND_FLINK_HOME_PYTHON_SCRIPT="$CURRENT_DIR/find_flink_home.py"
+
+if [ ! -f "$FIND_FLINK_HOME_PYTHON_SCRIPT" ]; then
+ export FLINK_HOME="$( cd "$CURRENT_DIR"/.. ; pwd -P )"
+else
+ PYFLINK_PYTHON="${PYFLINK_PYTHON:-"python"}"
+ export FLINK_HOME=$("$FIND_FLINK_HOME_PYTHON_SCRIPT")
+fi
diff --git a/MSH-PIC/flink/bin/flink b/MSH-PIC/flink/bin/flink
new file mode 100644
index 0000000..3413463
--- /dev/null
+++ b/MSH-PIC/flink/bin/flink
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+target="$0"
+# In case the executable has been directly symlinked, figure out
+# the correct bin path by following its symlink up to an upper bound.
+# Note: we can't use the readlink utility here if we want to be POSIX
+# compatible.
+iteration=0
+while [ -L "$target" ]; do
+ if [ "$iteration" -gt 100 ]; then
+ echo "Cannot resolve path: You have a cyclic symlink in $target."
+ break
+ fi
+ ls=`ls -ld -- "$target"`
+ target=`expr "$ls" : '.* -> \(.*\)$'`
+ iteration=$((iteration + 1))
+done
+
+# Convert relative path to absolute path
+bin=`dirname "$target"`
+
+# get flink config
+. "$bin"/config.sh
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+CC_CLASSPATH=`constructFlinkClassPath`
+
+log=$FLINK_LOG_DIR/flink-$FLINK_IDENT_STRING-client-$HOSTNAME.log
+log_setting=(-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j-cli.properties -Dlog4j.configurationFile=file:"$FLINK_CONF_DIR"/log4j-cli.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback.xml)
+
+# Add Client-specific JVM options
+FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_CLI}"
+
+# Add HADOOP_CLASSPATH to allow the usage of Hadoop file systems
+exec "${JAVA_RUN}" $JVM_ARGS $FLINK_ENV_JAVA_OPTS "${log_setting[@]}" -classpath "`manglePathList "$CC_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" org.apache.flink.client.cli.CliFrontend "$@"
diff --git a/MSH-PIC/flink/bin/flink-console.sh b/MSH-PIC/flink/bin/flink-console.sh
new file mode 100644
index 0000000..6ebe2ac
--- /dev/null
+++ b/MSH-PIC/flink/bin/flink-console.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start a Flink service as a console application. Must be stopped with Ctrl-C
+# or with SIGTERM by kill or the controlling process.
+USAGE="Usage: flink-console.sh (taskexecutor|zookeeper|historyserver|standalonesession|standalonejob|kubernetes-session|kubernetes-application|kubernetes-taskmanager) [args]"
+
+SERVICE=$1
+ARGS=("${@:2}") # get remaining arguments as array
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+case $SERVICE in
+ (taskexecutor)
+ CLASS_TO_RUN=org.apache.flink.runtime.taskexecutor.TaskManagerRunner
+ ;;
+
+ (historyserver)
+ CLASS_TO_RUN=org.apache.flink.runtime.webmonitor.history.HistoryServer
+ ;;
+
+ (zookeeper)
+ CLASS_TO_RUN=org.apache.flink.runtime.zookeeper.FlinkZooKeeperQuorumPeer
+ ;;
+
+ (standalonesession)
+ CLASS_TO_RUN=org.apache.flink.runtime.entrypoint.StandaloneSessionClusterEntrypoint
+ ;;
+
+ (standalonejob)
+ CLASS_TO_RUN=org.apache.flink.container.entrypoint.StandaloneApplicationClusterEntryPoint
+ ;;
+
+ (kubernetes-session)
+ CLASS_TO_RUN=org.apache.flink.kubernetes.entrypoint.KubernetesSessionClusterEntrypoint
+ ;;
+
+ (kubernetes-application)
+ CLASS_TO_RUN=org.apache.flink.kubernetes.entrypoint.KubernetesApplicationClusterEntrypoint
+ ;;
+
+ (kubernetes-taskmanager)
+ CLASS_TO_RUN=org.apache.flink.kubernetes.taskmanager.KubernetesTaskExecutorRunner
+ ;;
+
+ (*)
+ echo "Unknown service '${SERVICE}'. $USAGE."
+ exit 1
+ ;;
+esac
+
+FLINK_TM_CLASSPATH=`constructFlinkClassPath`
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+pid=$FLINK_PID_DIR/flink-$FLINK_IDENT_STRING-$SERVICE.pid
+mkdir -p "$FLINK_PID_DIR"
+# The lock needs to be released after use because this script is started in the foreground
+command -v flock >/dev/null 2>&1
+flock_exist=$?
+if [[ ${flock_exist} -eq 0 ]]; then
+ exec 200<"$FLINK_PID_DIR"
+ flock 200
+fi
+# Remove the pid file when all the processes are dead
+if [ -f "$pid" ]; then
+ all_dead=0
+ while read each_pid; do
+ # Check whether the process is still running
+ kill -0 $each_pid > /dev/null 2>&1
+ [[ $? -eq 0 ]] && all_dead=1
+ done < "$pid"
+ [ ${all_dead} -eq 0 ] && rm $pid
+fi
+id=$([ -f "$pid" ] && echo $(wc -l < "$pid") || echo "0")
+
+FLINK_LOG_PREFIX="${FLINK_LOG_DIR}/flink-${FLINK_IDENT_STRING}-${SERVICE}-${id}-${HOSTNAME}"
+log="${FLINK_LOG_PREFIX}.log"
+
+log_setting=("-Dlog.file=${log}" "-Dlog4j.configuration=file:${FLINK_CONF_DIR}/log4j-console.properties" "-Dlog4j.configurationFile=file:${FLINK_CONF_DIR}/log4j-console.properties" "-Dlogback.configurationFile=file:${FLINK_CONF_DIR}/logback-console.xml")
+
+echo "Starting $SERVICE as a console application on host $HOSTNAME."
+
+# Add the current process id to pid file
+echo $$ >> "$pid" 2>/dev/null
+
+# Release the lock because the java process runs in the foreground and would block other processes from modifying the pid file
+[[ ${flock_exist} -eq 0 ]] && flock -u 200
+
+exec "$JAVA_RUN" $JVM_ARGS ${FLINK_ENV_JAVA_OPTS} "${log_setting[@]}" -classpath "`manglePathList "$FLINK_TM_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" ${CLASS_TO_RUN} "${ARGS[@]}"
diff --git a/MSH-PIC/flink/bin/flink-daemon.sh b/MSH-PIC/flink/bin/flink-daemon.sh
new file mode 100644
index 0000000..67fe698
--- /dev/null
+++ b/MSH-PIC/flink/bin/flink-daemon.sh
@@ -0,0 +1,194 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a Flink daemon.
+USAGE="Usage: flink-daemon.sh (start|stop|stop-all) (taskexecutor|zookeeper|historyserver|standalonesession|standalonejob) [args]"
+
+STARTSTOP=$1
+DAEMON=$2
+ARGS=("${@:3}") # get remaining arguments as array
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+case $DAEMON in
+ (taskexecutor)
+ CLASS_TO_RUN=org.apache.flink.runtime.taskexecutor.TaskManagerRunner
+ ;;
+
+ (zookeeper)
+ CLASS_TO_RUN=org.apache.flink.runtime.zookeeper.FlinkZooKeeperQuorumPeer
+ ;;
+
+ (historyserver)
+ CLASS_TO_RUN=org.apache.flink.runtime.webmonitor.history.HistoryServer
+ ;;
+
+ (standalonesession)
+ CLASS_TO_RUN=org.apache.flink.runtime.entrypoint.StandaloneSessionClusterEntrypoint
+ ;;
+
+ (standalonejob)
+ CLASS_TO_RUN=org.apache.flink.container.entrypoint.StandaloneApplicationClusterEntryPoint
+ ;;
+
+ (*)
+ echo "Unknown daemon '${DAEMON}'. $USAGE."
+ exit 1
+ ;;
+esac
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+FLINK_TM_CLASSPATH=`constructFlinkClassPath`
+
+pid=$FLINK_PID_DIR/flink-$FLINK_IDENT_STRING-$DAEMON.pid
+
+mkdir -p "$FLINK_PID_DIR"
+
+# Log files for daemons are indexed from the process ID's position in the PID
+# file. The following lock prevents a race condition during daemon startup
+# when multiple daemons read, index, and write to the PID file concurrently.
+# The lock is created on the PID directory since a lock file cannot be safely
+# removed. The daemon is started with the lock closed and the lock remains
+# active in this script until the script exits.
+command -v flock >/dev/null 2>&1
+if [[ $? -eq 0 ]]; then
+ exec 200<"$FLINK_PID_DIR"
+ flock 200
+fi
+
+# Ascending ID depending on number of lines in pid file.
+# This allows us to start multiple daemons of each type.
+id=$([ -f "$pid" ] && echo $(wc -l < "$pid") || echo "0")
+
+FLINK_LOG_PREFIX="${FLINK_LOG_DIR}/flink-${FLINK_IDENT_STRING}-${DAEMON}-${id}-${HOSTNAME}"
+log="${FLINK_LOG_PREFIX}.log"
+out="${FLINK_LOG_PREFIX}.out"
+
+log_setting=("-Dlog.file=${log}" "-Dlog4j.configuration=file:${FLINK_CONF_DIR}/log4j.properties" "-Dlog4j.configurationFile=file:${FLINK_CONF_DIR}/log4j.properties" "-Dlogback.configurationFile=file:${FLINK_CONF_DIR}/logback.xml")
+
+function guaranteed_kill {
+ to_stop_pid=$1
+ daemon=$2
+
+ # send sigterm for graceful shutdown
+ kill $to_stop_pid
+ # if timeout exists, use it
+ if command -v timeout &> /dev/null ; then
+ # wait 10 seconds for process to stop. By default, Flink kills the JVM 5 seconds after sigterm.
+ timeout 10 tail --pid=$to_stop_pid -f /dev/null
+ if [ "$?" -eq 124 ]; then
+ echo "Daemon $daemon didn't stop within 10 seconds. Killing it."
+ # send sigkill
+ kill -9 $to_stop_pid
+ fi
+ fi
+}
+
+case $STARTSTOP in
+
+ (start)
+
+ # Print a warning if daemons are already running on host
+ if [ -f "$pid" ]; then
+ active=()
+ while IFS='' read -r p || [[ -n "$p" ]]; do
+ kill -0 $p >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ active+=($p)
+ fi
+ done < "${pid}"
+
+ count="${#active[@]}"
+
+ if [ ${count} -gt 0 ]; then
+ echo "[INFO] $count instance(s) of $DAEMON are already running on $HOSTNAME."
+ fi
+ fi
+
+ # Evaluate user options for local variable expansion
+ FLINK_ENV_JAVA_OPTS=$(eval echo ${FLINK_ENV_JAVA_OPTS})
+
+ echo "Starting $DAEMON daemon on host $HOSTNAME."
+ "$JAVA_RUN" $JVM_ARGS ${FLINK_ENV_JAVA_OPTS} "${log_setting[@]}" -classpath "`manglePathList "$FLINK_TM_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" ${CLASS_TO_RUN} "${ARGS[@]}" > "$out" 200<&- 2>&1 < /dev/null &
+
+ mypid=$!
+
+ # Add to pid file if successful start
+ if [[ ${mypid} =~ ${IS_NUMBER} ]] && kill -0 $mypid > /dev/null 2>&1 ; then
+ echo $mypid >> "$pid"
+ else
+ echo "Error starting $DAEMON daemon."
+ exit 1
+ fi
+ ;;
+
+ (stop)
+ if [ -f "$pid" ]; then
+ # Remove last in pid file
+ to_stop=$(tail -n 1 "$pid")
+
+ if [ -z $to_stop ]; then
+ rm "$pid" # If all stopped, clean up pid file
+ echo "No $DAEMON daemon to stop on host $HOSTNAME."
+ else
+ sed \$d "$pid" > "$pid.tmp" # all but last line
+
+ # If all stopped, clean up pid file
+ [ $(wc -l < "$pid.tmp") -eq 0 ] && rm "$pid" "$pid.tmp" || mv "$pid.tmp" "$pid"
+
+ if kill -0 $to_stop > /dev/null 2>&1; then
+ echo "Stopping $DAEMON daemon (pid: $to_stop) on host $HOSTNAME."
+ guaranteed_kill $to_stop $DAEMON
+ else
+ echo "No $DAEMON daemon (pid: $to_stop) is running anymore on $HOSTNAME."
+ fi
+ fi
+ else
+ echo "No $DAEMON daemon to stop on host $HOSTNAME."
+ fi
+ ;;
+
+ (stop-all)
+ if [ -f "$pid" ]; then
+ mv "$pid" "${pid}.tmp"
+
+ while read to_stop; do
+ if kill -0 $to_stop > /dev/null 2>&1; then
+ echo "Stopping $DAEMON daemon (pid: $to_stop) on host $HOSTNAME."
+ guaranteed_kill $to_stop $DAEMON
+ else
+ echo "Skipping $DAEMON daemon (pid: $to_stop), because it is not running anymore on $HOSTNAME."
+ fi
+ done < "${pid}.tmp"
+ rm "${pid}.tmp"
+ fi
+ ;;
+
+ (*)
+ echo "Unexpected argument '$STARTSTOP'. $USAGE."
+ exit 1
+ ;;
+
+esac
diff --git a/MSH-PIC/flink/bin/historyserver.sh b/MSH-PIC/flink/bin/historyserver.sh
new file mode 100644
index 0000000..3bc3049
--- /dev/null
+++ b/MSH-PIC/flink/bin/historyserver.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a Flink HistoryServer
+USAGE="Usage: historyserver.sh (start|start-foreground|stop)"
+
+STARTSTOP=$1
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+if [[ $STARTSTOP == "start" ]] || [[ $STARTSTOP == "start-foreground" ]]; then
+ export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_HS}"
+ args=("--configDir" "${FLINK_CONF_DIR}")
+fi
+
+if [[ $STARTSTOP == "start-foreground" ]]; then
+ exec "${FLINK_BIN_DIR}"/flink-console.sh historyserver "${args[@]}"
+else
+ "${FLINK_BIN_DIR}"/flink-daemon.sh $STARTSTOP historyserver "${args[@]}"
+fi
diff --git a/MSH-PIC/flink/bin/jobmanager.sh b/MSH-PIC/flink/bin/jobmanager.sh
new file mode 100644
index 0000000..35fbe2c
--- /dev/null
+++ b/MSH-PIC/flink/bin/jobmanager.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a Flink JobManager.
+USAGE="Usage: jobmanager.sh ((start|start-foreground) [host] [webui-port])|stop|stop-all"
+
+STARTSTOP=$1
+HOST=$2 # optional when starting multiple instances
+WEBUIPORT=$3 # optional when starting multiple instances
+
+if [[ $STARTSTOP != "start" ]] && [[ $STARTSTOP != "start-foreground" ]] && [[ $STARTSTOP != "stop" ]] && [[ $STARTSTOP != "stop-all" ]]; then
+ echo $USAGE
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+ENTRYPOINT=standalonesession
+
+if [[ $STARTSTOP == "start" ]] || [[ $STARTSTOP == "start-foreground" ]]; then
+ # Add JobManager-specific JVM options
+ export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_JM}"
+ parseJmArgsAndExportLogs "${ARGS[@]}"
+
+ args=("--configDir" "${FLINK_CONF_DIR}" "--executionMode" "cluster")
+ if [ ! -z $HOST ]; then
+ args+=("--host")
+ args+=("${HOST}")
+ fi
+
+ if [ ! -z $WEBUIPORT ]; then
+ args+=("--webui-port")
+ args+=("${WEBUIPORT}")
+ fi
+
+ if [ ! -z "${DYNAMIC_PARAMETERS}" ]; then
+ args+=(${DYNAMIC_PARAMETERS[@]})
+ fi
+fi
+
+if [[ $STARTSTOP == "start-foreground" ]]; then
+ exec "${FLINK_BIN_DIR}"/flink-console.sh $ENTRYPOINT "${args[@]}"
+else
+ "${FLINK_BIN_DIR}"/flink-daemon.sh $STARTSTOP $ENTRYPOINT "${args[@]}"
+fi
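Illustrative invocations matching the usage string above; the host and web UI port arguments are optional, and the values shown here are placeholders rather than settings taken from this repository:

    # start a standalonesession JobManager, optionally pinning its host and web UI port
    ./bin/jobmanager.sh start 192.168.20.193 8081
    # stop the most recently started JobManager daemon on this machine
    ./bin/jobmanager.sh stop
    # stop all JobManager daemons on this machine
    ./bin/jobmanager.sh stop-all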
diff --git a/MSH-PIC/flink/bin/kubernetes-jobmanager.sh b/MSH-PIC/flink/bin/kubernetes-jobmanager.sh
new file mode 100644
index 0000000..0513123
--- /dev/null
+++ b/MSH-PIC/flink/bin/kubernetes-jobmanager.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start a Flink JobManager for native Kubernetes.
+# NOTE: This script is not meant to be started manually. It is invoked by the native Kubernetes integration.
+
+USAGE="Usage: kubernetes-jobmanager.sh kubernetes-session|kubernetes-application [args]"
+
+ENTRY_POINT_NAME=$1
+
+ARGS=("${@:2}")
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Add JobManager specific JVM options
+export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_JM}"
+parseJmArgsAndExportLogs "${ARGS[@]}"
+
+if [ ! -z "${DYNAMIC_PARAMETERS}" ]; then
+ ARGS=(${DYNAMIC_PARAMETERS[@]} "${ARGS[@]}")
+fi
+
+exec "${FLINK_BIN_DIR}"/flink-console.sh ${ENTRY_POINT_NAME} "${ARGS[@]}"
diff --git a/MSH-PIC/flink/bin/kubernetes-session.sh b/MSH-PIC/flink/bin/kubernetes-session.sh
new file mode 100644
index 0000000..559a776
--- /dev/null
+++ b/MSH-PIC/flink/bin/kubernetes-session.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+# get Flink config
+. "$bin"/config.sh
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+JVM_ARGS="$JVM_ARGS -Xmx512m"
+
+CC_CLASSPATH=`manglePathList $(constructFlinkClassPath):$INTERNAL_HADOOP_CLASSPATHS`
+
+log=$FLINK_LOG_DIR/flink-$FLINK_IDENT_STRING-k8s-session-$HOSTNAME.log
+log_setting="-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j-session.properties -Dlog4j.configurationFile=file:"$FLINK_CONF_DIR"/log4j-session.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback-session.xml"
+
+export FLINK_CONF_DIR
+
+"$JAVA_RUN" $JVM_ARGS -classpath "$CC_CLASSPATH" $log_setting org.apache.flink.kubernetes.cli.KubernetesSessionCli "$@"
diff --git a/MSH-PIC/flink/bin/kubernetes-taskmanager.sh b/MSH-PIC/flink/bin/kubernetes-taskmanager.sh
new file mode 100644
index 0000000..b11fb89
--- /dev/null
+++ b/MSH-PIC/flink/bin/kubernetes-taskmanager.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start a Flink TaskManager for native Kubernetes.
+# NOTE: This script is not meant to be started manually. It is invoked by the native Kubernetes integration.
+
+USAGE="Usage: kubernetes-taskmanager.sh [args]"
+
+ENTRYPOINT=kubernetes-taskmanager
+
+ARGS=("${@:1}")
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# if no other JVM options are set, set the GC to G1
+if [ -z "${FLINK_ENV_JAVA_OPTS}" ] && [ -z "${FLINK_ENV_JAVA_OPTS_TM}" ]; then
+ export JVM_ARGS="$JVM_ARGS -XX:+UseG1GC"
+fi
+
+# Add TaskManager specific JVM options
+export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_TM}"
+export JVM_ARGS="$JVM_ARGS $FLINK_TM_JVM_MEM_OPTS"
+
+ARGS=("--configDir" "${FLINK_CONF_DIR}" "${ARGS[@]}")
+
+exec "${FLINK_BIN_DIR}"/flink-console.sh $ENTRYPOINT "${ARGS[@]}"
diff --git a/MSH-PIC/flink/bin/mesos-appmaster-job.sh b/MSH-PIC/flink/bin/mesos-appmaster-job.sh
new file mode 100644
index 0000000..5ae7396
--- /dev/null
+++ b/MSH-PIC/flink/bin/mesos-appmaster-job.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=$(dirname "$0")
+bin=$(cd "${bin}" || exit; pwd)
+
+exec "${bin}"/mesos-jobmanager.sh "org.apache.flink.mesos.entrypoint.MesosJobClusterEntrypoint" "$@"
diff --git a/MSH-PIC/flink/bin/mesos-appmaster.sh b/MSH-PIC/flink/bin/mesos-appmaster.sh
new file mode 100644
index 0000000..2939e31
--- /dev/null
+++ b/MSH-PIC/flink/bin/mesos-appmaster.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=$(dirname "$0")
+bin=$(cd "${bin}" || exit; pwd)
+
+exec "${bin}"/mesos-jobmanager.sh "org.apache.flink.mesos.entrypoint.MesosSessionClusterEntrypoint" "$@"
diff --git a/MSH-PIC/flink/bin/mesos-jobmanager.sh b/MSH-PIC/flink/bin/mesos-jobmanager.sh
new file mode 100644
index 0000000..b786e18
--- /dev/null
+++ b/MSH-PIC/flink/bin/mesos-jobmanager.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+ENTRY_POINT=$1
+ARGS=("${@:2}")
+
+bin=$(dirname "$0")
+bin=$(cd "${bin}" || exit; pwd)
+
+# get Flink config
+. "${bin}"/config.sh
+
+parseJmArgsAndExportLogs "${ARGS[@]}"
+
+if [ ! -z "${DYNAMIC_PARAMETERS}" ]; then
+ ARGS=(${DYNAMIC_PARAMETERS[@]} "${ARGS[@]}")
+fi
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+CC_CLASSPATH=$(manglePathList "$(constructFlinkClassPath):${INTERNAL_HADOOP_CLASSPATHS}")
+
+log="${FLINK_LOG_DIR}/flink-${FLINK_IDENT_STRING}-mesos-appmaster-${HOSTNAME}.log"
+log_setting="-Dlog.file=${log} -Dlog4j.configuration=file:${FLINK_CONF_DIR}/log4j.properties -Dlog4j.configurationFile=file:${FLINK_CONF_DIR}/log4j.properties -Dlogback.configurationFile=file:${FLINK_CONF_DIR}/logback.xml"
+
+"${JAVA_RUN}" ${JVM_ARGS} -classpath ${CC_CLASSPATH} ${log_setting} ${ENTRY_POINT} "${ARGS[@]}"
+
+rc=$?
+
+if [[ ${rc} -ne 0 ]]; then
+ echo "Error while starting the mesos application master. Please check ${log} for more details."
+fi
+
+exit ${rc}
diff --git a/MSH-PIC/flink/bin/mesos-taskmanager.sh b/MSH-PIC/flink/bin/mesos-taskmanager.sh
new file mode 100644
index 0000000..6c65c2a
--- /dev/null
+++ b/MSH-PIC/flink/bin/mesos-taskmanager.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+# get Flink config
+. "$bin"/config.sh
+
+CC_CLASSPATH=`manglePathList $(constructFlinkClassPath):$INTERNAL_HADOOP_CLASSPATHS`
+
+log=flink-taskmanager.log
+log_setting="-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j.properties -Dlog4j.configurationFile=file:"$FLINK_CONF_DIR"/log4j.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback.xml"
+
+# Add precomputed memory JVM options
+if [ -z "${FLINK_ENV_JAVA_OPTS_MEM}" ]; then
+ FLINK_ENV_JAVA_OPTS_MEM=""
+fi
+export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_MEM}"
+
+# Add TaskManager-specific JVM options
+export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_TM}"
+
+ENTRY_POINT=org.apache.flink.mesos.entrypoint.MesosTaskExecutorRunner
+
+exec "$JAVA_RUN" $JVM_ARGS ${FLINK_ENV_JAVA_OPTS} -classpath "$CC_CLASSPATH" $log_setting ${ENTRY_POINT} "$@"
+
diff --git a/MSH-PIC/flink/bin/pyflink-shell.sh b/MSH-PIC/flink/bin/pyflink-shell.sh
new file mode 100644
index 0000000..a616abb
--- /dev/null
+++ b/MSH-PIC/flink/bin/pyflink-shell.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+. "$bin"/find-flink-home.sh
+
+_FLINK_HOME_DETERMINED=1
+
+. "$FLINK_HOME"/bin/config.sh
+
+FLINK_CLASSPATH=`constructFlinkClassPath`
+PYTHON_JAR_PATH=`echo "$FLINK_OPT_DIR"/flink-python*.jar`
+
+
+PYFLINK_PYTHON="${PYFLINK_PYTHON:-"python"}"
+
+# So that Python can find Flink's JARs
+export FLINK_BIN_DIR=$FLINK_BIN_DIR
+export FLINK_HOME
+
+# Add pyflink & py4j & cloudpickle to PYTHONPATH
+export PYTHONPATH="$FLINK_OPT_DIR/python/pyflink.zip:$PYTHONPATH"
+PY4J_ZIP=`echo "$FLINK_OPT_DIR"/python/py4j-*-src.zip`
+CLOUDPICKLE_ZIP=`echo "$FLINK_OPT_DIR"/python/cloudpickle-*-src.zip`
+export PYTHONPATH="$PY4J_ZIP:$CLOUDPICKLE_ZIP:$PYTHONPATH"
+
+PARSER="org.apache.flink.client.python.PythonShellParser"
+function parse_options() {
+ "${JAVA_RUN}" ${JVM_ARGS} -cp ${FLINK_CLASSPATH}:${PYTHON_JAR_PATH} ${PARSER} "$@"
+ printf "%d\0" $?
+}
+
+# Turn off posix mode since it does not allow process substitution
+set +o posix
+# If the command has the option --help | -h, the script directly runs
+# the PythonShellParser program and prints its help message to stdout.
+if [[ "$@" =~ '--help' ]] || [[ "$@" =~ '-h' ]]; then
+ "${JAVA_RUN}" ${JVM_ARGS} -cp ${FLINK_CLASSPATH}:${PYTHON_JAR_PATH} ${PARSER} "$@"
+ exit 0
+fi
+OPTIONS=()
+while IFS= read -d '' -r ARG; do
+ OPTIONS+=("$ARG")
+done < <(parse_options "$@")
+
+COUNT=${#OPTIONS[@]}
+LAST=$((COUNT - 1))
+LAUNCHER_EXIT_CODE=${OPTIONS[$LAST]}
+
+# Certain JVM failures result in errors being printed to stdout (instead of stderr), which causes
+# the code that parses the output of the launcher to get confused. In those cases, check if the
+# exit code is an integer, and if it's not, handle it as a special error case.
+if ! [[ ${LAUNCHER_EXIT_CODE} =~ ^[0-9]+$ ]]; then
+ echo "${OPTIONS[@]}" | head -n-1 1>&2
+ exit 1
+fi
+
+if [[ ${LAUNCHER_EXIT_CODE} != 0 ]]; then
+ exit ${LAUNCHER_EXIT_CODE}
+fi
+
+OPTIONS=("${OPTIONS[@]:0:$LAST}")
+
+export SUBMIT_ARGS=${OPTIONS[@]}
+
+# -i: interactive
+# -m: execute shell.py in the zip package
+${PYFLINK_PYTHON} -i -m pyflink.shell
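Typical ways to launch the shell through this script; the 'local' execution mode and the python3 interpreter are assumptions about the target environment, while --help and PYFLINK_PYTHON come directly from the script above:

    # print the option parser's help text and exit
    ./bin/pyflink-shell.sh --help
    # start the interactive shell against a local mini cluster with a chosen interpreter
    PYFLINK_PYTHON=python3 ./bin/pyflink-shell.sh local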
diff --git a/MSH-PIC/flink/bin/set_flink_yarn_env.sh b/MSH-PIC/flink/bin/set_flink_yarn_env.sh
new file mode 100644
index 0000000..70314f9
--- /dev/null
+++ b/MSH-PIC/flink/bin/set_flink_yarn_env.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+echo -e "\n#flink\nexport FLINK_HOME=/home/tsg/olap/flink-1.13.1\nexport PATH=\$FLINK_HOME/bin:\$PATH" >> /etc/profile.d/flink.sh
+chmod +x /etc/profile.d/flink.sh
+source /etc/profile
+
+
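Note that the "source /etc/profile" line above only affects the shell running the script itself; a quick sanity check is therefore best done from a new login shell, which picks up /etc/profile.d/flink.sh automatically:

    # verify that the Flink environment is visible in a fresh login shell
    echo "$FLINK_HOME"   # expected: /home/tsg/olap/flink-1.13.1
    which flink          # expected: /home/tsg/olap/flink-1.13.1/bin/flink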
diff --git a/MSH-PIC/flink/bin/sql-client.sh b/MSH-PIC/flink/bin/sql-client.sh
new file mode 100644
index 0000000..759f0c6
--- /dev/null
+++ b/MSH-PIC/flink/bin/sql-client.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+################################################################################
+# Adapted from the "flink" bash script
+################################################################################
+
+target="$0"
+# In case the executable has been directly symlinked, figure out
+# the correct bin path by following its symlink up to an upper bound.
+# Note: we can't use the readlink utility here if we want to be POSIX
+# compatible.
+iteration=0
+while [ -L "$target" ]; do
+ if [ "$iteration" -gt 100 ]; then
+ echo "Cannot resolve path: You have a cyclic symlink in $target."
+ break
+ fi
+ ls=`ls -ld -- "$target"`
+ target=`expr "$ls" : '.* -> \(.*\)$'`
+ iteration=$((iteration + 1))
+done
+
+# Convert relative path to absolute path
+bin=`dirname "$target"`
+
+# get flink config
+. "$bin"/config.sh
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+CC_CLASSPATH=`constructFlinkClassPath`
+
+################################################################################
+# SQL client specific logic
+################################################################################
+
+log=$FLINK_LOG_DIR/flink-$FLINK_IDENT_STRING-sql-client-$HOSTNAME.log
+log_setting=(-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j-cli.properties -Dlog4j.configurationFile=file:"$FLINK_CONF_DIR"/log4j-cli.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback.xml)
+
+# get path of the jar in /opt if it exists
+FLINK_SQL_CLIENT_JAR=$(find "$FLINK_OPT_DIR" -regex ".*flink-sql-client.*.jar")
+
+# add flink-python jar to the classpath
+if [[ ! "$CC_CLASSPATH" =~ .*flink-python.*.jar ]]; then
+ FLINK_PYTHON_JAR=$(find "$FLINK_OPT_DIR" -regex ".*flink-python.*.jar")
+ if [ -n "$FLINK_PYTHON_JAR" ]; then
+ CC_CLASSPATH="$CC_CLASSPATH:$FLINK_PYTHON_JAR"
+ fi
+fi
+
+# check if the SQL client is already on the classpath and therefore does not need to be added manually
+if [[ "$CC_CLASSPATH" =~ .*flink-sql-client.*.jar ]]; then
+
+ # start client without jar
+ exec "$JAVA_RUN" $JVM_ARGS "${log_setting[@]}" -classpath "`manglePathList "$CC_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" org.apache.flink.table.client.SqlClient "$@"
+
+# check if SQL client jar is in /opt
+elif [ -n "$FLINK_SQL_CLIENT_JAR" ]; then
+
+ # start client with jar
+ exec "$JAVA_RUN" $JVM_ARGS "${log_setting[@]}" -classpath "`manglePathList "$CC_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS:$FLINK_SQL_CLIENT_JAR"`" org.apache.flink.table.client.SqlClient "$@" --jar "`manglePath $FLINK_SQL_CLIENT_JAR`"
+
+# write error message to stderr
+else
+ (>&2 echo "[ERROR] Flink SQL Client JAR file 'flink-sql-client*.jar' found neither in the classpath nor in the /opt directory; it should be located in $FLINK_OPT_DIR.")
+
+ # exit to force process failure
+ exit 1
+fi
diff --git a/MSH-PIC/flink/bin/standalone-job.sh b/MSH-PIC/flink/bin/standalone-job.sh
new file mode 100644
index 0000000..b4cfa20
--- /dev/null
+++ b/MSH-PIC/flink/bin/standalone-job.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a Flink JobManager.
+USAGE="Usage: standalone-job.sh ((start|start-foreground))|stop [args]"
+
+STARTSTOP=$1
+ENTRY_POINT_NAME="standalonejob"
+
+if [[ $STARTSTOP != "start" ]] && [[ $STARTSTOP != "start-foreground" ]] && [[ $STARTSTOP != "stop" ]]; then
+ echo $USAGE
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Startup parameters
+ARGS=("${@:2}")
+
+if [[ $STARTSTOP == "start" ]] || [[ $STARTSTOP == "start-foreground" ]]; then
+ # Add cluster entry point specific JVM options
+ export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_JM}"
+ parseJmArgsAndExportLogs "${ARGS[@]}"
+
+ if [ ! -z "${DYNAMIC_PARAMETERS}" ]; then
+ ARGS=(${DYNAMIC_PARAMETERS[@]} "${ARGS[@]}")
+ fi
+fi
+
+ARGS=("--configDir" "${FLINK_CONF_DIR}" "${ARGS[@]}")
+
+if [[ $STARTSTOP == "start-foreground" ]]; then
+ exec "${FLINK_BIN_DIR}"/flink-console.sh ${ENTRY_POINT_NAME} "${ARGS[@]}"
+else
+ "${FLINK_BIN_DIR}"/flink-daemon.sh ${STARTSTOP} ${ENTRY_POINT_NAME} "${ARGS[@]}"
+fi
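An illustrative start command; --job-classname is the standard entry-point option, while the class name and the trailing job arguments are placeholders (the job jar itself is expected to be on the classpath, e.g. under lib/):

    # run a single job on this standalone cluster entry point
    ./bin/standalone-job.sh start --job-classname com.example.MyFlinkJob --input hdfs:///data/in
    # and later
    ./bin/standalone-job.sh stop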
diff --git a/MSH-PIC/flink/bin/start-cluster.sh b/MSH-PIC/flink/bin/start-cluster.sh
new file mode 100644
index 0000000..720b33c
--- /dev/null
+++ b/MSH-PIC/flink/bin/start-cluster.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Start the JobManager instance(s)
+shopt -s nocasematch
+if [[ $HIGH_AVAILABILITY == "zookeeper" ]]; then
+ # HA Mode
+ readMasters
+
+ echo "Starting HA cluster with ${#MASTERS[@]} masters."
+
+ for ((i=0;i<${#MASTERS[@]};++i)); do
+ master=${MASTERS[i]}
+ webuiport=${WEBUIPORTS[i]}
+
+ if [ ${MASTERS_ALL_LOCALHOST} = true ] ; then
+ "${FLINK_BIN_DIR}"/jobmanager.sh start "${master}" "${webuiport}"
+ else
+ ssh -n $FLINK_SSH_OPTS $master -- "nohup /bin/bash -l \"${FLINK_BIN_DIR}/jobmanager.sh\" start ${master} ${webuiport} &"
+ fi
+ done
+
+else
+ echo "Starting cluster."
+
+ # Start single JobManager on this machine
+ "$FLINK_BIN_DIR"/jobmanager.sh start
+fi
+shopt -u nocasematch
+
+# Start TaskManager instance(s)
+TMWorkers start
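readMasters above parses conf/masters, which lists one JobManager per line as host:webui-port; with ZooKeeper HA enabled the file would look roughly like this (the hosts and port shown are placeholders, not the contents of the masters file in this repository):

    192.168.20.193:8081
    192.168.20.194:8081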
diff --git a/MSH-PIC/flink/bin/start-zookeeper-quorum.sh b/MSH-PIC/flink/bin/start-zookeeper-quorum.sh
new file mode 100644
index 0000000..d5a7593
--- /dev/null
+++ b/MSH-PIC/flink/bin/start-zookeeper-quorum.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Starts a ZooKeeper quorum as configured in $FLINK_CONF_DIR/zoo.cfg
+
+ZK_CONF="$FLINK_CONF_DIR/zoo.cfg"
+if [ ! -f "$ZK_CONF" ]; then
+ echo "[ERROR] No ZooKeeper configuration file found in '$ZK_CONF'."
+ exit 1
+fi
+
+# Extract server.X from ZooKeeper config and start instances
+while read server ; do
+ server=$(echo -e "${server}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') # trim
+
+ # match server.id=address[:port[:port]]
+ if [[ $server =~ ^server\.([0-9]+)[[:space:]]*\=[[:space:]]*([^: \#]+) ]]; then
+ id=${BASH_REMATCH[1]}
+ address=${BASH_REMATCH[2]}
+
+ ssh -n $FLINK_SSH_OPTS $address -- "nohup /bin/bash -l $FLINK_BIN_DIR/zookeeper.sh start $id &"
+ else
+ echo "[WARN] Parse error. Skipping config entry '$server'."
+ fi
+done < <(grep "^server\." "$ZK_CONF")
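The regular expression above picks up quorum entries of the form server.<id>=<address>[:port[:port]] from conf/zoo.cfg, for example (the addresses are illustrative, reusing the ZooKeeper hosts configured elsewhere in this repository; 2888/3888 are the usual peer and leader-election ports):

    server.1=192.168.20.193:2888:3888
    server.2=192.168.20.194:2888:3888
    server.3=192.168.20.195:2888:3888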
diff --git a/MSH-PIC/flink/bin/stop-cluster.sh b/MSH-PIC/flink/bin/stop-cluster.sh
new file mode 100644
index 0000000..d29b4f3
--- /dev/null
+++ b/MSH-PIC/flink/bin/stop-cluster.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Stop TaskManager instance(s)
+TMWorkers stop
+
+# Stop JobManager instance(s)
+shopt -s nocasematch
+if [[ $HIGH_AVAILABILITY == "zookeeper" ]]; then
+ # HA Mode
+ readMasters
+
+ if [ ${MASTERS_ALL_LOCALHOST} = true ] ; then
+ for master in ${MASTERS[@]}; do
+ "$FLINK_BIN_DIR"/jobmanager.sh stop
+ done
+ else
+ for master in ${MASTERS[@]}; do
+ ssh -n $FLINK_SSH_OPTS $master -- "nohup /bin/bash -l \"${FLINK_BIN_DIR}/jobmanager.sh\" stop &"
+ done
+ fi
+
+else
+ "$FLINK_BIN_DIR"/jobmanager.sh stop
+fi
+shopt -u nocasematch
diff --git a/MSH-PIC/flink/bin/stop-zookeeper-quorum.sh b/MSH-PIC/flink/bin/stop-zookeeper-quorum.sh
new file mode 100644
index 0000000..ad79de8
--- /dev/null
+++ b/MSH-PIC/flink/bin/stop-zookeeper-quorum.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+# Stops a ZooKeeper quorum as configured in $FLINK_CONF_DIR/zoo.cfg
+
+ZK_CONF="$FLINK_CONF_DIR/zoo.cfg"
+if [ ! -f "$ZK_CONF" ]; then
+ echo "[ERROR] No ZooKeeper configuration file found in '$ZK_CONF'."
+ exit 1
+fi
+
+# Extract server.X from ZooKeeper config and stop instances
+while read server ; do
+ server=$(echo -e "${server}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') # trim
+
+ # match server.id=address[:port[:port]]
+ if [[ $server =~ ^server\.([0-9]+)[[:space:]]*\=[[:space:]]*([^: \#]+) ]]; then
+ id=${BASH_REMATCH[1]}
+ server=${BASH_REMATCH[2]}
+
+ ssh -n $FLINK_SSH_OPTS $server -- "nohup /bin/bash -l $FLINK_BIN_DIR/zookeeper.sh stop &"
+ else
+ echo "[WARN] Parse error. Skipping config entry '$server'."
+ fi
+done < <(grep "^server\." "$ZK_CONF")
diff --git a/MSH-PIC/flink/bin/taskmanager.sh b/MSH-PIC/flink/bin/taskmanager.sh
new file mode 100644
index 0000000..fdc6514
--- /dev/null
+++ b/MSH-PIC/flink/bin/taskmanager.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a Flink TaskManager.
+USAGE="Usage: taskmanager.sh (start|start-foreground|stop|stop-all)"
+
+STARTSTOP=$1
+
+ARGS=("${@:2}")
+
+if [[ $STARTSTOP != "start" ]] && [[ $STARTSTOP != "start-foreground" ]] && [[ $STARTSTOP != "stop" ]] && [[ $STARTSTOP != "stop-all" ]]; then
+ echo $USAGE
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+ENTRYPOINT=taskexecutor
+
+if [[ $STARTSTOP == "start" ]] || [[ $STARTSTOP == "start-foreground" ]]; then
+
+ # if no other JVM options are set, set the GC to G1
+ if [ -z "${FLINK_ENV_JAVA_OPTS}" ] && [ -z "${FLINK_ENV_JAVA_OPTS_TM}" ]; then
+ export JVM_ARGS="$JVM_ARGS -XX:+UseG1GC"
+ fi
+
+ # Add TaskManager-specific JVM options
+ export FLINK_ENV_JAVA_OPTS="${FLINK_ENV_JAVA_OPTS} ${FLINK_ENV_JAVA_OPTS_TM}"
+
+ # Startup parameters
+
+ parseTmArgsAndExportLogs "${ARGS[@]}"
+
+ if [ ! -z "${DYNAMIC_PARAMETERS}" ]; then
+ ARGS=(${DYNAMIC_PARAMETERS[@]} "${ARGS[@]}")
+ fi
+
+ ARGS=("--configDir" "${FLINK_CONF_DIR}" "${ARGS[@]}")
+fi
+
+if [[ $STARTSTOP == "start-foreground" ]]; then
+ exec "${FLINK_BIN_DIR}"/flink-console.sh $ENTRYPOINT "${ARGS[@]}"
+else
+ if [[ $FLINK_TM_COMPUTE_NUMA == "false" ]]; then
+ # Start a single TaskManager
+ "${FLINK_BIN_DIR}"/flink-daemon.sh $STARTSTOP $ENTRYPOINT "${ARGS[@]}"
+ else
+ # Example output from `numactl --show` on an AWS c4.8xlarge:
+ # policy: default
+ # preferred node: current
+ # physcpubind: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+ # cpubind: 0 1
+ # nodebind: 0 1
+ # membind: 0 1
+ read -ra NODE_LIST <<< $(numactl --show | grep "^nodebind: ")
+ for NODE_ID in "${NODE_LIST[@]:1}"; do
+ # Start a TaskManager for each NUMA node
+ numactl --membind=$NODE_ID --cpunodebind=$NODE_ID -- "${FLINK_BIN_DIR}"/flink-daemon.sh $STARTSTOP $ENTRYPOINT "${ARGS[@]}"
+ done
+ fi
+fi
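The NUMA branch above is taken when config.sh exports FLINK_TM_COMPUTE_NUMA=true; a minimal sketch of the corresponding switch, assuming the standard key read by config.sh, would be this line in conf/flink-conf.yaml:

    # start one pinned TaskManager per NUMA node instead of a single unpinned one
    taskmanager.compute.numa: true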
diff --git a/MSH-PIC/flink/bin/yarn-session.sh b/MSH-PIC/flink/bin/yarn-session.sh
new file mode 100644
index 0000000..f36ca34
--- /dev/null
+++ b/MSH-PIC/flink/bin/yarn-session.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+# get Flink config
+. "$bin"/config.sh
+
+if [ "$FLINK_IDENT_STRING" = "" ]; then
+ FLINK_IDENT_STRING="$USER"
+fi
+
+JVM_ARGS="$JVM_ARGS -Xmx512m"
+
+CC_CLASSPATH=`manglePathList $(constructFlinkClassPath):$INTERNAL_HADOOP_CLASSPATHS`
+
+log=$FLINK_LOG_DIR/flink-$FLINK_IDENT_STRING-yarn-session-$HOSTNAME.log
+log_setting="-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j-session.properties -Dlog4j.configurationFile=file:"$FLINK_CONF_DIR"/log4j-session.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback-session.xml"
+
+"$JAVA_RUN" $JVM_ARGS -classpath "$CC_CLASSPATH" $log_setting org.apache.flink.yarn.cli.FlinkYarnSessionCli -j "$FLINK_LIB_DIR"/flink-dist*.jar "$@"
+
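A typical session start through this wrapper; the option names are standard FlinkYarnSessionCli flags, and the resource sizes are placeholders:

    # start a detached YARN session with 2 slots per TaskManager
    ./bin/yarn-session.sh -jm 1024m -tm 2048m -s 2 -d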
diff --git a/MSH-PIC/flink/bin/zookeeper.sh b/MSH-PIC/flink/bin/zookeeper.sh
new file mode 100644
index 0000000..9297401
--- /dev/null
+++ b/MSH-PIC/flink/bin/zookeeper.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Start/stop a ZooKeeper quorum peer.
+USAGE="Usage: zookeeper.sh ((start|start-foreground) peer-id)|stop|stop-all"
+
+STARTSTOP=$1
+PEER_ID=$2
+
+if [[ $STARTSTOP != "start" ]] && [[ $STARTSTOP != "start-foreground" ]] && [[ $STARTSTOP != "stop" ]] && [[ $STARTSTOP != "stop-all" ]]; then
+ echo $USAGE
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/config.sh
+
+ZK_CONF="$FLINK_CONF_DIR/zoo.cfg"
+if [ ! -f "$ZK_CONF" ]; then
+ echo "[ERROR] No ZooKeeper configuration file found in '$ZK_CONF'."
+ exit 1
+fi
+
+if [[ $STARTSTOP == "start" ]] || [[ $STARTSTOP == "start-foreground" ]]; then
+ if [ -z $PEER_ID ]; then
+ echo "[ERROR] Missing peer id argument. $USAGE."
+ exit 1
+ fi
+
+ if [[ ! ${ZK_HEAP} =~ ${IS_NUMBER} ]]; then
+ echo "[ERROR] Configured ZooKeeper JVM heap size is not a number. Please set '$KEY_ZK_HEAP_MB' in $FLINK_CONF_FILE."
+ exit 1
+ fi
+
+ if [ "$ZK_HEAP" -gt 0 ]; then
+ export JVM_ARGS="$JVM_ARGS -Xms"$ZK_HEAP"m -Xmx"$ZK_HEAP"m"
+ fi
+
+ # Startup parameters
+ args=("--zkConfigFile" "${ZK_CONF}" "--peerId" "${PEER_ID}")
+fi
+
+# the JMX log4j integration in ZK 3.4 does not work with log4j 2
+export JVM_ARGS="$JVM_ARGS -Dzookeeper.jmx.log4j.disable=true"
+
+if [[ $STARTSTOP == "start-foreground" ]]; then
+ "${FLINK_BIN_DIR}"/flink-console.sh zookeeper "${args[@]}"
+else
+ "${FLINK_BIN_DIR}"/flink-daemon.sh $STARTSTOP zookeeper "${args[@]}"
+fi
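Matching the usage string above, the quorum peers are driven individually, for example:

    # start this machine as peer 1 of the quorum described in conf/zoo.cfg
    ./bin/zookeeper.sh start 1
    # stop the local peer, or all peers started from this installation
    ./bin/zookeeper.sh stop
    ./bin/zookeeper.sh stop-all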
diff --git a/MSH-PIC/flink/conf/core-site.xml b/MSH-PIC/flink/conf/core-site.xml
new file mode 100644
index 0000000..f380e36
--- /dev/null
+++ b/MSH-PIC/flink/conf/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/home/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
diff --git a/MSH-PIC/flink/conf/flink-conf.yaml b/MSH-PIC/flink/conf/flink-conf.yaml
new file mode 100644
index 0000000..e64f0c4
--- /dev/null
+++ b/MSH-PIC/flink/conf/flink-conf.yaml
@@ -0,0 +1,207 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+
+#==============================================================================
+# Common
+#==============================================================================
+
+# The external address of the host on which the JobManager runs and can be
+# reached by the TaskManagers and any clients which want to connect. This setting
+# is only used in Standalone mode and may be overwritten on the JobManager side
+# by specifying the --host <hostname> parameter of the bin/jobmanager.sh executable.
+# In high availability mode, if you use the bin/start-cluster.sh script and setup
+# the conf/masters file, this will be taken care of automatically. Yarn/Mesos
+# automatically configure the host name based on the hostname of the node where the
+# JobManager runs.
+
+jobmanager.rpc.address: 192.168.20.193
+
+# JVM-related options
+#env.java.opts: "-XX:+UseG1GC -XX:NewRatio=2 -XX:MaxGCPauseMillis=300 -XX:InitiatingHeapOccupancyPercent=35 -Xloggc:/home/tsg/olap/flink-1.13.1/log/gc.log -XX:+PrintGCDetails -XX:-OmitStackTraceInFastThrow -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=20 -XX:GCLogFileSize=20M"
+
+# JobManager RPC port
+jobmanager.rpc.port: 6123
+
+# Allow tasks to be spread evenly across all TaskManagers
+cluster.evenly-spread-out-slots: true
+
+# Avoid the Flink JVM process being killed when a metaspace OOM is reported
+classloader.fail-on-metaspace-oom-error: false
+
+# Work around classloader-leak issues caused by third-party libraries
+classloader.check-leaked-classloader: false
+
+# Prevent the TaskManager process from being killed when a task cannot be cancelled in time
+task.cancellation.timeout: 0
+
+# Total Flink-related memory of the JobManager process
+jobmanager.memory.process.size: 1024M
+
+# Total Flink-related memory of the TaskManager process
+taskmanager.memory.process.size: 1024M
+
+# Size of the TaskManager's managed (off-heap) memory
+taskmanager.memory.managed.size: 10M
+
+# taskmanager.memory.off-heap defaults to false and mainly controls whether Flink managed memory lives on-heap or off-heap;
+# the default is on-heap, and enabling off-heap reduces the available heap by that share
+taskmanager.memory.off-heap: false
+
+# Framework off-heap part, allocated as direct memory
+taskmanager.memory.framework.off-heap.size: 128M
+
+# TaskManager JVM metaspace size, default 256M
+taskmanager.memory.jvm-metaspace.size: 384M
+
+# Minimum number of network buffers required per sort-merge blocking result partition, default 64. For production use it is recommended to raise this to 2048 to improve the data compression ratio and reduce small network packets; raising it also requires more total network memory.
+taskmanager.network.sort-shuffle.min-buffers: 64
+
+# Memory used for reading shuffle data (currently only used by sort-merge shuffle). It is taken from framework.off-heap.size, default 32M; when increasing this value, framework.off-heap.size must be increased as well.
+taskmanager.memory.framework.off-heap.batch-shuffle.size: 8M
+
+# Maximum number of buffers each channel can use, default 10. It can speed up checkpoint alignment by preventing excessive growth of in-flight data under data skew when a high number of floating buffers is configured.
+taskmanager.network.memory.max-buffers-per-channel: 10
+
+# The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline.
+taskmanager.numberOfTaskSlots: 1
+
+# The default parallelism used for programs that did not specify any other parallelism.
+parallelism.default: 1
+
+# The default file system scheme and authority.
+#
+# By default file paths without scheme are interpreted relative to the local
+# root file system 'file:///'. Use this to override the default and interpret
+# relative paths relative to a different file system,
+# for example 'hdfs://mynamenode:12345'
+#
+# fs.default-scheme
+
+#==============================================================================
+# NetWork
+#==============================================================================
+
+# Number of floating network buffers per gate, default 8. Helps alleviate backpressure caused by uneven data distribution across subpartitions.
+taskmanager.network.memory.floating-buffers-per-gate: 8
+
+# Number of exclusive network buffers per input/output channel. Configure at least 2.
+taskmanager.network.memory.buffers-per-channel: 2
+
+# Memory used for data transfer between TaskManagers (shuffle, broadcast, etc.) and with external components
+#Min
+taskmanager.memory.network.min: 64M
+#Max
+taskmanager.memory.network.max: 128M
+
+#==============================================================================
+# High Availability
+#==============================================================================
+
+# The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
+#
+# high-availability: zookeeper
+
+# The path where metadata for master recovery is persisted. While ZooKeeper stores
+# the small ground truth for checkpoint and leader election, this location stores
+# the larger objects, like persisted dataflow graphs.
+#
+# Must be a durable file system that is accessible from all nodes
+# (like HDFS, S3, Ceph, nfs, ...)
+#
+# high-availability.storageDir: hdfs:///flink/ha/
+
+# The list of ZooKeeper quorum peers that coordinate the high-availability
+# setup. This must be a list of the form:
+# "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
+
+#high-availability: zookeeper
+#high-availability.zookeeper.quorum: 192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+#high-availability.zookeeper.path.root: /flink
+#high-availability.zookeeper.client.connection-timeout: 150000
+#high-availability.zookeeper.client.max-retry-attempts: 10
+#high-availability.zookeeper.client.retry-wait: 10000
+#high-availability.zookeeper.client.session-timeout: 240000
+
+# Read the local Hadoop configuration files
+#fs.hdfs.hadoopconf: /home/tsg/olap/flink-1.13.1/conf/
+#high-availability.cluster-id: /flink_cluster
+#important: customize per cluster
+#high-availability.storageDir: hdfs:///flink/recover
+
+heartbeat.timeout: 180000
+heartbeat.interval: 20000
+akka.ask.timeout: 300 s
+
+# ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
+# It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
+# The default value is "open" and it can be changed to "creator" if ZK security is enabled
+#
+# high-availability.zookeeper.client.acl: open
+
+# The failover strategy, i.e., how the job computation recovers from task failures.
+# Only restart tasks that may have been affected by the task failure, which typically includes
+# downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption.
+jobmanager.execution.failover-strategy: region
+
+#rest.port: 8080
+
+restart-strategy: fixed-delay
+
+# Restart strategy
+# In the 21.12 release the value was 9999
+# In the 22.01 release the value changed to INT_MAX
+restart-strategy.fixed-delay.attempts: 2147483647
+
+yarn.application-attempts: 10000
+
+restart-strategy.fixed-delay.delay: 5 s
+
+jobmanager.web.upload.dir: /home/tsg/olap/flink-1.13.1/flink-web
+
+#==============================================================================
+# Advanced
+#==============================================================================
+
+# Override the directories for temporary files. If not specified, the
+# system-specific Java temporary directory (java.io.tmpdir property) is taken.
+#
+# For framework setups on Yarn or Mesos, Flink will automatically pick up the
+# containers' temp directories without any need for configuration.
+#
+# Add a delimited list for multiple directories, using the system directory
+# delimiter (colon ':' on unix) or a comma, e.g.:
+# /data1/tmp:/data2/tmp:/data3/tmp
+#
+# Note: Each directory entry is read from and written to by a different I/O
+# thread. You can include the same directory multiple times in order to create
+# multiple I/O threads against that directory. This is for example relevant for
+# high-throughput RAIDs.
+#
+# io.tmp.dirs: /tmp
+
+# The classloading resolve order. Possible values are 'child-first' (Flink's default)
+# and 'parent-first' (Java's default).
+#
+# Child first classloading allows users to use different dependency/library
+# versions in their application than those in the classpath. Switching back
+# to 'parent-first' may help with debugging dependency issues.
+#
+# classloader.resolve-order: child-first
+classloader.resolve-order: parent-first
+
diff --git a/MSH-PIC/flink/conf/hdfs-site.xml b/MSH-PIC/flink/conf/hdfs-site.xml
new file mode 100644
index 0000000..6d93805
--- /dev/null
+++ b/MSH-PIC/flink/conf/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the address where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.20.193:9000</value>
+ </property>
+ <!-- HTTP address of nn1 (address for external access) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.20.193:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the address where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.20.194:9000</value>
+ </property>
+ <!-- HTTP address of nn2 (address for external access) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.20.194:50070</value>
+ </property>
+ <!-- Where the NameNode metadata (edit log) is stored on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.20.193:8485;192.168.20.194:8485;192.168.20.195:8485/ns1</value>
+ </property>
+ <!-- Local disk directory where the JournalNodes store their data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/home/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Java class the HDFS client uses to talk to the NameNodes through a proxy and to determine which NameNode is active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods for automatic failover; several are available (see the Hadoop documentation). sshfence logs in to the old active NameNode remotely and kills it. -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence</value>
+ <value>shell(true)</value>
+ </property>
+ <!-- Passwordless SSH private key, needed only when the sshfence fencing method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence fencing method; as above, it can be left unset when a script-based fencing method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enables automatic failover; can be left unset if automatic failover is not used -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode threads handling RPC requests; increasing it costs little in resources -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode threads handling RPC requests; increasing it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth that the balancer may use -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space, in bytes, that HDFS will not use -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
diff --git a/MSH-PIC/flink/conf/log4j-cli.properties b/MSH-PIC/flink/conf/log4j-cli.properties
new file mode 100644
index 0000000..e7add42
--- /dev/null
+++ b/MSH-PIC/flink/conf/log4j-cli.properties
@@ -0,0 +1,67 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+rootLogger.level = INFO
+rootLogger.appenderRef.file.ref = FileAppender
+
+# Log all infos in the given file
+appender.file.name = FileAppender
+appender.file.type = FILE
+appender.file.append = false
+appender.file.fileName = ${sys:log.file}
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Log output from org.apache.flink.yarn to the console. This is used by the
+# CliFrontend class when using a per-job YARN cluster.
+logger.yarn.name = org.apache.flink.yarn
+logger.yarn.level = INFO
+logger.yarn.appenderRef.console.ref = ConsoleAppender
+logger.yarncli.name = org.apache.flink.yarn.cli.FlinkYarnSessionCli
+logger.yarncli.level = INFO
+logger.yarncli.appenderRef.console.ref = ConsoleAppender
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.hadoop.appenderRef.console.ref = ConsoleAppender
+
+# Make sure hive logs go to the file.
+logger.hive.name = org.apache.hadoop.hive
+logger.hive.level = INFO
+logger.hive.additivity = false
+logger.hive.appenderRef.file.ref = FileAppender
+
+# Log output from org.apache.flink.kubernetes to the console.
+logger.kubernetes.name = org.apache.flink.kubernetes
+logger.kubernetes.level = INFO
+logger.kubernetes.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# suppress the warning that hadoop native libraries are not loaded (irrelevant for the client)
+logger.hadoopnative.name = org.apache.hadoop.util.NativeCodeLoader
+logger.hadoopnative.level = OFF
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
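For reference, the PatternLayout above prints the timestamp in UTC with a numeric zone offset, a 5-character level field, and the logger name padded to 60 columns, so a client-log line would look roughly like the following (an illustrative entry, not copied from a real log):

    [2022-04-08 09:15:02+0000] INFO  org.apache.flink.yarn.cli.FlinkYarnSessionCli                 - example message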
diff --git a/MSH-PIC/flink/conf/log4j-console.properties b/MSH-PIC/flink/conf/log4j-console.properties
new file mode 100644
index 0000000..499839e
--- /dev/null
+++ b/MSH-PIC/flink/conf/log4j-console.properties
@@ -0,0 +1,66 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+# This affects logging for both user code and Flink
+rootLogger.level = INFO
+rootLogger.appenderRef.console.ref = ConsoleAppender
+rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+# Uncomment this if you want to _only_ change Flink's logging
+#logger.flink.name = org.apache.flink
+#logger.flink.level = INFO
+
+# The following lines keep the log level of common libraries/connectors on
+# log level INFO. The root logger does not override this. You have to manually
+# change the log levels here.
+logger.akka.name = akka
+logger.akka.level = INFO
+logger.kafka.name= org.apache.kafka
+logger.kafka.level = INFO
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = INFO
+
+# Log all infos to the console
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Log all infos in the given rolling file
+appender.rolling.name = RollingFileAppender
+appender.rolling.type = RollingFile
+appender.rolling.append = true
+appender.rolling.fileName = ${sys:log.file}
+appender.rolling.filePattern = ${sys:log.file}.%i
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+appender.rolling.policies.type = Policies
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=100MB
+appender.rolling.policies.startup.type = OnStartupTriggeringPolicy
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
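Note on the rolling appender above: with a 100 MB SizeBasedTriggeringPolicy and DefaultRolloverStrategy keeping at most ${env:MAX_LOG_FILE_NUMBER:-10} rolled files, each log file set is capped at roughly 100 MB x 10 = 1 GB plus the active file. The cap can be changed per host by exporting MAX_LOG_FILE_NUMBER in the environment of the process that starts the Flink daemons.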
diff --git a/MSH-PIC/flink/conf/log4j-session.properties b/MSH-PIC/flink/conf/log4j-session.properties
new file mode 100644
index 0000000..9044140
--- /dev/null
+++ b/MSH-PIC/flink/conf/log4j-session.properties
@@ -0,0 +1,40 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+rootLogger.level = INFO
+rootLogger.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = WARN
+logger.curator.name = org.apache.flink.shaded.org.apache.curator.framework
+logger.curator.level = WARN
+logger.runtimeutils.name= org.apache.flink.runtime.util.ZooKeeperUtils
+logger.runtimeutils.level = WARN
+logger.runtimeleader.name = org.apache.flink.runtime.leaderretrieval.ZooKeeperLeaderRetrievalDriver
+logger.runtimeleader.level = WARN
diff --git a/MSH-PIC/flink/conf/log4j.properties b/MSH-PIC/flink/conf/log4j.properties
new file mode 100644
index 0000000..64293a9
--- /dev/null
+++ b/MSH-PIC/flink/conf/log4j.properties
@@ -0,0 +1,59 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+# This affects logging for both user code and Flink
+rootLogger.level = ERROR
+rootLogger.appenderRef.file.ref = MainAppender
+
+# Uncomment this if you want to _only_ change Flink's logging
+#logger.flink.name = org.apache.flink
+#logger.flink.level = INFO
+
+# The following lines keep the log level of common libraries/connectors on
+# log level INFO. The root logger does not override this. You have to manually
+# change the log levels here.
+logger.akka.name = akka
+logger.akka.level = INFO
+logger.kafka.name= org.apache.kafka
+logger.kafka.level = INFO
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = INFO
+
+# Log all infos in the given file
+appender.main.name = MainAppender
+appender.main.type = RollingFile
+appender.main.append = true
+appender.main.fileName = ${sys:log.file}
+appender.main.filePattern = ${sys:log.file}.%i
+appender.main.layout.type = PatternLayout
+appender.main.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+appender.main.policies.type = Policies
+appender.main.policies.size.type = SizeBasedTriggeringPolicy
+appender.main.policies.size.size = 100MB
+appender.main.policies.startup.type = OnStartupTriggeringPolicy
+appender.main.strategy.type = DefaultRolloverStrategy
+appender.main.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
diff --git a/MSH-PIC/flink/conf/log4j2.component.properties b/MSH-PIC/flink/conf/log4j2.component.properties
new file mode 100644
index 0000000..2d5d906
--- /dev/null
+++ b/MSH-PIC/flink/conf/log4j2.component.properties
@@ -0,0 +1,2 @@
+# Place this file under the conf/ directory of the Flink installation
+log4j2.formatMsgNoLookups=true
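log4j2.formatMsgNoLookups=true is the standard mitigation for the Log4Shell lookup vulnerability (CVE-2021-44228): message text and parameters are written out literally instead of being expanded through ${...} lookups. A minimal sketch of what this protects against, using only the log4j2 API (hypothetical class, not part of this repository):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    // Hypothetical check class; assumes log4j2.component.properties is on the classpath.
    public class LookupCheck {
        private static final Logger LOG = LogManager.getLogger(LookupCheck.class);

        public static void main(String[] args) {
            // With formatMsgNoLookups=true the parameter below is logged as literal text
            // instead of being resolved as a JNDI lookup.
            LOG.error("suspicious header: {}", "${jndi:ldap://attacker.example/a}");
        }
    }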
diff --git a/MSH-PIC/flink/conf/logback-console.xml b/MSH-PIC/flink/conf/logback-console.xml
new file mode 100644
index 0000000..62963f3
--- /dev/null
+++ b/MSH-PIC/flink/conf/logback-console.xml
@@ -0,0 +1,64 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="rolling" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+
+ <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${log.file}.%i</fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>10</maxIndex>
+ </rollingPolicy>
+
+ <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>100MB</maxFileSize>
+ </triggeringPolicy>
+
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <!-- This affects logging for both user code and Flink -->
+ <root level="INFO">
+ <appender-ref ref="console"/>
+ <appender-ref ref="rolling"/>
+ </root>
+
+ <!-- Uncomment this if you want to only change Flink's logging -->
+ <!--<logger name="org.apache.flink" level="INFO"/>-->
+
+ <!-- The following lines keep the log level of common libraries/connectors on
+ log level INFO. The root logger does not override this. You have to manually
+ change the log levels here. -->
+ <logger name="akka" level="INFO"/>
+ <logger name="org.apache.kafka" level="INFO"/>
+ <logger name="org.apache.hadoop" level="INFO"/>
+ <logger name="org.apache.zookeeper" level="INFO"/>
+
+ <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+ <logger name="org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR"/>
+</configuration>
diff --git a/MSH-PIC/flink/conf/logback-session.xml b/MSH-PIC/flink/conf/logback-session.xml
new file mode 100644
index 0000000..7c07147
--- /dev/null
+++ b/MSH-PIC/flink/conf/logback-session.xml
@@ -0,0 +1,39 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="file" class="ch.qos.logback.core.FileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <logger name="ch.qos.logback" level="WARN" />
+ <root level="INFO">
+ <appender-ref ref="file"/>
+ <appender-ref ref="console"/>
+ </root>
+</configuration>
diff --git a/MSH-PIC/flink/conf/logback.xml b/MSH-PIC/flink/conf/logback.xml
new file mode 100644
index 0000000..e1c0d7c
--- /dev/null
+++ b/MSH-PIC/flink/conf/logback.xml
@@ -0,0 +1,58 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="file" class="ch.qos.logback.core.FileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <!-- This affects logging for both user code and Flink -->
+ <root level="INFO">
+ <appender-ref ref="file"/>
+ </root>
+
+ <!-- Uncomment this if you want to only change Flink's logging -->
+ <!--<logger name="org.apache.flink" level="INFO">-->
+ <!--<appender-ref ref="file"/>-->
+ <!--</logger>-->
+
+ <!-- The following lines keep the log level of common libraries/connectors on
+ log level INFO. The root logger does not override this. You have to manually
+ change the log levels here. -->
+ <logger name="akka" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.kafka" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.hadoop" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.zookeeper" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+
+ <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+ <logger name="org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR">
+ <appender-ref ref="file"/>
+ </logger>
+</configuration>
diff --git a/MSH-PIC/flink/conf/masters b/MSH-PIC/flink/conf/masters
new file mode 100644
index 0000000..08e29dc
--- /dev/null
+++ b/MSH-PIC/flink/conf/masters
@@ -0,0 +1,2 @@
+192.168.20.193:8080
+192.168.20.194:8080
diff --git a/MSH-PIC/flink/conf/workers b/MSH-PIC/flink/conf/workers
new file mode 100644
index 0000000..fbfed19
--- /dev/null
+++ b/MSH-PIC/flink/conf/workers
@@ -0,0 +1 @@
+192.168.20.195
diff --git a/MSH-PIC/flink/conf/yarn-site.xml b/MSH-PIC/flink/conf/yarn-site.xml
new file mode 100644
index 0000000..366878b
--- /dev/null
+++ b/MSH-PIC/flink/conf/yarn-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Declare the addresses of the two ResourceManagers -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+ <!-- rm1 configuration -->
+ <!-- rm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>192.168.20.193</value>
+ </property>
+
+ <!-- rm1 web application address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>192.168.20.193:8080</value>
+ </property>
+
+ <!-- rm1 scheduler port, default 8030 -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>192.168.20.193:8030</value>
+ </property>
+
+ <!-- rm1 resource tracker address, default port 8031 -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>192.168.20.193:8031</value>
+ </property>
+
+ <!-- rm1 applications manager interface address and port, default 8032 -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>192.168.20.193:8032</value>
+ </property>
+
+ <!-- rm1 admin port, default 8033 -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>192.168.20.193:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>192.168.20.193:23142</value>
+ </property>
+
+ <!-- rm2 configuration -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>192.168.20.194</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>192.168.20.194:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>192.168.20.194:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>192.168.20.194:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>192.168.20.194:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>192.168.20.194:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>192.168.20.194:23142</value>
+ </property>
+
+ <!-- ZooKeeper quorum addresses -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+
+ <!-- Enable automatic recovery so that jobs in progress survive a ResourceManager failure; default is false -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Enable NodeManager recovery; default is false -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Local file system directory where the NodeManager saves its running state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+ <!-- RPC address available to the NodeManager. The default ${yarn.nodemanager.hostname}:0 picks an ephemeral port that changes after a cluster restart, so a fixed port is set here to keep the NodeManager restart feature working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/logs/app-logs/</value>
+ </property>
+
+ <!-- Physical memory the NodeManager may allocate to containers, in MB; default 8192 -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>61440</value>
+ </property>
+
+ <!-- Minimum allocation for every container request at the RM, in MB; default 1024 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+ <!-- Maximum allocation for every container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb; default 8192 -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>61440</value>
+ </property>
+
+ <!-- Number of vcores that can be allocated to containers; used by the RM scheduler when allocating resources. It does not limit the number of physical cores YARN containers may use. Default 8; usually set to the server's total CPU core count -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>48</value>
+ </property>
+
+ <!-- Minimum allocation for every container request at the RM, in virtual CPU cores; default 1 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+ <!-- Maximum allocation for every container request at the RM, in virtual CPU cores; default 32. Usually set slightly below yarn.nodemanager.resource.cpu-vcores, and the number of slots requested per job should not exceed this value -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>48</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- Maximum number of ApplicationMaster restart attempts; defaults to 2 once HA is configured and can be increased in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://192.168.20.193:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
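Sizing note on the values above: each NodeManager advertises 61440 MB (60 GB) and 48 vcores to the scheduler. Because yarn.scheduler.maximum-allocation-mb and -vcores equal those totals, a single container may claim a whole node, while the 1024 MB / 1 vcore minimums mean at most 61440 / 1024 = 60 minimum-size containers fit per node; Flink TaskManager container requests therefore have to stay within 60 GB and 48 vcores per node.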
diff --git a/MSH-PIC/flink/conf/zoo.cfg b/MSH-PIC/flink/conf/zoo.cfg
new file mode 100644
index 0000000..f598997
--- /dev/null
+++ b/MSH-PIC/flink/conf/zoo.cfg
@@ -0,0 +1,36 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# The number of milliseconds of each tick
+tickTime=2000
+
+# The number of ticks that the initial synchronization phase can take
+initLimit=10
+
+# The number of ticks that can pass between sending a request and getting an acknowledgement
+syncLimit=5
+
+# The directory where the snapshot is stored.
+# dataDir=/tmp/zookeeper
+
+# The port at which the clients will connect
+clientPort=2181
+
+# ZooKeeper quorum peers
+server.1=localhost:2888:3888
+# server.2=host:peer-port:leader-port
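With tickTime=2000 ms, initLimit=10 gives followers 10 x 2 s = 20 s to connect and sync with the leader, and syncLimit=5 allows 5 x 2 s = 10 s between a request and its acknowledgement before a peer is considered out of sync. dataDir is left commented out here, so the snapshot directory presumably has to be supplied by whatever starts this quorum.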
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED
new file mode 100644
index 0000000..ad22a08
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=ACTIVE-DEFENCE-EVENT
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=ACTIVE-DEFENCE-EVENT-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=active-defence-log-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
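data.center.id.num (0-31, i.e. a 5-bit field) together with the ZooKeeper-based log_id allocation suggests a snowflake-style 64-bit id per record, with each topology given a distinct value so ids from different topologies cannot collide. The sketch below only illustrates such a scheme under that assumption; it is not code taken from the topology jar, and the bit split and epoch handling are guesses:

    // Hypothetical illustration: snowflake-style id with a 5-bit data-center field,
    // which would explain the 0-31 range of data.center.id.num. A production version
    // would normally subtract a custom epoch from the timestamp.
    public class LogIdSketch {
        private final long dataCenterId;   // data.center.id.num, 0-31 (5 bits)
        private long lastTimestamp = -1L;
        private long sequence = 0L;        // per-millisecond counter (17 bits assumed)

        public LogIdSketch(long dataCenterId) {
            if (dataCenterId < 0 || dataCenterId > 31) {
                throw new IllegalArgumentException("data.center.id.num must be in 0-31");
            }
            this.dataCenterId = dataCenterId;
        }

        public synchronized long nextId() {
            long now = System.currentTimeMillis();
            if (now == lastTimestamp) {
                sequence = (sequence + 1) & 0x1FFFF;      // wrap within 17 bits
                if (sequence == 0) {                      // counter exhausted, wait for the next millisecond
                    while (now <= lastTimestamp) {
                        now = System.currentTimeMillis();
                    }
                }
            } else {
                sequence = 0;
            }
            lastTimestamp = now;
            // assumed layout: 41-bit timestamp | 5-bit data center | 17-bit sequence
            return (now << 22) | (dataCenterId << 17) | sequence;
        }
    }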
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED
new file mode 100644
index 0000000..330ab1a
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-BGP-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=bgp_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=BGP-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=BGP-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=bgp-record-20220801-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=2
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED
new file mode 100644
index 0000000..b12330c
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-GTPC-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=gtpc_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=GTPC-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=GTPC-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=gtpc-record-log-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=3
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED
new file mode 100644
index 0000000..7d30bd9
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-INTERIM-SESSION-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=interim_session_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=INTERIM-SESSION-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=INTERIM-SESSION-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=linterim-session-record-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=4
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED
new file mode 100644
index 0000000..74af8f9
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-PROXY-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=proxy_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=PROXY-EVENT
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=PROXY-EVENT-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=proxy-event-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=24
+
+# Transform function parallelism
+transform.parallelism=24
+
+# Kafka producer parallelism
+sink.parallelism=24
+
+# Data center id, valid range 0-31
+data.center.id.num=5
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED
new file mode 100644
index 0000000..120a247
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-RADIUS-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=radius_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=RADIUS-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=RADIUS-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=radius-record-log-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=6
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED
new file mode 100644
index 0000000..9e22783
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-SECURITY-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=security_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=SECURITY-EVENT
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=SECURITY-EVENT-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=security-event-log-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=24
+
+# Transform function parallelism
+transform.parallelism=24
+
+# Kafka producer parallelism
+sink.parallelism=24
+
+# Data center id, valid range 0-31
+data.center.id.num=7
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED
new file mode 100644
index 0000000..1638f99
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-SESSION-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=session_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=SESSION-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=SESSION-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=session-record-log-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=8
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED
new file mode 100644
index 0000000..a862abc
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-TRANSACTION-RECORD-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=transaction_record.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=TRANSACTION-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=TRANSACTION-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=transaction-record-20220408-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=10
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=1
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD b/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD
new file mode 100644
index 0000000..9996c66
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=DOS-SKETCH-RECORD
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=DOS-SKETCH-RECORD
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=dos-sketch-record-20230629-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=0
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
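The MIRROR-* configs from here on differ from the ETL-* ones mainly in that log.need.complete=0 and the sink brokers point at a second Kafka cluster (192.168.20.223-225), so the topology acts as a pass-through mirror rather than an enrichment job. As a rough illustration of that data flow only (the real job runs inside Flink; this standalone client and its class name are assumptions), a minimal forwarder would look like:

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    // Illustration of the pass-through flow only; not the actual topology code.
    public class DosSketchMirror {
        public static void main(String[] args) {
            Properties cons = new Properties();
            cons.put("bootstrap.servers", "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094");
            cons.put("group.id", "dos-sketch-record-20230629-1");
            cons.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
            cons.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

            Properties prod = new Properties();
            prod.put("bootstrap.servers", "192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094");
            prod.put("compression.type", "snappy");   // matches producer.kafka.compression.type=snappy
            prod.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            prod.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(cons);
                 KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(prod)) {
                consumer.subscribe(Collections.singletonList("DOS-SKETCH-RECORD"));
                while (true) {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                    for (ConsumerRecord<byte[], byte[]> r : records) {
                        // log.need.complete=0: forward each record unchanged to the remote cluster
                        producer.send(new ProducerRecord<>("DOS-SKETCH-RECORD", r.key(), r.value()));
                    }
                }
            }
        }
    }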
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED
new file mode 100644
index 0000000..7c2ab7f
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=GTPC-RECORD-COMPLETED
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=GTPC-RECORD-COMPLETED
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=gtpc-record-completed-20230629-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=0
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT b/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT
new file mode 100644
index 0000000..c4363e4
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=INTERNAL-PACKET-CAPTURE-EVENT
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=INTERNAL-PACKET-CAPTURE-EVENT
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=internal-packet-capture-event-20230629-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=0
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS
new file mode 100644
index 0000000..45ef126
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=NETWORK-TRAFFIC-METRICS
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=NETWORK-TRAFFIC-METRICS
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=network-traffic-metrics-20230629-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=0
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS
new file mode 100644
index 0000000..da02fdf
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS
@@ -0,0 +1,78 @@
+#-------------------------------- Address configuration ------------------------------#
+# Source Kafka broker addresses
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+# Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+# ZooKeeper addresses used to allocate log_id and connect to HBase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+# HDFS addresses used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#-------------------------------- HTTP / knowledge base ------------------------------#
+# File system type backing the knowledge base: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+# Knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+# File type list to fetch from the knowledge base metadata; leave empty to disable type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+# File name list to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+# Tools library path, used to store key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#-------------------------------- Nacos configuration ------------------------------#
+# Nacos server address
+nacos.server=192.168.20.252:8848
+
+# Schema namespace name
+nacos.schema.namespace=MSH
+
+# Schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+# Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+# Knowledge base data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#-------------------------------- Kafka consumer/producer configuration ------------------------------#
+# Kafka topic to consume data from
+source.kafka.topic=POLICY-RULE-METRICS
+
+# Output topic for completed (enriched) data
+sink.kafka.topic=POLICY-RULE-METRICS
+
+# Consumer group id for reading the topic; it stores the consumer offsets for this spout id (name it after the topology), and the stored offsets determine where the next read resumes so data is not consumed twice
+group.id=policy-rule-metrics-20230629-1
+
+#-------------------------------- Topology configuration ------------------------------#
+# Consumer parallelism
+source.parallelism=3
+
+# Transform function parallelism
+transform.parallelism=3
+
+# Kafka producer parallelism
+sink.parallelism=3
+
+# Data center id, valid range 0-31
+data.center.id.num=1
+
+# HBase cache refresh interval in seconds; 0 disables cache updates
+hbase.tick.tuple.freq.secs=180
+
+#-------------------------------- Default values ------------------------------#
+
+# 0 = output logs as-is without completion, 1 = completion required
+log.need.complete=0
+
+# Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT b/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT
new file mode 100644
index 0000000..d7fc346
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT
@@ -0,0 +1,78 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#sink (output) kafka address
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+#zookeeper address used to allocate log_id and to connect to hbase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#hdfs address used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#file system type for knowledge base storage, hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+#list of file types to fetch from the knowledge base metadata; leave empty to disable type filtering; takes precedence over name
+knowledgebase.type.list=ip_location,asn
+
+#list of file names to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#tools library path, holds key files and similar material.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------nacos settings------------------------------#
+#nacos address
+nacos.server=192.168.20.252:8848
+
+#schema namespace name
+nacos.schema.namespace=MSH
+
+#schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+#knowledgebase namespace name
+nacos.knowledgebase.namespace=
+
+#knowledgebase data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer settings------------------------------#
+#kafka topic to consume data from
+source.kafka.topic=PXY-EXCH-INTERMEDIA-CERT
+
+#output topic for enriched data
+sink.kafka.topic=PXY-EXCH-INTERMEDIA-CERT
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=pxy-exch-intermedia-cert-1
+
+#--------------------------------topology settings------------------------------#
+#consumer parallelism
+source.parallelism=3
+
+#transform function parallelism
+transform.parallelism=3
+
+#kafka producer parallelism
+sink.parallelism=3
+
+#data center id, valid range (0-31)
+data.center.id.num=1
+
+#hbase cache refresh interval in seconds; 0 disables cache refresh
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------default settings------------------------------#
+
+#0 = output logs as-is without enrichment, 1 = enrich logs
+log.need.complete=0
+
+#producer compression mode, none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT b/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT
new file mode 100644
index 0000000..b361532
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT
@@ -0,0 +1,78 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#sink (output) kafka address
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+#zookeeper address used to allocate log_id and to connect to hbase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#hdfs address used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#file system type for knowledge base storage, hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+#list of file types to fetch from the knowledge base metadata; leave empty to disable type filtering; takes precedence over name
+knowledgebase.type.list=ip_location,asn
+
+#list of file names to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#tools library path, holds key files and similar material.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------nacos settings------------------------------#
+#nacos address
+nacos.server=192.168.20.252:8848
+
+#schema namespace name
+nacos.schema.namespace=MSH
+
+#schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+#knowledgebase namespace name
+nacos.knowledgebase.namespace=
+
+#knowledgebase data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer settings------------------------------#
+#kafka topic to consume data from
+source.kafka.topic=SYS-PACKET-CAPTURE-EVENT
+
+#output topic for enriched data
+sink.kafka.topic=SYS-PACKET-CAPTURE-EVENT
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=sys-packet-capture-event-20230629-1
+
+#--------------------------------topology settings------------------------------#
+#consumer parallelism
+source.parallelism=3
+
+#transform function parallelism
+transform.parallelism=3
+
+#kafka producer parallelism
+sink.parallelism=3
+
+#data center id, valid range (0-31)
+data.center.id.num=1
+
+#hbase cache refresh interval in seconds; 0 disables cache refresh
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------default settings------------------------------#
+
+#0 = output logs as-is without enrichment, 1 = enrich logs
+log.need.complete=0
+
+#producer compression mode, none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS
new file mode 100644
index 0000000..5ee5b31
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS
@@ -0,0 +1,78 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#sink (output) kafka address
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+#zookeeper address used to allocate log_id and to connect to hbase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#hdfs address used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#file system type for knowledge base storage, hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+#list of file types to fetch from the knowledge base metadata; leave empty to disable type filtering; takes precedence over name
+knowledgebase.type.list=ip_location,asn
+
+#list of file names to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#tools library path, holds key files and similar material.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------nacos settings------------------------------#
+#nacos address
+nacos.server=192.168.20.252:8848
+
+#schema namespace name
+nacos.schema.namespace=MSH
+
+#schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+#knowledgebase namespace name
+nacos.knowledgebase.namespace=
+
+#knowledgebase data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer settings------------------------------#
+#kafka topic to consume data from
+source.kafka.topic=TRAFFIC-TOP-METRICS
+
+#output topic for enriched data
+sink.kafka.topic=TRAFFIC-TOP-METRICS
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=traffic-top-metrics-20230629-1
+
+#--------------------------------topology settings------------------------------#
+#consumer parallelism
+source.parallelism=3
+
+#transform function parallelism
+transform.parallelism=3
+
+#kafka producer parallelism
+sink.parallelism=3
+
+#data center id, valid range (0-31)
+data.center.id.num=1
+
+#hbase cache refresh interval in seconds; 0 disables cache refresh
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------default settings------------------------------#
+
+#0 = output logs as-is without enrichment, 1 = enrich logs
+log.need.complete=0
+
+#producer compression mode, none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD b/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD
new file mode 100644
index 0000000..21e7267
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD
@@ -0,0 +1,78 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#sink (output) kafka address
+sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094
+
+#zookeeper address used to allocate log_id and to connect to hbase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#hdfs address used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#file system type for knowledge base storage, hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/
+
+#list of file types to fetch from the knowledge base metadata; leave empty to disable type filtering; takes precedence over name
+knowledgebase.type.list=ip_location,asn
+
+#list of file names to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#tools library path, holds key files and similar material.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------nacos settings------------------------------#
+#nacos address
+nacos.server=192.168.20.252:8848
+
+#schema namespace name
+nacos.schema.namespace=MSH
+
+#schema data id name
+nacos.schema.data.id=active_defence_event.json
+
+#knowledgebase namespace name
+nacos.knowledgebase.namespace=
+
+#knowledgebase data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer settings------------------------------#
+#kafka topic to consume data from
+source.kafka.topic=VOIP-RECORD
+
+#output topic for enriched data
+sink.kafka.topic=VOIP-RECORD
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=voip-record-20230629-1
+
+#--------------------------------topology settings------------------------------#
+#consumer parallelism
+source.parallelism=3
+
+#transform function parallelism
+transform.parallelism=3
+
+#kafka producer parallelism
+sink.parallelism=3
+
+#data center id, valid range (0-31)
+data.center.id.num=1
+
+#hbase cache refresh interval in seconds; 0 disables cache refresh
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------default settings------------------------------#
+
+#0 = output logs as-is without enrichment, 1 = enrich logs
+log.need.complete=0
+
+#producer compression mode, none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/service_flow_config.properties b/MSH-PIC/flink/topology/completion/service_flow_config.properties
new file mode 100644
index 0000000..5527d21
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/service_flow_config.properties
@@ -0,0 +1,78 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#sink (output) kafka address
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#zookeeper address used to allocate log_id and to connect to hbase
+zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#hdfs address used to fetch the location knowledge base
+hdfs.servers=192.168.20.193:9000,192.168.20.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#file system type for knowledge base storage, hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#knowledge base path; fill in the path that matches the file.system.type setting.
+knowledgebase.file.storage.path=/knowledgebase/ETL-SECURITY-EVENT-COMPLETED/
+
+#list of file types to fetch from the knowledge base metadata; leave empty to disable type filtering; takes precedence over name
+knowledgebase.type.list=ip_location,asn
+
+#list of file names to fetch from the knowledge base metadata; leave empty to disable name filtering
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#tools library path, holds key files and similar material.
+tools.library=/home/tsg/olap/topology/dat/
+
+#--------------------------------nacos settings------------------------------#
+#nacos address
+nacos.server=192.168.20.252:8848
+
+#schema namespace name
+nacos.schema.namespace=MSH
+
+#schema data id name
+nacos.schema.data.id=security_event.json
+
+#knowledgebase namespace name
+nacos.knowledgebase.namespace=
+
+#knowledgebase data id name
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer settings------------------------------#
+#kafka topic to consume data from
+source.kafka.topic=SECURITY-EVENT
+
+#output topic for enriched data
+sink.kafka.topic=SECURITY-EVENT-COMPLETED
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=security-event-log-20220408-1
+
+#--------------------------------topology settings------------------------------#
+#consumer parallelism
+source.parallelism=24
+
+#transform function parallelism
+transform.parallelism=24
+
+#kafka producer parallelism
+sink.parallelism=24
+
+#data center id, valid range (0-31)
+data.center.id.num=7
+
+#hbase cache refresh interval in seconds; 0 disables cache refresh
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------default settings------------------------------#
+
+#0 = output logs as-is without enrichment, 1 = enrich logs
+log.need.complete=1
+
+#producer compression mode, none or snappy
+producer.kafka.compression.type=snappy
diff --git a/MSH-PIC/flink/topology/completion/start.sh b/MSH-PIC/flink/topology/completion/start.sh
new file mode 100644
index 0000000..28760f5
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/start.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+#Flink job start script
+source /etc/profile
+#directory containing the job jar
+BASE_DIR=$(pwd)
+
+#######################parameter settings####################################
+#yarn execution mode: per-job or session
+TASK_MODE="per-job"
+#name of the original jar to update
+PRIMORDIAL='log-completion-schema-230607-FastJson2.jar'
+#jar name
+JAR_NAME='log-completion-schema_tmp.jar'
+
+MAIN_CLASS="com.zdjizhi.topology.LogFlowWriteTopology"
+SESSION_CLUSTER="Flink session cluster"
+CONFIG_NAME="service_flow_config.properties"
+JOBMANAGER_MEMORY="1024m"
+TASKMANAGER_MEMORY="4096m"
+TASK_SLOTS=3
+#######################parameter settings####################################
+
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+yes | cp -r $PRIMORDIAL $JAR_NAME
+
+#cd $BASE_DIR
+jar -xvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ if [[ -z $TASK_MODE || $TASK_MODE == "per-job" ]]; then
+ num=$(yarn application -list | grep $file | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+ if [[ $file == "ETL-PROXY-EVENT-COMPLETED" || $file == "ETL-SECURITY-EVENT-COMPLETED" ]]; then
+ flink run -t yarn-per-job -Djobmanager.memory.process.size=2048m -Dtaskmanager.memory.process.size=5120m -Dyarn.application.name=$file -Dtaskmanager.numberOfTaskSlots=8 -d -c $MAIN_CLASS $BASE_DIR/$JAR_NAME $file
+ else
+ flink run -t yarn-per-job -Djobmanager.memory.process.size=$JOBMANAGER_MEMORY -Dtaskmanager.memory.process.size=$TASKMANAGER_MEMORY -Dyarn.application.name=$file -Dtaskmanager.numberOfTaskSlots=$TASK_SLOTS -d -c $MAIN_CLASS $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ fi
+ fi
+ if [[ -n $APPLICATION_ID && (-z $TASK_MODE || $TASK_MODE == "session") ]]; then
+ num=$(flink list | grep "$file" | grep -v flink | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+ #session
+ flink run -t yarn-session -Dyarn.application.id=$APPLICATION_ID -d -c $MAIN_CLASS $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ fi
+ fi
+ done
+}
+if [ $# != 1 ]; then
+ echo "usage: ./startall.sh [Configuration path]"
+ exit 1
+fi
+#the first argument is the configuration directory name
+read_dir $1
+rm -rf $JAR_NAME
+
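A minimal invocation sketch for the start.sh above, assuming it is deployed alongside its config/ directory (the exact install path is not fixed by this diff) and that flink and yarn are on PATH:

  cd /path/to/topology/completion   # hypothetical install location
  ./start.sh ./config/              # one per-job Flink application per config file
  yarn application -list            # application names match the config file names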
diff --git a/MSH-PIC/flink/topology/completion/stop.sh b/MSH-PIC/flink/topology/completion/stop.sh
new file mode 100644
index 0000000..24e1a83
--- /dev/null
+++ b/MSH-PIC/flink/topology/completion/stop.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#Flink job stop script
+source /etc/profile
+#mode argument: per-job or session
+TASK_MODE="per-job"
+SESSION_CLUSTER="Flink session cluster"
+
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ if [[ $TASK_MODE == "per-job" ]]; then
+ appid=$(yarn application -list | grep "$file" | awk '{print $1}')
+ yarn application -kill $appid
+ echo -e "\033[32mcancel $file\033[0m"
+
+ elif [[ -n $APPLICATION_ID && $TASK_MODE == "session" ]]; then
+ jobid=$(flink list | grep -v flink | grep "$file" | awk '{print $4}')
+ flink cancel $jobid
+ echo -e "\033[32mcancel $file\033[0m"
+ fi
+
+ fi
+
+ done
+}
+
+#the first argument is the configuration directory name
+read_dir $1
+
diff --git a/MSH-PIC/flink/topology/data/asn_v4.mmdb b/MSH-PIC/flink/topology/data/asn_v4.mmdb
new file mode 100644
index 0000000..63df444
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/asn_v4.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/asn_v6.mmdb b/MSH-PIC/flink/topology/data/asn_v6.mmdb
new file mode 100644
index 0000000..25cff33
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/asn_v6.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/ip_v4_built_in.mmdb b/MSH-PIC/flink/topology/data/ip_v4_built_in.mmdb
new file mode 100644
index 0000000..7210af4
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/ip_v4_built_in.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/ip_v4_user_defined.mmdb b/MSH-PIC/flink/topology/data/ip_v4_user_defined.mmdb
new file mode 100644
index 0000000..9853019
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/ip_v4_user_defined.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/ip_v6_built_in.mmdb b/MSH-PIC/flink/topology/data/ip_v6_built_in.mmdb
new file mode 100644
index 0000000..35d1d32
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/ip_v6_built_in.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/ip_v6_user_defined.mmdb b/MSH-PIC/flink/topology/data/ip_v6_user_defined.mmdb
new file mode 100644
index 0000000..5047903
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/ip_v6_user_defined.mmdb
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/keystore.jks b/MSH-PIC/flink/topology/data/keystore.jks
new file mode 100644
index 0000000..2e2328b
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/keystore.jks
Binary files differ
diff --git a/MSH-PIC/flink/topology/data/truststore.jks b/MSH-PIC/flink/topology/data/truststore.jks
new file mode 100644
index 0000000..b435e09
--- /dev/null
+++ b/MSH-PIC/flink/topology/data/truststore.jks
Binary files differ
diff --git a/MSH-PIC/flink/topology/relationship-gtpc-user/config/RELATIONSHIP-GTPC-USER b/MSH-PIC/flink/topology/relationship-gtpc-user/config/RELATIONSHIP-GTPC-USER
new file mode 100644
index 0000000..82d314d
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-gtpc-user/config/RELATIONSHIP-GTPC-USER
@@ -0,0 +1,33 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+input.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#hbase zookeeper address, used to connect to HBase
+hbase.zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+hbase.scan.limit=100000
+
+hbase.rpc.timeout=60000
+
+cache.expire.seconds=86400
+
+cache.max.size=100000
+
+cache.update.seconds=3600
+#--------------------------------Kafka consumer group settings------------------------------#
+
+#kafka topic to consume data from
+input.kafka.topic=GTPC-RECORD-COMPLETED
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=relationship-gtpc-user-20220830-1
+
+#--------------------------------topology settings------------------------------#
+#user-teid mapping table
+relation.user.teid.table.name=tsg_galaxy:relation_user_teid
+
+#location base path
+tools.library=/home/tsg/olap/topology/data/
+
+#gtpc knowledge base table
+gtpc.knowledge.base.table.name=tsg_galaxy:gtpc_knowledge_base
diff --git a/MSH-PIC/flink/topology/relationship-gtpc-user/service_flow_config.properties b/MSH-PIC/flink/topology/relationship-gtpc-user/service_flow_config.properties
new file mode 100644
index 0000000..b72fac4
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-gtpc-user/service_flow_config.properties
@@ -0,0 +1,33 @@
+#--------------------------------address settings------------------------------#
+#source kafka address
+input.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#hbase zookeeper address, used to connect to HBase
+hbase.zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+hbase.scan.limit=100000
+
+hbase.rpc.timeout=60000
+
+cache.expire.seconds=86400
+
+cache.max.size=100000
+
+cache.update.seconds=3600
+#--------------------------------Kafka consumer group settings------------------------------#
+
+#kafka topic to consume data from
+input.kafka.topic=GTPC-RECORD-COMPLETED
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=relationship-gtpc-user-20220830-1
+
+#--------------------------------topology settings------------------------------#
+#user-teid mapping table
+relation.user.teid.table.name=tsg_galaxy:relation_user_teid
+
+#location base path
+tools.library=/home/tsg/olap/topology/dat/
+
+#gtpc knowledge base table
+gtpc.knowledge.base.table.name=tsg_galaxy:gtpc_knowledge_base
diff --git a/MSH-PIC/flink/topology/relationship-gtpc-user/start.sh b/MSH-PIC/flink/topology/relationship-gtpc-user/start.sh
new file mode 100644
index 0000000..9ea8fd5
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-gtpc-user/start.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+#Flink job start script
+source /etc/profile
+#######################parameter settings####################################
+#yarn execution mode: per-job or session
+TASK_MODE="per-job"
+#name of the original jar to update
+#PRIMORDIAL
+PRIMORDIAL='relationship-gtpc-user-23-06-02.jar'
+#jar name
+JAR_NAME='relationship-gtpc-user_tmp.jar'
+
+SESSION_CLUSTER="Flink per-job cluster"
+MAIN_CLASS=""
+CONFIG_NAME="service_flow_config.properties"
+JOBMANAGER_MEMORY="1024m"
+TASKMANAGER_MEMORY="3072m"
+TASK_SLOTS=3
+CLASS_LOADER='child-first'
+#######################parameter settings####################################
+
+#directory containing the job jar
+BASE_DIR=$(pwd)
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+yes | cp -r $PRIMORDIAL $JAR_NAME
+
+#cd $BASE_DIR
+jar -xvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ #per-job
+ if [[ $TASK_MODE == "per-job" ]]; then
+ num=$(yarn application -list | grep $file | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+
+ flink run -t yarn-per-job -Djobmanager.memory.process.size=$JOBMANAGER_MEMORY -Dtaskmanager.memory.process.size=$TASKMANAGER_MEMORY -Dyarn.application.name=$file -Dtaskmanager.numberOfTaskSlots=$TASK_SLOTS -p 3 -d $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ elif [[ -n $APPLICATION_ID && $TASK_MODE == "per-job" ]]; then
+ num=$(flink list | grep "$file" | grep -v flink | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+ #per-job
+ flink run -t yarn-per-job -Dyarn.application.id=$APPLICATION_ID -Dclassloader.resolve-order=$CLASS_LOADER -d $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ fi
+
+ fi
+ done
+}
+if [ $# != 1 ]; then
+ echo "usage: ./startall.sh [Configuration path]"
+ exit 1
+fi
+#the first argument is the configuration directory name
+read_dir $1
+
+rm -rf $JAR_NAME
+
diff --git a/MSH-PIC/flink/topology/relationship-gtpc-user/stop.sh b/MSH-PIC/flink/topology/relationship-gtpc-user/stop.sh
new file mode 100644
index 0000000..3657871
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-gtpc-user/stop.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#Flink job stop script
+source /etc/profile
+#mode argument: per-job or session
+TASK_MODE="per-job"
+SESSION_CLUSTER="Flink per-job cluster"
+
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ if [[ $TASK_MODE == "per-job" ]]; then
+ appid=$(yarn application -list | grep "$file" | awk '{print $1}')
+ yarn application -kill $appid
+ echo -e "\033[32mcancel $file\033[0m"
+
+ elif [[ -n $APPLICATION_ID && $TASK_MODE == "per-job" ]]; then
+ jobid=$(flink list | grep -v flink | grep "$file" | awk '{print $4}')
+ flink cancel $jobid
+ echo -e "\033[32mcancel $file\033[0m"
+ fi
+
+ fi
+
+ done
+}
+
+#the first argument is the configuration directory name
+read_dir $1
+
diff --git a/MSH-PIC/flink/topology/relationship-radius-account/config/RELATIONSHIP-RADIUS-ACCOUNT b/MSH-PIC/flink/topology/relationship-radius-account/config/RELATIONSHIP-RADIUS-ACCOUNT
new file mode 100644
index 0000000..9dccde7
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-radius-account/config/RELATIONSHIP-RADIUS-ACCOUNT
@@ -0,0 +1,28 @@
+#source kafka address
+input.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#hbase zookeeper address, used to connect to HBase
+hbase.zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#--------------------------------Kafka consumer group settings------------------------------#
+
+#kafka topic to consume data from
+input.kafka.topic=RADIUS-RECORD
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=account-framedip-hbase-20211113-1
+
+#--------------------------------topology settings------------------------------#
+#ip-to-account mapping table
+hbase.framedip.table.name=tsg_galaxy:relation_framedip_account
+
+#location base path
+tools.library=/home/tsg/olap/topology/data/
+
+#account-to-ip mapping table
+hbase.account.table.name=tsg_galaxy:relation_account_framedip
+
+hbase.rpc.timeout=60000
+
+hbase.scan.limit=100000
+
diff --git a/MSH-PIC/flink/topology/relationship-radius-account/service_flow_config.properties b/MSH-PIC/flink/topology/relationship-radius-account/service_flow_config.properties
new file mode 100644
index 0000000..ccc7ad7
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-radius-account/service_flow_config.properties
@@ -0,0 +1,28 @@
+#source kafka address
+input.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094
+
+#hbase zookeeper address, used to connect to HBase
+hbase.zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181
+
+#--------------------------------Kafka consumer group settings------------------------------#
+
+#kafka topic to consume data from
+input.kafka.topic=RADIUS-RECORD
+
+#consumer group for the source topic; stores this spout id's consumer offsets (may be named after the topology) so the next read does not re-consume data
+group.id=account-framedip-hbase-20211113-1
+
+#--------------------------------topology settings------------------------------#
+#ip-to-account mapping table
+hbase.framedip.table.name=tsg_galaxy:relation_framedip_account
+
+#location base path
+tools.library=/home/tsg/olap/topology/dat/
+
+#account-to-ip mapping table
+hbase.account.table.name=tsg_galaxy:relation_account_framedip
+
+hbase.rpc.timeout=60000
+
+hbase.scan.limit=100000
+
diff --git a/MSH-PIC/flink/topology/relationship-radius-account/start.sh b/MSH-PIC/flink/topology/relationship-radius-account/start.sh
new file mode 100644
index 0000000..00eee48
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-radius-account/start.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+#Flink job start script
+source /etc/profile
+#######################parameter settings####################################
+#yarn execution mode: per-job or session
+TASK_MODE="per-job"
+#name of the original jar to update
+#PRIMORDIAL
+PRIMORDIAL='radius-relation-23-06-02.jar'
+#jar name
+JAR_NAME='radius-relation_tmp.jar'
+
+SESSION_CLUSTER="Flink per-job cluster"
+MAIN_CLASS=""
+CONFIG_NAME="service_flow_config.properties"
+JOBMANAGER_MEMORY="1024m"
+TASKMANAGER_MEMORY="3072m"
+TASK_SLOTS=3
+CLASS_LOADER='child-first'
+#######################parameter settings####################################
+
+#directory containing the job jar
+BASE_DIR=$(pwd)
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+yes | cp -r $PRIMORDIAL $JAR_NAME
+
+#cd $BASE_DIR
+jar -xvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ #per-job
+ if [[ $TASK_MODE == "per-job" ]]; then
+ num=$(yarn application -list | grep $file | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+
+ flink run -t yarn-per-job -Djobmanager.memory.process.size=$JOBMANAGER_MEMORY -Dtaskmanager.memory.process.size=$TASKMANAGER_MEMORY -Dyarn.application.name=$file -Dtaskmanager.numberOfTaskSlots=$TASK_SLOTS -p 3 -d $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ elif [[ -n $APPLICATION_ID && $TASK_MODE == "per-job" ]]; then
+ num=$(flink list | grep "$file" | grep -v flink | wc -l)
+ if [ $num -eq "0" ]; then
+ cat $1$file >$BASE_DIR/$CONFIG_NAME
+ jar -uvf $BASE_DIR/$JAR_NAME $CONFIG_NAME
+ #per-job
+ flink run -t yarn-per-job -Dyarn.application.id=$APPLICATION_ID -Dclassloader.resolve-order=$CLASS_LOADER -d $BASE_DIR/$JAR_NAME $file
+ sleep 10
+ fi
+ fi
+
+ fi
+ done
+}
+if [ $# != 1 ]; then
+ echo "usage: ./startall.sh [Configuration path]"
+ exit 1
+fi
+#the first argument is the configuration directory name
+read_dir $1
+
+rm -rf $JAR_NAME
+
diff --git a/MSH-PIC/flink/topology/relationship-radius-account/stop.sh b/MSH-PIC/flink/topology/relationship-radius-account/stop.sh
new file mode 100644
index 0000000..3657871
--- /dev/null
+++ b/MSH-PIC/flink/topology/relationship-radius-account/stop.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#Flink job stop script
+source /etc/profile
+#mode argument: per-job or session
+TASK_MODE="per-job"
+SESSION_CLUSTER="Flink per-job cluster"
+
+APPLICATION_ID=$(yarn application -list | grep "$SESSION_CLUSTER" | awk '{print $1}')
+
+function read_dir() {
+ for file in $(ls $1); do
+ if [ -d $1"/"$file ]; then
+ read_dir $1"/"$file
+ else
+ if [[ $TASK_MODE == "per-job" ]]; then
+ appid=$(yarn application -list | grep "$file" | awk '{print $1}')
+ yarn application -kill $appid
+ echo -e "\033[32mcancel $file\033[0m"
+
+ elif [[ -n $APPLICATION_ID && $TASK_MODE == "per-job" ]]; then
+ jobid=$(flink list | grep -v flink | grep "$file" | awk '{print $4}')
+ flink cancel $jobid
+ echo -e "\033[32mcancel $file\033[0m"
+ fi
+
+ fi
+
+ done
+}
+
+#the first argument is the configuration directory name
+read_dir $1
+
diff --git a/MSH-PIC/galaxy-hos-nginx/conf/nginx.conf b/MSH-PIC/galaxy-hos-nginx/conf/nginx.conf
new file mode 100644
index 0000000..759017a
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-nginx/conf/nginx.conf
@@ -0,0 +1,85 @@
+worker_processes auto;
+
+events {
+ worker_connections 4096;
+}
+
+http {
+ include mime.types;
+ default_type application/octet-stream;
+ sendfile on;
+ keepalive_timeout 65;
+ keepalive_requests 100000;
+ vhost_traffic_status_zone;
+ vhost_traffic_status_filter_by_host on;
+ client_max_body_size 5120M; #(maximum allowed client request body size)
+ client_body_buffer_size 128k; #(client request body buffer size)
+ access_log off; # disable the access log
+
+ upstream hos {
+ server 192.168.20.193:8186;
+ server 192.168.20.194:8186;
+ keepalive 1000;
+ keepalive_timeout 65;
+ keepalive_requests 100000;
+ }
+
+ geo $islocalip {
+ default 0;
+ }
+
+#hos plain HTTP (no TLS)
+server {
+ listen 9098;
+ server_name localhost;
+
+ location / {
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ proxy_set_header Host $http_host;
+ proxy_connect_timeout 60s;#timeout for nginx to connect to the upstream (proxy connect timeout), default 60s
+ proxy_send_timeout 600s;#timeout for sending data to the upstream (proxy send timeout), default 60s
+ proxy_read_timeout 600s;#timeout for the upstream response after the connection succeeds (proxy read timeout), default 60s
+ set $ssl 0; #whether to redirect to TLS: 1 = enabled, 0 = disabled, disabled by default
+ if ($islocalip = 0){
+ set $ssl "${ssl}1";
+ }
+ if ($request_method = GET) {
+ set $ssl "${ssl}1";
+ }
+ if ($ssl = "111") {
+ return 302 https://$host:9097$request_uri;
+ }
+ proxy_pass http://hos$request_uri;
+ }
+}
+
+
+#hos over TLS
+server {
+ listen 9097 ssl;
+ server_name localhost;
+ proxy_set_header Host $host:9098;
+ ssl_certificate /usr/local/nginx/conf/self-sign.crt;
+ ssl_certificate_key /usr/local/nginx/conf/self-sign.key;
+ location / {
+ proxy_connect_timeout 60s;#timeout for nginx to connect to the upstream (proxy connect timeout), default 60s
+ proxy_send_timeout 600s;#timeout for sending data to the upstream (proxy send timeout), default 60s
+ proxy_read_timeout 600s;#timeout for the upstream response after the connection succeeds (proxy read timeout), default 60s
+ proxy_pass http://hos;
+ }
+}
+
+#nginx monitoring port
+server {
+ listen 9914;
+ server_name localhost;
+ location /status {
+ vhost_traffic_status_display;
+ vhost_traffic_status_display_format html;
+ }
+}
+
+}
+
+
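In the plain-HTTP server block above, $ssl starts from the manual toggle (0 here), gains a "1" when the client falls outside the geo $islocalip list and another "1" for GET requests, and only the value "111" triggers the 302 to the TLS listener on 9097; with the toggle left at 0, every request is simply proxied to the hos upstream. A hedged check, using one of the upstream hosts purely as an example address:

  # as configured (set $ssl 0;): proxied over plain HTTP, no redirect expected
  curl -sI http://192.168.20.193:9098/ | head -n1
  # after switching to 'set $ssl 1;' and reloading, an external GET should answer 302 to https://...:9097
  curl -sI http://192.168.20.193:9098/ | head -n1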
diff --git a/MSH-PIC/galaxy-hos-nginx/conf/self-sign.crt b/MSH-PIC/galaxy-hos-nginx/conf/self-sign.crt
new file mode 100644
index 0000000..8cb6bd7
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-nginx/conf/self-sign.crt
@@ -0,0 +1,13 @@
+-----BEGIN CERTIFICATE-----
+MIICBTCCAaugAwIBAgIJAN1eg7aXJa0AMAoGCCqGSM49BAMCMGoxCzAJBgNVBAYT
+AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
+MRMwEQYDVQQKDApHZG50LWNsb3VkMRkwFwYDVQQDDBAqLmdkbnQtY2xvdWQuY29t
+MB4XDTIxMDgzMTA1NTk0MloXDTMxMDgyOTA1NTk0MlowajELMAkGA1UEBhMCVVMx
+EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xEzAR
+BgNVBAoMCkdkbnQtY2xvdWQxGTAXBgNVBAMMECouZ2RudC1jbG91ZC5jb20wWTAT
+BgcqhkjOPQIBBggqhkjOPQMBBwNCAARJcFCde1et82GZjZmr7M8nsx7dQki3SJ6v
+EfVxrRO6AaAkge6eq1mg0MyYRCc2j8Q+W4foy2tlVwywRJCiKnvzozowODAJBgNV
+HRMEAjAAMCsGA1UdEQQkMCKCECouZ2RudC1jbG91ZC5jb22CDmdkbnQtY2xvdWQu
+Y29tMAoGCCqGSM49BAMCA0gAMEUCIBi5SITjNG7P/5qVs6EyJ2E9602KiNUS1EbY
+3CJ33z0YAiEAySQ+MOtTESxRzRgkxuQHFktyCGyRWmqrkOEDES1j+QQ=
+-----END CERTIFICATE-----
diff --git a/MSH-PIC/galaxy-hos-nginx/conf/self-sign.key b/MSH-PIC/galaxy-hos-nginx/conf/self-sign.key
new file mode 100644
index 0000000..3fec678
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-nginx/conf/self-sign.key
@@ -0,0 +1,8 @@
+-----BEGIN EC PARAMETERS-----
+BggqhkjOPQMBBw==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIC6qFeIiJvkGqYIxpfl14NZ8bOu6Fk0jfLumg39lTTLMoAoGCCqGSM49
+AwEHoUQDQgAESXBQnXtXrfNhmY2Zq+zPJ7Me3UJIt0ierxH1ca0TugGgJIHunqtZ
+oNDMmEQnNo/EPluH6MtrZVcMsESQoip78w==
+-----END EC PRIVATE KEY-----
diff --git a/MSH-PIC/galaxy-hos-nginx/docker-compose.yml b/MSH-PIC/galaxy-hos-nginx/docker-compose.yml
new file mode 100644
index 0000000..ec088fe
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-nginx/docker-compose.yml
@@ -0,0 +1,16 @@
+version: '3'
+services:
+ nginx:
+ image: nginx-metrics:1.17.0
+ container_name: galaxy-hos-nginx
+ restart: always
+ ports:
+ - 80:80
+ volumes:
+ - /home/tsg/olap/galaxy/volumes/galaxy-hos-nginx/conf/nginx.conf:/usr/local/nginx/conf/nginx.conf
+ - /home/tsg/olap/galaxy/volumes/galaxy-hos-nginx/conf/self-sign.crt:/usr/local/nginx/conf/self-sign.crt
+ - /home/tsg/olap/galaxy/volumes/galaxy-hos-nginx/conf/self-sign.key:/usr/local/nginx/conf/self-sign.key
+ - /home/tsg/olap/galaxy/volumes/galaxy-hos-nginx/logs:/usr/local/nginx/logs
+ working_dir: /etc/nginx
+ command: /etc/nginx/nginx -g 'daemon off;'
+ network_mode: "host"
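A short bring-up sketch for this compose file, assuming the host paths listed under volumes already exist and the nginx-metrics:1.17.0 image is loaded locally:

  docker-compose up -d nginx                          # run from the directory holding this docker-compose.yml
  docker logs -f galaxy-hos-nginx                     # watch startup
  curl -s http://127.0.0.1:9914/status | head -n5     # vhost_traffic_status page from the monitoring block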
diff --git a/MSH-PIC/galaxy-hos-service/config/application.yml b/MSH-PIC/galaxy-hos-service/config/application.yml
new file mode 100644
index 0000000..f956274
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-service/config/application.yml
@@ -0,0 +1,23 @@
+nacos:
+ config:
+ type: yaml
+ server-addr: https://192.168.20.252:8849
+ namespace: MSH
+ data-id: galaxy-hos-service.yml
+ auto-refresh: true
+ group: Galaxy
+ username: nacos
+ password: nacos
+ bootstrap:
+ enable: true
+ log:
+ enable: true
+ discovery:
+ service-name: hos
+spring:
+ profiles:
+ active: dev
+ application:
+ name: HosServiceApplication
+logging:
+ config: ./config/log4j2-dev.xml
diff --git a/MSH-PIC/galaxy-hos-service/config/log4j2-dev.xml b/MSH-PIC/galaxy-hos-service/config/log4j2-dev.xml
new file mode 100644
index 0000000..84576fd
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-service/config/log4j2-dev.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+ <!-- logging parameter settings -->
+ <Properties>
+ <!-- roll and compress the log file once it reaches this size -->
+ <property name="LOG_SIZE">200M</property>
+ <!-- keep at most 10 compressed files -->
+ <property name="LOG_NUMS">10</property>
+ <!-- log level -->
+ <property name="LOG_LEVEL">error</property>
+ <!-- log file path -->
+ <property name="LOG_PATH">logs</property>
+ <!-- log file name -->
+ <property name="LOG_FILE_NAME">galaxy-hos-service</property>
+ <!-- log pattern -->
+ <property name="LOG_PATTERN">[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] [%p] [Thread:%t] %l %x - %m%n</property>
+ </Properties>
+
+ <appenders>
+ <!-- <Console name="consoleSystemOutAppender" target="SYSTEM_OUT">
+ <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY"/>
+ <PatternLayout pattern="${LOG_PATTERN}"/>
+ </Console>
+ -->
+ <RollingFile name="rollingFileAllAppender"
+ fileName="${LOG_PATH}/${LOG_FILE_NAME}.log"
+ filePattern="${LOG_PATH}/history/$${date:yyyy-MM-dd}/${LOG_FILE_NAME}-%d{yyyy-MM-dd}-%i.log.gz">
+ <PatternLayout pattern="${LOG_PATTERN}"/>
+ <Policies>
+ <SizeBasedTriggeringPolicy size="${LOG_SIZE}"/>
+ <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+ </Policies>
+ <Filters>
+ <ThresholdFilter level="all" onMatch="ACCEPT" onMismatch="DENY"/>
+ </Filters>
+ <DefaultRolloverStrategy max="${LOG_NUMS}">
+ <Delete basePath="${LOG_PATH}/history" maxDepth="1">
+ <IfFileName glob="*.log.gz">
+ <IfLastModified age="30d">
+ <IfAny>
+ <IfAccumulatedFileSize exceeds="10 GB" />
+ </IfAny>
+ </IfLastModified>
+ </IfFileName>
+ </Delete>
+ </DefaultRolloverStrategy>
+ </RollingFile>
+ </appenders>
+ <loggers>
+ <root level="${LOG_LEVEL}">
+ <!-- <appender-ref ref="consoleSystemOutAppender"/> -->
+ <appender-ref ref="rollingFileAllAppender"/>
+ </root>
+ </loggers>
+</configuration>
diff --git a/MSH-PIC/galaxy-hos-service/docker-compose.yml b/MSH-PIC/galaxy-hos-service/docker-compose.yml
new file mode 100644
index 0000000..11ee543
--- /dev/null
+++ b/MSH-PIC/galaxy-hos-service/docker-compose.yml
@@ -0,0 +1,15 @@
+version: '2'
+
+services:
+ galaxy-hos-service:
+ image: galaxy-hos-service:23.07.04
+ container_name: galaxy-hos-service
+ environment:
+ JAVA_OPTS: "-Xmx10240m -Xms10240m -Xmn512m -Xss256k -XX:+CrashOnOutOfMemoryError -XX:MetaspaceSize=128m -XX:MaxPermSize=512m -XX:SurvivorRatio=2 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/galaxy/galaxy-hos-service/logs/gc-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/galaxy/galaxy-hos-service/logs/"
+ ports:
+ - "9098:9098"
+ volumes:
+ - "/home/tsg/olap/galaxy/volumes/galaxy-hos-service/config:/home/tsg/galaxy/galaxy-hos-service/config"
+ - "/home/tsg/olap/galaxy/volumes/galaxy-hos-service/logs:/home/tsg/galaxy/galaxy-hos-service/logs"
+ restart: always
+ network_mode: "host"
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_active_defence_event_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_active_defence_event_tsgv3.sh
new file mode 100644
index 0000000..09da598
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_active_defence_event_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_active_defence_event_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
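The script above is a small supervisor loop: it keeps $PROCESS_SUM gohangout processes running against conf/${YML_NAME}.yml, restarts missing ones every second, and after more consecutive restarts than the configured process count it kills everything and backs off for 30 s on the assumption that Kafka or ClickHouse is down. A usage sketch, with the install path assumed:

  cd /path/to/gohangout/bin/ghoStart     # hypothetical location of these scripts
  mkdir -p ../start_log                  # the restart log directory must exist
  nohup ./start_gohangout_k2ck_active_defence_event_tsgv3.sh 1 >/dev/null 2>&1 &   # argument = desired process count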
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_dos_event_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_dos_event_tsgv3.sh
new file mode 100644
index 0000000..776cf74
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_dos_event_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_dos_event_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_gtpc_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_gtpc_record_tsgv3.sh
new file mode 100644
index 0000000..edf8a66
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_gtpc_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_gtpc_record_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_interim_session_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_interim_session_record_tsgv3.sh
new file mode 100644
index 0000000..1f44b29
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_interim_session_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_interim_session_record_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_proxy_event_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_proxy_event_tsgv3.sh
new file mode 100644
index 0000000..a1f3981
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_proxy_event_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_proxy_event_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=2
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_onff_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_onff_tsgv3.sh
new file mode 100644
index 0000000..b6ba86d
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_onff_tsgv3.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_radius_onff_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
+
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_record_tsgv3.sh
new file mode 100644
index 0000000..4db576c
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_radius_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_radius_record_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_security_event_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_security_event_tsgv3.sh
new file mode 100644
index 0000000..6ad0ccd
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_security_event_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_security_event_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_session_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_session_record_tsgv3.sh
new file mode 100644
index 0000000..eeb793f
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_session_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_session_record_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #more processes running than configured: kill them all and restart
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_sys_packet_capture_event_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_sys_packet_capture_event_tsgv3.sh
new file mode 100644
index 0000000..828fa8f
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_sys_packet_capture_event_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+#gohangout startup script
+
+#configuration file name; note the config file must live under $BASE_DIR/conf
+YML_NAME=k2ck_sys_packet_capture_event_tsgv3
+#path to the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+#log level: 1, 5, 10; higher numbers mean more verbose logs
+LOG_LV=5
+#number of worker threads; each worker has its own filter and output and uses its own share of memory; default is 1
+THREAD_SUM=1
+#total number of processes
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #大于设置进程数,杀掉所有进程,重启
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_transaction_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_transaction_record_tsgv3.sh
new file mode 100644
index 0000000..eac7a1c
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_transaction_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# gohangout startup script
+
+# Config file name; the config file must be placed under $BASE_DIR/conf
+YML_NAME=k2ck_transaction_record_tsgv3
+# Base directory containing the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+# Log level: 1, 5 or 10; a higher value gives more detailed logs
+LOG_LV=5
+# Number of worker threads; each worker has its own filter and output and uses its own share of memory (default: 1)
+THREAD_SUM=1
+# Total number of processes to keep running
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #大于设置进程数,杀掉所有进程,重启
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_voip_record_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_voip_record_tsgv3.sh
new file mode 100644
index 0000000..460fc7e
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_voip_record_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# gohangout startup script
+
+# Config file name; the config file must be placed under $BASE_DIR/conf
+YML_NAME=k2ck_voip_record_tsgv3
+# Base directory containing the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+# Log level: 1, 5 or 10; a higher value gives more detailed logs
+LOG_LV=5
+# Number of worker threads; each worker has its own filter and output and uses its own share of memory (default: 1)
+THREAD_SUM=1
+# Total number of processes to keep running
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #大于设置进程数,杀掉所有进程,重启
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_xxxxxx_tsgv3.sh b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_xxxxxx_tsgv3.sh
new file mode 100644
index 0000000..2ef516a
--- /dev/null
+++ b/MSH-PIC/gohangout/bin/ghoStart/start_gohangout_k2ck_xxxxxx_tsgv3.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# gohangout startup script
+
+# Config file name; the config file must be placed under $BASE_DIR/conf
+YML_NAME=k2ck_connection_record_tsgv3
+# Base directory containing the gohangout binary
+BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
+# Log level: 1, 5 or 10; a higher value gives more detailed logs
+LOG_LV=5
+# Number of worker threads; each worker has its own filter and output and uses its own share of memory (default: 1)
+THREAD_SUM=1
+# Total number of processes to keep running
+PROCESS_SUM=$1
+
+if [ ! -d "$BASE_DIR/logs" ]; then
+ mkdir -p $BASE_DIR/logs
+fi
+
+echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+id=0
+logid=0
+while true ; do
+ NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ time_stamp=$(date +%Y%m%d%H%M%S)
+ if [ "${NUM}" -lt ${PROCESS_SUM} ];then
+ $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
+ echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ ((logid++))
+ ((id++))
+ if [ ${logid} -gt ${PROCESS_SUM} ];then
+ logid=0
+ pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
+ sleep 30
+ fi
+ #大于设置进程数,杀掉所有进程,重启
+ elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
+ for pid in $pids
+ do
+ kill -9 $pid
+ done
+ id=0
+ fi
+ sleep 1
+done
diff --git a/MSH-PIC/gohangout/check_status.sh b/MSH-PIC/gohangout/check_status.sh
new file mode 100644
index 0000000..420e045
--- /dev/null
+++ b/MSH-PIC/gohangout/check_status.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+NODE_DIR="/home/tsg/olap/galaxy/volumes/node-exporter/prom"
+GO_STATUS=`ps -ef |grep gohangout | grep worker | wc -l`
+
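+# Write a gohangout_status gauge (1 = healthy, 0 = degraded) for node-exporter's
+# textfile collector; 12 is the expected minimum number of gohangout worker processes.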
+if [ $GO_STATUS -ge "12" ];then
+echo "gohangout_status 1" > $NODE_DIR/gohangout_status.prom
+else
+echo "gohangout_status 0" > $NODE_DIR/gohangout_status.prom
+fi
+
diff --git a/MSH-PIC/gohangout/conf/k2ck_active_defence_event_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_active_defence_event_tsgv3.yml
new file mode 100644
index 0000000..10d35fa
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_active_defence_event_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ ACTIVE-DEFENCE-EVENT-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: active_defence_event_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.active_defence_event_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
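+ # Assumed gohangout output semantics: flush the batch to ClickHouse after 100000 buffered
+ # events or every 30 seconds, whichever comes first, with a single concurrent writer.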
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_dos_event_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_dos_event_tsgv3.yml
new file mode 100644
index 0000000..5259603
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_dos_event_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ DOS-EVENT: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: dos_event_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.dos_event_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_gtpc_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_gtpc_record_tsgv3.yml
new file mode 100644
index 0000000..b8c1704
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_gtpc_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ GTPC-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: gtpc_record_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.gtpc_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_interim_session_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_interim_session_record_tsgv3.yml
new file mode 100644
index 0000000..9f01644
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_interim_session_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ INTERIM-SESSION-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: interim_session_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.interim_session_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 200000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_proxy_event_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_proxy_event_tsgv3.yml
new file mode 100644
index 0000000..9056400
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_proxy_event_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ PROXY-EVENT-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: proxy_event_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.proxy_event_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_radius_onff_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_radius_onff_tsgv3.yml
new file mode 100644
index 0000000..cb118e3
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_radius_onff_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ RADIUS-ONFF: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: radius_onff_log_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.radius_onff_log_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_radius_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_radius_record_tsgv3.yml
new file mode 100644
index 0000000..34af375
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_radius_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ RADIUS-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: radius_record_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.radius_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_security_event_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_security_event_tsgv3.yml
new file mode 100644
index 0000000..5944cde
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_security_event_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ SECURITY-EVENT-COMPLETED: 4
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: security_event_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.security_event_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_session_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_session_record_tsgv3.yml
new file mode 100644
index 0000000..dc347a4
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_session_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ SESSION-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: session_record_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.session_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 200000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_sys_packet_capture_event_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_sys_packet_capture_event_tsgv3.yml
new file mode 100644
index 0000000..9ba54f7
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_sys_packet_capture_event_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ SYS-PACKET-CAPTURE-EVENT-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: sys_packet_capture_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.sys_packet_capture_event_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_transaction_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_transaction_record_tsgv3.yml
new file mode 100644
index 0000000..70c6d7e
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_transaction_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ TRANSACTION-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: transaction_record_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.transaction_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 200000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/conf/k2ck_voip_record_tsgv3.yml b/MSH-PIC/gohangout/conf/k2ck_voip_record_tsgv3.yml
new file mode 100644
index 0000000..5379b9d
--- /dev/null
+++ b/MSH-PIC/gohangout/conf/k2ck_voip_record_tsgv3.yml
@@ -0,0 +1,30 @@
+inputs:
+ - Kafka:
+ topic:
+ VOIP-RECORD-COMPLETED: 1
+ #assign:
+ # weblog: [0,9]
+ codec: json
+ consumer_settings:
+ bootstrap.servers: "192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094"
+ group.id: voip_record_completed_msh_tsgv3
+ max.partition.fetch.bytes: '31457280'
+ auto.commit.interval.ms: '5000'
+ # from.beginning: 'true'
+ sasl.mechanism: PLAIN
+ sasl.user: admin
+ sasl.password: galaxy2019
+
+outputs:
+ - Clickhouse:
+ table: 'tsg_galaxy_v3.voip_record_local'
+ username: 'tsg_insert'
+ password: 'galaxy2019'
+ hosts:
+ - 'tcp://192.168.20.193:9001'
+ - 'tcp://192.168.20.194:9001'
+ - 'tcp://192.168.20.195:9001'
+ bulk_actions: 100000
+ flush_interval: 30
+ concurrent: 1
+ conn_max_life_time: 60
diff --git a/MSH-PIC/gohangout/docker-compose.yml b/MSH-PIC/gohangout/docker-compose.yml
new file mode 100644
index 0000000..7afcee8
--- /dev/null
+++ b/MSH-PIC/gohangout/docker-compose.yml
@@ -0,0 +1,15 @@
+version: "3"
+services:
+ gohangout:
+ # Container image this service runs from
+ image: gohangout:1.15.2.20230310
+ restart: always
+ container_name: gohangout
+ environment:
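+ # PROCESSNUM is assumed to be read by the image entrypoint and passed to start_all.sh
+ # as the per-pipeline process count ($1); this is an assumption, not confirmed here.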
+ PROCESSNUM: 1
+ volumes:
+ - "/home/tsg/olap/galaxy/volumes/gohangout/logs:/home/ceiec/go_gohangout/gohangout/logs"
+ - "/home/tsg/olap/galaxy/volumes/gohangout/start_all.sh:/home/ceiec/go_gohangout/gohangout/start_all.sh"
+ - "/home/tsg/olap/galaxy/volumes/gohangout/conf:/home/ceiec/go_gohangout/gohangout/conf"
+ - "/home/tsg/olap/galaxy/volumes/gohangout/bin/ghoStart:/home/ceiec/go_gohangout/gohangout/bin/ghoStart"
+ network_mode: "host"
diff --git a/MSH-PIC/gohangout/start_all.sh b/MSH-PIC/gohangout/start_all.sh
new file mode 100644
index 0000000..3f07352
--- /dev/null
+++ b/MSH-PIC/gohangout/start_all.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+STARTDIR=$(cd $(dirname $0); pwd)
+
+# Docker mode: start each pipeline watchdog in the background, passing the process count ($1) through
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_active_defence_event_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_session_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_proxy_event_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_radius_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_radius_onff_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_sys_packet_capture_event_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_transaction_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_voip_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_gtpc_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_interim_session_record_tsgv3.sh $1 > /dev/null 2>&1 &
+nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_dos_event_tsgv3.sh $1 > /dev/null 2>&1 &
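+# The security-event watchdog runs in the foreground with a fixed count of 2,
+# likely also serving as the container's foreground process.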
+$STARTDIR/bin/ghoStart/start_gohangout_k2ck_security_event_tsgv3.sh 2
diff --git a/MSH-PIC/hadoop/bin/container-executor b/MSH-PIC/hadoop/bin/container-executor
new file mode 100644
index 0000000..5e228bc
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/container-executor
Binary files differ
diff --git a/MSH-PIC/hadoop/bin/hadoop b/MSH-PIC/hadoop/bin/hadoop
new file mode 100644
index 0000000..a5e8885
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/hadoop
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
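+# One-time HDFS initialization helpers: "namenode" formats the NameNode metadata,
+# "zkfc" formats the HA failover znode in ZooKeeper (hdfs zkfc -formatZK).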
+# This script runs the hadoop core commands.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+function print_usage(){
+ echo "Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo " or"
+ echo " where COMMAND is one of:"
+ echo " fs run a generic filesystem user client"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " note: please use \"yarn jar\" to launch"
+ echo " YARN applications, not this command."
+ echo " checknative [-a|-h] check native hadoop and compression libraries availability"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+ echo " classpath prints the class path needed to get the"
+ echo " credential interact with credential providers"
+ echo " Hadoop jar and the required libraries"
+ echo " daemonlog get/set the log level for each daemon"
+ echo " trace view and modify Hadoop tracing settings"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+
+ #hdfs commands
+ namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups|portmap|nfs3)
+ echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
+ echo "Instead use the hdfs command for it." 1>&2
+ echo "" 1>&2
+ #try to locate hdfs and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
+ exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
+ exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ else
+ echo "HADOOP_HDFS_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #mapred commands for backwards compatibility
+ pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
+ echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
+ echo "Instead use the mapred command for it." 1>&2
+ echo "" 1>&2
+ #try to locate mapred and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
+ exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
+ exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ else
+ echo "HADOOP_MAPRED_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #core commands
+ *)
+ # the core commands
+ if [ "$COMMAND" = "fs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+ echo "WARNING: Use \"yarn jar\" to launch YARN applications." 1>&2
+ fi
+ elif [ "$COMMAND" = "key" ] ; then
+ CLASS=org.apache.hadoop.crypto.key.KeyShell
+ elif [ "$COMMAND" = "checknative" ] ; then
+ CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "credential" ] ; then
+ CLASS=org.apache.hadoop.security.alias.CredentialShell
+ elif [ "$COMMAND" = "trace" ] ; then
+ CLASS=org.apache.hadoop.tracing.TraceAdmin
+ elif [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 1 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit
+ fi
+ elif [[ "$COMMAND" = -* ]] ; then
+ # class and package names cannot begin with a -
+ echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
+ exit 1
+ else
+ CLASS=$COMMAND
+ fi
+
+ # cygwin path translation
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+ fi
+
+ shift
+
+ # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+
+ #make sure security appender is turned off
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+ export CLASSPATH=$CLASSPATH
+ exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+ ;;
+
+esac
diff --git a/MSH-PIC/hadoop/bin/hadoop.cmd b/MSH-PIC/hadoop/bin/hadoop.cmd
new file mode 100644
index 0000000..ccf2fff
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/hadoop.cmd
@@ -0,0 +1,272 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+
+@rem This script runs the hadoop core commands.
+
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem HADOOP_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
+@rem added in the beginning of the global
+@rem classpath. Can be defined, for example,
+@rem by doing
+@rem export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem HADOOP_OPTS Extra Java runtime options.
+@rem
+@rem HADOOP_CLIENT_OPTS when the respective command is run.
+@rem HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
+@rem for e.g. HADOOP_CLIENT_OPTS applies to
+@rem more than one command (fs, dfs, fsck,
+@rem dfsadmin etc)
+@rem
+@rem HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+@rem
+@rem HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+call :updatepath %HADOOP_BIN_PATH%
+
+:main
+ setlocal enabledelayedexpansion
+
+ set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+ if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+ )
+
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+
+ set hadoop-command=%1
+ if not defined hadoop-command (
+ goto print_usage
+ )
+
+ call :make_command_arguments %*
+
+ set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups
+ for %%i in ( %hdfscommands% ) do (
+ if %hadoop-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ @echo DEPRECATED: Use of this script to execute hdfs command is deprecated. 1>&2
+ @echo Instead use the hdfs command for it. 1>&2
+ if exist %HADOOP_HDFS_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HDFS_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_HDFS_HOME not found!
+ goto :eof
+ )
+ )
+
+ set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker
+ for %%i in ( %mapredcommands% ) do (
+ if %hadoop-command% == %%i set mapredcommand=true
+ )
+ if defined mapredcommand (
+ @echo DEPRECATED: Use of this script to execute mapred command is deprecated. 1>&2
+ @echo Instead use the mapred command for it. 1>&2
+ if exist %HADOOP_MAPRED_HOME%\bin\mapred.cmd (
+ call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\mapred.cmd (
+ call %HADOOP_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_MAPRED_HOME not found!
+ goto :eof
+ )
+ )
+
+ if %hadoop-command% == classpath (
+ if not defined hadoop-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key
+ for %%i in ( %corecommands% ) do (
+ if %hadoop-command% == %%i set corecommand=true
+ )
+ if defined corecommand (
+ call :%hadoop-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hadoop-command%
+ )
+
+ set path=%PATH%;%HADOOP_BIN_PATH%
+
+ @rem Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+
+ @rem make sure security appender is turned off
+ if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,NullAppender
+ )
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER%
+
+ call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments%
+
+ exit /b %ERRORLEVEL%
+
+:fs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ goto :eof
+
+:jar
+ if defined YARN_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ ) else if defined YARN_CLIENT_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ )
+ set CLASS=org.apache.hadoop.util.RunJar
+ goto :eof
+
+:checknative
+ set CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:credential
+ set CLASS=org.apache.hadoop.security.alias.CredentialShell
+ goto :eof
+
+:key
+ set CLASS=org.apache.hadoop.crypto.key.KeyShell
+ goto :eof
+
+:updatepath
+ set path_to_add=%*
+ set current_path_comparable=%path%
+ set current_path_comparable=%current_path_comparable: =_%
+ set current_path_comparable=%current_path_comparable:(=_%
+ set current_path_comparable=%current_path_comparable:)=_%
+ set path_to_add_comparable=%path_to_add%
+ set path_to_add_comparable=%path_to_add_comparable: =_%
+ set path_to_add_comparable=%path_to_add_comparable:(=_%
+ set path_to_add_comparable=%path_to_add_comparable:)=_%
+
+ for %%i in ( %current_path_comparable% ) do (
+ if /i "%%i" == "%path_to_add_comparable%" (
+ set path_to_add_exist=true
+ )
+ )
+ set system_path_comparable=
+ set path_to_add_comparable=
+ if not defined path_to_add_exist path=%path_to_add%;%path%
+ set path_to_add=
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _arguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _arguments (
+ set _arguments=%1
+ ) else (
+ set _arguments=!_arguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hadoop-command-arguments=%_arguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo fs run a generic filesystem user client
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo note: please use "yarn jar" to launch
+ @echo YARN applications, not this command.
+ @echo checknative [-a^|-h] check native hadoop and compression libraries availability
+ @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+ @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo credential interact with credential providers
+ @echo key manage keys via the KeyProvider
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MSH-PIC/hadoop/bin/hdfs b/MSH-PIC/hadoop/bin/hdfs
new file mode 100644
index 0000000..7f93738
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/hdfs
@@ -0,0 +1,308 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Environment Variables
+#
+# JSVC_HOME home directory of jsvc binary. Required for starting secure
+# datanode.
+#
+# JSVC_OUTFILE path to jsvc output file. Defaults to
+# $HADOOP_LOG_DIR/jsvc.out.
+#
+# JSVC_ERRFILE path to jsvc error file. Defaults to $HADOOP_LOG_DIR/jsvc.err.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin" > /dev/null; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+function print_usage(){
+ echo "Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND"
+ echo " where COMMAND is one of:"
+ echo " dfs run a filesystem command on the file systems supported in Hadoop."
+ echo " classpath prints the classpath"
+ echo " namenode -format format the DFS filesystem"
+ echo " secondarynamenode run the DFS secondary namenode"
+ echo " namenode run the DFS namenode"
+ echo " journalnode run the DFS journalnode"
+ echo " zkfc run the ZK Failover Controller daemon"
+ echo " datanode run a DFS datanode"
+ echo " dfsadmin run a DFS admin client"
+ echo " haadmin run a DFS HA admin client"
+ echo " fsck run a DFS filesystem checking utility"
+ echo " balancer run a cluster balancing utility"
+ echo " jmxget get JMX exported values from NameNode or DataNode."
+ echo " mover run a utility to move block replicas across"
+ echo " storage types"
+ echo " oiv apply the offline fsimage viewer to an fsimage"
+ echo " oiv_legacy apply the offline fsimage viewer to an legacy fsimage"
+ echo " oev apply the offline edits viewer to an edits file"
+ echo " fetchdt fetch a delegation token from the NameNode"
+ echo " getconf get config values from configuration"
+ echo " groups get the groups which users belong to"
+ echo " snapshotDiff diff two snapshots of a directory or diff the"
+ echo " current directory contents with a snapshot"
+ echo " lsSnapshottableDir list all snapshottable dirs owned by the current user"
+ echo " Use -help to see options"
+ echo " portmap run a portmap service"
+ echo " nfs3 run an NFS version 3 gateway"
+ echo " cacheadmin configure the HDFS cache"
+ echo " crypto configure HDFS encryption zones"
+ echo " storagepolicies list/get/set block storage policies"
+ echo " version print the version"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+ # There are also debug commands, but they don't show up in this listing.
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_secure_dn="true"
+ else
+ echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+ "isn't set. Falling back to starting insecure DN."
+ fi
+fi
+
+# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
+if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_privileged_nfs="true"
+ else
+ echo "It looks like you're trying to start a privileged NFS server, but"\
+ "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
+ fi
+fi
+
+if [ "$COMMAND" = "namenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+# HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_JMX_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+ CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_JMX_OPTS"
+ if [ "$starting_secure_dn" = "true" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
+ else
+ HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+ fi
+elif [ "$COMMAND" = "journalnode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "haadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "mover" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+elif [ "$COMMAND" = "jmxget" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+elif [ "$COMMAND" = "oiv" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+elif [ "$COMMAND" = "oiv_legacy" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+elif [ "$COMMAND" = "oev" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+elif [ "$COMMAND" = "fetchdt" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetConf
+elif [ "$COMMAND" = "groups" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+elif [ "$COMMAND" = "snapshotDiff" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+elif [ "$COMMAND" = "portmap" ] ; then
+ CLASS=org.apache.hadoop.portmap.Portmap
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
+elif [ "$COMMAND" = "nfs3" ] ; then
+ CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
+elif [ "$COMMAND" = "cacheadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+elif [ "$COMMAND" = "crypto" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+elif [ "$COMMAND" = "debug" ]; then
+ CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+elif [ "$COMMAND" = "classpath" ]; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+else
+ CLASS="$COMMAND"
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+export CLASSPATH=$CLASSPATH
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+# Check to see if we should start a secure datanode
+if [ "$starting_secure_dn" = "true" ]; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+ else
+ HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_SECURE_DN_PID" \
+ -nodetach \
+ -user "$HADOOP_SECURE_DN_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
+elif [ "$starting_privileged_nfs" = "true" ] ; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+ else
+ HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
+ -nodetach \
+ -user "$HADOOP_PRIVILEGED_NFS_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+else
+ # run it
+ exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+fi
+
diff --git a/MSH-PIC/hadoop/bin/hdfs.cmd b/MSH-PIC/hadoop/bin/hdfs.cmd
new file mode 100644
index 0000000..d52f52e
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/hdfs.cmd
@@ -0,0 +1,234 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
+ call %HADOOP_CONF_DIR%\hadoop-env.cmd
+ )
+
+ set hdfs-command=%1
+ call :make_command_arguments %*
+
+ if not defined hdfs-command (
+ goto print_usage
+ )
+
+ if %hdfs-command% == classpath (
+ if not defined hdfs-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+ set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath
+ for %%i in ( %hdfscommands% ) do (
+ if %hdfs-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ call :%hdfs-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hdfs-command%
+ )
+
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:namenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS%
+ goto :eof
+
+:journalnode
+ set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS%
+ goto :eof
+
+:zkfc
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS%
+ goto :eof
+
+:secondarynamenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS%
+ goto :eof
+
+:datanode
+ set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode
+ set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS%
+ goto :eof
+
+:dfs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:dfsadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:haadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:fsck
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:balancer
+ set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS%
+ goto :eof
+
+:jmxget
+ set CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:oiv
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+ goto :eof
+
+:oev
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+ goto :eof
+
+:fetchdt
+ set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+ goto :eof
+
+:getconf
+ set CLASS=org.apache.hadoop.hdfs.tools.GetConf
+ goto :eof
+
+:groups
+ set CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+ goto :eof
+
+:snapshotDiff
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+ goto :eof
+
+:lsSnapshottableDir
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+ goto :eof
+
+:cacheadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+ goto :eof
+
+:mover
+ set CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
+ goto :eof
+
+:storagepolicies
+ set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _hdfsarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hdfsarguments (
+ set _hdfsarguments=%1
+ ) else (
+ set _hdfsarguments=!_hdfsarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hdfs-command-arguments=%_hdfsarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo dfs run a filesystem command on the file systems supported in Hadoop.
+ @echo namenode -format format the DFS filesystem
+ @echo secondarynamenode run the DFS secondary namenode
+ @echo namenode run the DFS namenode
+ @echo journalnode run the DFS journalnode
+ @echo zkfc run the ZK Failover Controller daemon
+ @echo datanode run a DFS datanode
+ @echo dfsadmin run a DFS admin client
+ @echo haadmin run a DFS HA admin client
+ @echo fsck run a DFS filesystem checking utility
+ @echo balancer run a cluster balancing utility
+ @echo jmxget get JMX exported values from NameNode or DataNode.
+ @echo oiv apply the offline fsimage viewer to an fsimage
+ @echo oev apply the offline edits viewer to an edits file
+ @echo fetchdt fetch a delegation token from the NameNode
+ @echo getconf get config values from configuration
+ @echo groups get the groups which users belong to
+ @echo snapshotDiff diff two snapshots of a directory or diff the
+ @echo current directory contents with a snapshot
+ @echo lsSnapshottableDir list all snapshottable dirs owned by the current user
+ @echo Use -help to see options
+ @echo cacheadmin configure the HDFS cache
+ @echo mover run a utility to move block replicas across storage types
+ @echo storagepolicies list/get/set block storage policies
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MSH-PIC/hadoop/bin/ini_hdfs.sh b/MSH-PIC/hadoop/bin/ini_hdfs.sh
new file mode 100644
index 0000000..acc17df
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/ini_hdfs.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+
+MASTER_IP=192.168.20.193
+SLAVE1_IP=192.168.20.194
+
+BASE_DIR=/home/tsg/olap
+VERSION=hadoop-2.7.1
+
+function ini_namenode() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hadoop namenode -format
+
+if [ $? -eq "0" ];then
+# scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
+ echo yes
+else
+ echo no
+fi
+}
+
+function ini_zk() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hdfs zkfc -formatZK
+
+if [ $? -eq "0" ];then
+ echo yes
+else
+ echo no
+fi
+}
+
+case $1 in
+namenode)
+ini_namenode
+;;
+zkfc)
+ini_zk
+;;
+* )
+echo "Please specify a valid command: namenode or zkfc."
+;;
+esac
+
diff --git a/MSH-PIC/hadoop/bin/mapred b/MSH-PIC/hadoop/bin/mapred
new file mode 100644
index 0000000..fe16e07
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/mapred
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+ . ${HADOOP_LIBEXEC_DIR}/mapred-config.sh
+else
+ . "$bin/mapred-config.sh"
+fi
+
+function print_usage(){
+ echo "Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND"
+ echo " where COMMAND is one of:"
+ echo " pipes run a Pipes job"
+ echo " job manipulate MapReduce jobs"
+ echo " queue get information regarding JobQueues"
+ echo " classpath prints the class path needed for running"
+ echo " mapreduce subcommands"
+ echo " historyserver run job history servers as a standalone daemon"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+ echo " hsadmin job history server admin interface"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+if [ "$COMMAND" = "job" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobClient
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobQueueClient
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "pipes" ] ; then
+ CLASS=org.apache.hadoop.mapred.pipes.Submitter
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "sampler" ] ; then
+ CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "classpath" ] ; then
+ echo -n
+elif [ "$COMMAND" = "historyserver" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ HADOOP_OPTS="$HADOOP_OPTS -Dmapred.jobsummary.logger=${HADOOP_JHS_LOGGER:-INFO,console} $HADOOP_JOB_HISTORYSERVER_OPTS"
+ if [ "$HADOOP_JOB_HISTORYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$HADOOP_JOB_HISTORYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "mradmin" ] \
+ || [ "$COMMAND" = "jobtracker" ] \
+ || [ "$COMMAND" = "tasktracker" ] \
+ || [ "$COMMAND" = "groups" ] ; then
+ echo "Sorry, the $COMMAND command is no longer supported."
+ echo "You may find similar functionality with the \"yarn\" shell command."
+ print_usage
+ exit 1
+elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "hsadmin" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+else
+ echo $COMMAND - invalid command
+ print_usage
+ exit 1
+fi
+
+# for developers, add mapred classes to CLASSPATH
+if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools
+fi
+
+# for releases, add core mapred jar & webapps to CLASSPATH
+if [ -d "$HADOOP_PREFIX/${MAPRED_DIR}/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/${MAPRED_DIR}
+fi
+for f in $HADOOP_MAPRED_HOME/${MAPRED_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# Need YARN jars also
+for f in $HADOOP_YARN_HOME/${YARN_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/${MAPRED_LIB_JARS_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add modules to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/modules/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+export CLASSPATH
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
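For orientation, a few typical invocations of the mapred wrapper above (this assumes HADOOP_CONF_DIR and JAVA_HOME are already set in the environment):

    mapred classpath        # print the MapReduce client classpath without starting a JVM
    mapred job -list        # list jobs through org.apache.hadoop.mapred.JobClient
    mapred historyserver    # run the JobHistoryServer in the foreground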
diff --git a/MSH-PIC/hadoop/bin/mapred.cmd b/MSH-PIC/hadoop/bin/mapred.cmd
new file mode 100644
index 0000000..550b1ed
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/mapred.cmd
@@ -0,0 +1,216 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop mapred command script
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~`%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\mapred-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %MAPRED_CONF_DIR%\mapred-env.cmd (
+ call %MAPRED_CONF_DIR%\mapred-env.cmd
+ )
+ set mapred-command=%1
+ call :make_command_arguments %*
+
+ if not defined mapred-command (
+ goto print_usage
+ )
+
+ @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+ if defined MAPRED_HEAPSIZE (
+ @rem echo run with Java heapsize %MAPRED_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%MAPRED_HEAPSIZE%m
+ )
+
+ @rem CLASSPATH initially contains HADOOP_CONF_DIR and MAPRED_CONF_DIR
+ if not defined HADOOP_CONF_DIR (
+ echo NO HADOOP_CONF_DIR set.
+ echo Please specify it either in mapred-env.cmd or in the environment.
+ goto :eof
+ )
+
+ set CLASSPATH=%HADOOP_CONF_DIR%;%MAPRED_CONF_DIR%;%CLASSPATH%
+
+ @rem for developers, add Hadoop classes to CLASSPATH
+ if exist %HADOOP_MAPRED_HOME%\build\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\classes
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\webapps (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\test\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\test\classes
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\tools (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\tools
+ )
+
+ @rem Need YARN jars also
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+
+ @rem add libs to CLASSPATH
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
+
+ @rem add modules to CLASSPATH
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\*
+
+ if %mapred-command% == classpath (
+ if not defined mapred-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ call :%mapred-command% %mapred-command-arguments%
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %mapred-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:job
+ set CLASS=org.apache.hadoop.mapred.JobClient
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:queue
+ set CLASS=org.apache.hadoop.mapred.JobQueueClient
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:sampler
+ set CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:historyserver
+ set CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dmapred.jobsummary.logger=%HADOOP_JHS_LOGGER% %HADOOP_JOB_HISTORYSERVER_OPTS%
+ if defined HADOOP_JOB_HISTORYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%HADOOP_JOB_HISTORYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:hsadmin
+ set CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:pipes
+ goto not_supported
+
+:mradmin
+ goto not_supported
+
+:jobtracker
+ goto not_supported
+
+:tasktracker
+ goto not_supported
+
+:groups
+ goto not_supported
+
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if [%2] == [] goto :eof
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ shift
+ set _mapredarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _mapredarguments (
+ set _mapredarguments=%1
+ ) else (
+ set _mapredarguments=!_mapredarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set mapred-command-arguments=%_mapredarguments%
+ goto :eof
+
+:not_supported
+ @echo Sorry, the %mapred-command% command is no longer supported.
+ @echo You may find similar functionality with the "yarn" shell command.
+ goto print_usage
+
+:print_usage
+ @echo Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo job manipulate MapReduce jobs
+ @echo queue get information regarding JobQueues
+ @echo classpath prints the class path needed for running
+ @echo mapreduce subcommands
+ @echo historyserver run job history servers as a standalone daemon
+ @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+ @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo hsadmin job history server admin interface
+ @echo
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MSH-PIC/hadoop/bin/rcc b/MSH-PIC/hadoop/bin/rcc
new file mode 100644
index 0000000..22bffff
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/rcc
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop record compiler
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HADOOP_OPTS Extra Java runtime options.
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+#
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# restore ordinary behaviour
+unset IFS
+
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+
+# run it
+exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
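The rcc wrapper above only requires JAVA_HOME; any extra JVM flags are passed through HADOOP_OPTS, as documented in its header. A hypothetical invocation (mytypes.jr is an illustrative record definition file, not part of this tree):

    export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
    export HADOOP_OPTS="-Xmx256m"
    bin/rcc mytypes.jr    # runs org.apache.hadoop.record.compiler.generated.Rcc on the file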
diff --git a/MSH-PIC/hadoop/bin/set_hdfs_env.sh b/MSH-PIC/hadoop/bin/set_hdfs_env.sh
new file mode 100644
index 0000000..146be84
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/set_hdfs_env.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+source /etc/profile
+
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME=/home/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ chkconfig --add keephdfsmaster
+ chkconfig keephdfsmaster on
+fi
+
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ chkconfig --add keephdfsslave
+ chkconfig keephdfsslave on
+fi
+
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ chkconfig --add keephdfsworker
+ chkconfig keephdfsworker on
+fi
+
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ chkconfig --add keephdfsjournal
+ chkconfig keephdfsjournal on
+fi
+}
+
+case $1 in
+journal)
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ service keephdfsjournal start && sleep 5
+ journal_dae=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
+ if [ $journal_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ service keephdfsmaster start && sleep 5
+ master_dae=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+slave)
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ service keephdfsslave start && sleep 5
+ slave_dae=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
+ if [ $slave_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ service keephdfsworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
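A sketch of how set_hdfs_env.sh is meant to be driven, inferred from its case arms; the keephdfs* init scripts and dae-hdfs*.sh watchdogs under /home/tsg/olap/hadoop-2.7.1/sbin are assumed to be installed separately:

    bash set_hdfs_env.sh chkconfig   # write /etc/profile.d/hadoop.sh and register the keephdfs* services
    bash set_hdfs_env.sh journal     # start keephdfsjournal and its watchdog if not already running
    bash set_hdfs_env.sh master      # likewise for the NameNode host (slave/worker follow the same pattern)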
diff --git a/MSH-PIC/hadoop/bin/set_yarn_env.sh b/MSH-PIC/hadoop/bin/set_yarn_env.sh
new file mode 100644
index 0000000..8ee107f
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/set_yarn_env.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+source /etc/profile
+
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME=/home/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ chkconfig --add keepyarnhistory
+ chkconfig keepyarnhistory on
+fi
+
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ chkconfig --add keepyarnmaster
+ chkconfig keepyarnmaster on
+fi
+
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ chkconfig --add keepyarnworker
+ chkconfig keepyarnworker on
+fi
+}
+
+case $1 in
+history)
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ service keepyarnhistory start && sleep 5
+ history_dae=`ps -ef | grep "dae-yarnhistory.sh" | grep -v grep | wc -l`
+ if [ $history_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnhistory.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ service keepyarnmaster start && sleep 5
+ master_dae=`ps -ef | grep "dae-yarnmaster.sh" | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ service keepyarnworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-yarnworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
+
diff --git a/MSH-PIC/hadoop/bin/test-container-executor b/MSH-PIC/hadoop/bin/test-container-executor
new file mode 100644
index 0000000..df8c3db
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/test-container-executor
Binary files differ
diff --git a/MSH-PIC/hadoop/bin/yarn b/MSH-PIC/hadoop/bin/yarn
new file mode 100644
index 0000000..0e4c5a2
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/yarn
@@ -0,0 +1,330 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop command script
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# YARN_USER_CLASSPATH Additional user CLASSPATH entries.
+#
+# YARN_USER_CLASSPATH_FIRST If set to a non-empty value, the user classpath
+# specified in YARN_USER_CLASSPATH will be
+# prepended to YARN's final classpath
+# instead of appended at the end.
+#
+# YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+# eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+# size for the NodeManager. If you set the
+# heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+# they take precedence.
+#
+# YARN_OPTS Extra Java runtime options.
+#
+# YARN_CLIENT_OPTS when the respective command is run.
+# YARN_{COMMAND}_OPTS etc YARN_NODEMANAGER_OPTS applies to NodeManager
+# for e.g. YARN_CLIENT_OPTS applies to
+# more than one command (fs, dfs, fsck,
+# dfsadmin etc)
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+#
+# YARN_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+function print_usage(){
+ echo "Usage: yarn [--config confdir] [COMMAND | CLASSNAME]"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo " or"
+ echo " where COMMAND is one of:"
+ echo " resourcemanager -format-state-store deletes the RMStateStore"
+ echo " resourcemanager run the ResourceManager"
+ echo " nodemanager run a nodemanager on each slave"
+ echo " timelineserver run the timeline server"
+ echo " rmadmin admin tools"
+ echo " sharedcachemanager run the SharedCacheManager daemon"
+ echo " scmadmin SharedCacheManager admin tools"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " application prints application(s)"
+ echo " report/kill application"
+ echo " applicationattempt prints applicationattempt(s)"
+ echo " report"
+ echo " container prints container(s) report"
+ echo " node prints node report(s)"
+ echo " queue prints queue information"
+ echo " logs dump container logs"
+ echo " classpath prints the class path needed to"
+ echo " get the Hadoop jar and the"
+ echo " required libraries"
+ echo " cluster prints cluster information"
+ echo " daemonlog get/set the log level for each"
+ echo " daemon"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ print_usage
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $YARN_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR & $YARN_CONF_DIR
+if [ ! -d "$HADOOP_CONF_DIR" ]; then
+ echo No HADOOP_CONF_DIR set.
+ echo Please specify it either in yarn-env.sh or in the environment.
+ exit 1
+fi
+
+CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$HADOOP_YARN_HOME/yarn-api/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-api/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-mapreduce/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-mapreduce/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-master-worker/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-master-worker/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/tools
+fi
+
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_DIR}/*
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_LIB_JARS_DIR}/*
+
+# Add user defined YARN_USER_CLASSPATH to the class path (if defined)
+if [ -n "$YARN_USER_CLASSPATH" ]; then
+ if [ -n "$YARN_USER_CLASSPATH_FIRST" ]; then
+ # User requested to add the custom entries at the beginning
+ CLASSPATH=${YARN_USER_CLASSPATH}:${CLASSPATH}
+ else
+ # By default we will just append the extra entries at the end
+ CLASSPATH=${CLASSPATH}:${YARN_USER_CLASSPATH}
+ fi
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+if [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+elif [ "$COMMAND" = "rmadmin" ] ; then
+ CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "scmadmin" ] ; then
+ CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "application" ] ||
+ [ "$COMMAND" = "applicationattempt" ] ||
+ [ "$COMMAND" = "container" ]; then
+ CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+ set -- $COMMAND $@
+elif [ "$COMMAND" = "node" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "resourcemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+ YARN_OPTS="$YARN_OPTS $YARN_RESOURCEMANAGER_OPTS $YARN_RESOURCEMANAGER_JMX_OPTS"
+ if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_RESOURCEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "historyserver" ] ; then
+ echo "DEPRECATED: Use of this command to start the timeline server is deprecated." 1>&2
+ echo "Instead use the timelineserver command for it." 1>&2
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+ YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
+ if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "timelineserver" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/timelineserver-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+ YARN_OPTS="$YARN_OPTS $YARN_TIMELINESERVER_OPTS"
+ if [ "$YARN_TIMELINESERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_TIMELINESERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "sharedcachemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/scm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
+ YARN_OPTS="$YARN_OPTS $YARN_SHAREDCACHEMANAGER_OPTS"
+ if [ "$YARN_SHAREDCACHEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_SHAREDCACHEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "nodemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/nm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+ YARN_OPTS="$YARN_OPTS -server $YARN_NODEMANAGER_OPTS $YARN_NODEMANAGER_JMX_OPTS"
+ if [ "$YARN_NODEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_NODEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "proxyserver" ] ; then
+ CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+ YARN_OPTS="$YARN_OPTS $YARN_PROXYSERVER_OPTS"
+ if [ "$YARN_PROXYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_PROXYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "logs" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "cluster" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+else
+ CLASS=$COMMAND
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+ YARN_LOG_DIR=$(cygpath -w "$YARN_LOG_DIR" 2>/dev/null)
+fi
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+HADOOP_HOME=$HADOOP_PREFIX
+if $cygwin; then
+ HADOOP_HOME=$(cygpath -w "$HADOOP_HOME" 2>/dev/null)
+fi
+export HADOOP_HOME
+YARN_OPTS="$YARN_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ if $cygwin; then
+ JAVA_LIBRARY_PATH=$(cygpath -w "$JAVA_LIBRARY_PATH" 2>/dev/null)
+ fi
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
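For orientation, a few common invocations of the yarn wrapper above (standard YARN CLI commands; <applicationId> is a placeholder):

    yarn version                              # print version info via org.apache.hadoop.util.VersionInfo
    yarn application -list                    # list running applications through ApplicationCLI
    yarn node -list                           # report NodeManager status
    yarn logs -applicationId <applicationId>  # dump aggregated container logs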
diff --git a/MSH-PIC/hadoop/bin/yarn.cmd b/MSH-PIC/hadoop/bin/yarn.cmd
new file mode 100644
index 0000000..3cd57a7
--- /dev/null
+++ b/MSH-PIC/hadoop/bin/yarn.cmd
@@ -0,0 +1,332 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop command script
+@rem
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem YARN_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+@rem eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+@rem size for the NodeManager. If you set the
+@rem heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+@rem they take precedence.
+@rem
+@rem YARN_OPTS Extra Java runtime options.
+@rem
+@rem YARN_CLIENT_OPTS when the respective command is run.
+@rem YARN_{COMMAND}_OPTS etc YARN_NODEMANAGER_OPTS applies to NodeManager
+@rem for e.g. YARN_CLIENT_OPTS applies to
+@rem more than one command (fs, dfs, fsck,
+@rem dfsadmin etc)
+@rem
+@rem YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+@rem
+@rem YARN_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %YARN_CONF_DIR%\yarn-env.cmd (
+ call %YARN_CONF_DIR%\yarn-env.cmd
+ )
+
+ set yarn-command=%1
+ call :make_command_arguments %*
+
+ if not defined yarn-command (
+ goto print_usage
+ )
+
+ @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+ if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+ )
+
+ @rem CLASSPATH initially contains HADOOP_CONF_DIR & YARN_CONF_DIR
+ if not defined HADOOP_CONF_DIR (
+ echo No HADOOP_CONF_DIR set.
+ echo Please specify it either in yarn-env.cmd or in the environment.
+ goto :eof
+ )
+
+ set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH%
+
+ @rem for developers, add Hadoop classes to CLASSPATH
+ if exist %HADOOP_YARN_HOME%\yarn-api\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-api\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-mapreduce\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-mapreduce\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-master-worker\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-master-worker\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\test\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\tools (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
+ )
+
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
+
+ if %yarn-command% == classpath (
+ if not defined yarn-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
+ application applicationattempt cluster container node queue logs daemonlog historyserver ^
+ timelineserver classpath
+ for %%i in ( %yarncommands% ) do (
+ if %yarn-command% == %%i set yarncommand=true
+ )
+ if defined yarncommand (
+ call :%yarn-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%yarn-command%
+ )
+
+ if defined JAVA_LIBRARY_PATH (
+ set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+ )
+
+ set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:rmadmin
+ set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:application
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:applicationattempt
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:cluster
+ set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:container
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:node
+ set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:queue
+ set CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:resourcemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
+ set YARN_OPTS=%YARN_OPTS% %YARN_RESOURCEMANAGER_OPTS%
+ if defined YARN_RESOURCEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:historyserver
+ @echo DEPRECATED: Use of this command to start the timeline server is deprecated. 1>&2
+ @echo Instead use the timelineserver command for it. 1>&2
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
+ if defined YARN_HISTORYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:timelineserver
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_TIMELINESERVER_OPTS%
+ if defined YARN_TIMELINESERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_TIMELINESERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:nodemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
+ set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS%
+ if defined YARN_NODEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:proxyserver
+ set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS%
+ if defined YARN_PROXYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:jar
+ set CLASS=org.apache.hadoop.util.RunJar
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:logs
+ set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _yarnarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _yarnarguments (
+ set _yarnarguments=%1
+ ) else (
+ set _yarnarguments=!_yarnarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set yarn-command-arguments=%_yarnarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo resourcemanager run the ResourceManager
+ @echo nodemanager run a nodemanager on each slave
+ @echo timelineserver run the timeline server
+ @echo rmadmin admin tools
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo application prints application(s) report/kill application
+ @echo applicationattempt prints applicationattempt(s) report
+ @echo cluster prints cluster information
+ @echo container prints container(s) report
+ @echo node prints node report(s)
+ @echo queue prints queue information
+ @echo logs dump container logs
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MSH-PIC/hadoop/etc/hadoop/capacity-scheduler.xml b/MSH-PIC/hadoop/etc/hadoop/capacity-scheduler.xml
new file mode 100644
index 0000000..1e97505
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/capacity-scheduler.xml
@@ -0,0 +1,134 @@
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-applications</name>
+ <value>10000</value>
+ <description>
+ Maximum number of applications that can be pending and running.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+ <value>0.5</value>
+ <description>
+ Maximum percent of resources in the cluster which can be used to run
+ application masters, i.e. it controls the number of concurrently running
+ applications.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.resource-calculator</name>
+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+ <description>
+ The ResourceCalculator implementation to be used to compare
+ Resources in the scheduler.
+ The default i.e. DefaultResourceCalculator only uses Memory while
+ DominantResourceCalculator uses dominant-resource to compare
+ multi-dimensional resources such as Memory, CPU etc.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.queues</name>
+ <value>default</value>
+ <description>
+ The queues at this level (root is the root queue).
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.capacity</name>
+ <value>100</value>
+ <description>Default queue target capacity.</description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+ <value>1</value>
+ <description>
+ Default queue user limit a percentage from 0.0 to 1.0.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+ <value>100</value>
+ <description>
+ The maximum capacity of the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.state</name>
+ <value>RUNNING</value>
+ <description>
+ The state of the default queue. State can be one of RUNNING or STOPPED.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+ <value>*</value>
+ <description>
+ The ACL of who can submit jobs to the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+ <value>*</value>
+ <description>
+ The ACL of who can administer jobs on the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.node-locality-delay</name>
+ <value>40</value>
+ <description>
+ Number of missed scheduling opportunities after which the CapacityScheduler
+ attempts to schedule rack-local containers.
+ Typically this should be set to the number of nodes in the cluster. By default it is
+ set to approximately the number of nodes in one rack, which is 40.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings</name>
+ <value></value>
+ <description>
+ A list of mappings that will be used to assign jobs to queues.
+ The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+ Typically this list will be used to map users to queues,
+ for example, u:%user:%user maps all users to queues with the same name
+ as the user.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+ <value>false</value>
+ <description>
+ If a queue mapping is present, will it override the value specified
+ by the user? This can be used by administrators to place jobs in queues
+ that are different than the one specified by the user.
+ The default is false.
+ </description>
+ </property>
+
+</configuration>
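The queue-mapping syntax documented above can be changed without restarting the ResourceManager; a hypothetical example (user alice and queue analytics are illustrative, not part of this configuration):

    # in capacity-scheduler.xml:
    #   <name>yarn.scheduler.capacity.queue-mappings</name>
    #   <value>u:alice:analytics,u:%user:%user</value>
    # then reload the scheduler configuration on the ResourceManager:
    yarn rmadmin -refreshQueues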
diff --git a/MSH-PIC/hadoop/etc/hadoop/configuration.xsl b/MSH-PIC/hadoop/etc/hadoop/configuration.xsl
new file mode 100644
index 0000000..d50d80b
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/configuration.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/MSH-PIC/hadoop/etc/hadoop/container-executor.cfg b/MSH-PIC/hadoop/etc/hadoop/container-executor.cfg
new file mode 100644
index 0000000..d68cee8
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/container-executor.cfg
@@ -0,0 +1,4 @@
+yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
+banned.users=#comma separated list of users who can not run applications
+min.user.id=1000#Prevent other super-users
+allowed.system.users=##comma separated list of system users who CAN run applications
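This cfg only matters when the LinuxContainerExecutor is enabled; in that case the container-executor binary shipped alongside it must be root-owned and setuid, roughly as below (the group name is whatever yarn.nodemanager.linux-container-executor.group is set to, shown here as a hypothetical "yarn"):

    chown root:yarn $HADOOP_HOME/bin/container-executor
    chmod 6050      $HADOOP_HOME/bin/container-executor    # ---Sr-s---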
diff --git a/MSH-PIC/hadoop/etc/hadoop/core-site.xml b/MSH-PIC/hadoop/etc/hadoop/core-site.xml
new file mode 100644
index 0000000..f380e36
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/home/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
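A quick way to confirm this core-site.xml is the one actually being picked up is to query the effective configuration from any node (hdfs getconf reads the client-side configuration):

    hdfs getconf -confKey fs.defaultFS            # expect hdfs://ns1
    hdfs getconf -confKey ha.zookeeper.quorum     # expect the three 192.168.20.19x:2181 entries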
diff --git a/MSH-PIC/hadoop/etc/hadoop/hadoop-env.cmd b/MSH-PIC/hadoop/etc/hadoop/hadoop-env.cmd
new file mode 100644
index 0000000..5dbd635
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hadoop-env.cmd
@@ -0,0 +1,81 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME. All others are
+@rem optional. When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use. Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+ if not defined HADOOP_CLASSPATH (
+ set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ ) else (
+ set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options. Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+ set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored. %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by
+@rem the user that will run the hadoop daemons. Otherwise there is the
+@rem potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%
diff --git a/MSH-PIC/hadoop/etc/hadoop/hadoop-env.sh b/MSH-PIC/hadoop/etc/hadoop/hadoop-env.sh
new file mode 100644
index 0000000..7b9e5b1
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hadoop-env.sh
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:/home/tsg/olap/hadoop-2.7.1/monitor/hdfs.yaml"
+export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:/home/tsg/olap/hadoop-2.7.1/monitor/hdfs.yaml"
+
+# The java implementation to use.
+#export HADOOP_HEAPSIZE=m
+#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol. Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+ if [ "$HADOOP_CLASSPATH" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+ else
+ export HADOOP_CLASSPATH=$f
+ fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx10240m -Xms10240m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hadoop-2.7.1/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hadoop-2.7.1/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"
+
+export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS -Xmx5120m -Xms5120m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hadoop-2.7.1/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hadoop-2.7.1/logs/ -Dhadoop.security.logger=ERROR,RFAS"
+
+export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS -Xmx1024m -Xms1024m"
+
+export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS -Xmx1024m -Xms1024m"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol. This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+# the user that will run the hadoop daemons. Otherwise there is the
+# potential for a symlink attack.
+export HADOOP_PID_DIR=/home/tsg/olap/hadoop-2.7.1/pids
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
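
The two *_JMX_OPTS exports at the top of this hadoop-env.sh attach the jmx_prometheus_javaagent to the NameNode and DataNode so Prometheus can scrape them on ports 9905 and 9906. A minimal sanity check, assuming the start scripts actually pass these variables to the daemons and the daemons have been restarted on the local host, is to curl the exporter endpoints:

# Ports 9905/9906 come from the -javaagent arguments above; run on the NameNode/DataNode host.
curl -s http://localhost:9905/metrics | head   # NameNode exporter
curl -s http://localhost:9906/metrics | head   # DataNode exporter
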
diff --git a/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics.properties b/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics.properties
new file mode 100644
index 0000000..c1b2eb7
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics.properties
@@ -0,0 +1,75 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+#jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=localhost:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+#rpc.class=org.apache.hadoop.metrics.file.FileContext
+#rpc.period=10
+#rpc.fileName=/tmp/rpcmetrics.log
+
+# Configuration of the "rpc" context for ganglia
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=localhost:8649
+
+
+# Configuration of the "ugi" context for null
+ugi.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "ugi" context for file
+#ugi.class=org.apache.hadoop.metrics.file.FileContext
+#ugi.period=10
+#ugi.fileName=/tmp/ugimetrics.log
+
+# Configuration of the "ugi" context for ganglia
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# ugi.period=10
+# ugi.servers=localhost:8649
+
diff --git a/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties b/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties
new file mode 100644
index 0000000..0c09228
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties
@@ -0,0 +1,68 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifying multiple tags separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
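
The commented-out entries above all follow the [prefix].[source|sink].[instance].[options] syntax described at the top of hadoop-metrics2.properties. As a hedged illustration (the file name and the daemon's working directory are assumptions, not part of this configuration), enabling the NameNode file sink only requires uncommenting one line and restarting the daemon:

# In hadoop-metrics2.properties, uncomment:
#   namenode.sink.file.filename=namenode-metrics.out
# After a NameNode restart, samples arrive every *.period seconds (10 here),
# written relative to the daemon's working directory:
tail -f namenode-metrics.out
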
diff --git a/MSH-PIC/hadoop/etc/hadoop/hadoop-policy.xml b/MSH-PIC/hadoop/etc/hadoop/hadoop-policy.xml
new file mode 100644
index 0000000..2bf5c02
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hadoop-policy.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.client.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.inter.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.namenode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.operations.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.user.mappings.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.policy.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.ha.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HAService protocol used by HAAdmin to manage the
+ active and stand-by states of namenode.</description>
+ </property>
+
+ <property>
+ <name>security.zkfc.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for access to the ZK Failover Controller
+ </description>
+ </property>
+
+ <property>
+ <name>security.qjournal.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for QJournalProtocol, used by the NN to communicate with
+ JNs when using the QuorumJournalManager for edit logs.</description>
+ </property>
+
+ <property>
+ <name>security.mrhs.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HSClientProtocol, used by job clients to
+ communicate with the MR History Server to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <!-- YARN Protocols -->
+
+ <property>
+ <name>security.resourcetracker.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceTrackerProtocol, used by the
+ ResourceManager and NodeManager to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcemanager-administration.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationclient.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationClientProtocol, used by the ResourceManager
+ and applications submission clients to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationmaster.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.containermanagement.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcelocalizer.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
+ and ResourceLocalizer to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.task.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for MRClientProtocol, used by job clients to
+ communicate with the MR ApplicationMaster to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationhistory.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationHistoryProtocol, used by the timeline
+ server and the generic history service client to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+</configuration>
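
Every ACL above is left wide open with "*". If these ever need tightening, the value format is a comma-separated user list, a blank, then a comma-separated group list, and the file can be re-read without restarting the daemons. A small sketch, assuming service-level authorization is enabled in core-site.xml and that the principals shown are placeholders:

# Example of a restricted ACL value inside hadoop-policy.xml:
#   <value>alice,bob hdfs,wheel</value>
# Reload the policy on the running NameNode (and ResourceManager for the YARN ACLs):
hdfs dfsadmin -refreshServiceAcl
yarn rmadmin -refreshServiceAcl
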
diff --git a/MSH-PIC/hadoop/etc/hadoop/hdfs-site.xml b/MSH-PIC/hadoop/etc/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..6d93805
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the host where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.20.193:9000</value>
+ </property>
+ <!-- HTTP address of nn1, used for external access -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.20.193:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the host where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.20.194:9000</value>
+ </property>
+ <!-- HTTP address of nn2, used for external access -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.20.194:50070</value>
+ </property>
+ <!-- Location of the shared NameNode edit log on the JournalNodes (usually co-deployed with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.20.193:8485;192.168.20.194:8485;192.168.20.195:8485/ns1</value>
+ </property>
+ <!-- Local disk directory where each JournalNode stores its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/home/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Java class that HDFS clients use to reach the NameNodes through a proxy and determine which node is currently Active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods used during automatic failover; several are available (see the official documentation). sshfence logs in over SSH and kills the stale Active NameNode process -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence
+ shell(true)</value>
+ </property>
+ <!-- Passwordless SSH private key, required only when the sshfence fencing method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence fencing method; as with the property above, it can be left unset if a shell script is used for fencing instead -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enables automatic failover; it can be left unconfigured if automatic failover is not required -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode threads handling RPC requests; increasing it has a small resource cost -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode threads handling RPC requests; increasing it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth available to the balancer, in bytes per second -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space that HDFS will not use, in bytes -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode connection timeout, in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
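
This hdfs-site.xml defines nameservice ns1 with NameNodes nn1/nn2, QJM shared edits on the three JournalNodes, and automatic failover. Two quick checks that follow from the values above (a sketch, assuming the cluster is running and that core-site.xml points fs.defaultFS at hdfs://ns1):

# Dead-node timeout implied by heartbeat.recheck.interval=100000 ms plus the
# default 3-second heartbeat: 2 * 100000 + 30000 = 230000 ms (230 s).
echo $(( 2 * 100000 + 30000 ))
# Which NameNode is currently Active?
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
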
diff --git a/MSH-PIC/hadoop/etc/hadoop/httpfs-env.sh b/MSH-PIC/hadoop/etc/hadoop/httpfs-env.sh
new file mode 100644
index 0000000..a2701d4
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/httpfs-env.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# Set httpfs specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs HttpFS
+# Java System properties for HttpFS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# HttpFS logs directory
+#
+# export HTTPFS_LOG=${HTTPFS_HOME}/logs
+
+# HttpFS temporary directory
+#
+# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
+
+# The HTTP port used by HttpFS
+#
+# export HTTPFS_HTTP_PORT=14000
+
+# The Admin port used by HttpFS
+#
+# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
+
+# The hostname HttpFS server runs on
+#
+# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
+
+# Indicates if HttpFS is using SSL
+#
+# export HTTPFS_SSL_ENABLED=false
+
+# The location of the SSL keystore if using SSL
+#
+# export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export HTTPFS_SSL_KEYSTORE_PASS=password
diff --git a/MSH-PIC/hadoop/etc/hadoop/httpfs-log4j.properties b/MSH-PIC/hadoop/etc/hadoop/httpfs-log4j.properties
new file mode 100644
index 0000000..a924a48
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/httpfs-log4j.properties
@@ -0,0 +1,35 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time
+# Setup sets its value to '${httpfs.home}/logs'
+
+log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log
+log4j.appender.httpfs.Append=true
+log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfs.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log
+log4j.appender.httpfsaudit.Append=true
+log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfsaudit.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.logger.httpfsaudit=INFO, httpfsaudit
+
+log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs
+log4j.logger.org.apache.hadoop.lib=INFO, httpfs
diff --git a/MSH-PIC/hadoop/etc/hadoop/httpfs-signature.secret b/MSH-PIC/hadoop/etc/hadoop/httpfs-signature.secret
new file mode 100644
index 0000000..56466e9
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/httpfs-signature.secret
@@ -0,0 +1 @@
+hadoop httpfs secret
diff --git a/MSH-PIC/hadoop/etc/hadoop/httpfs-site.xml b/MSH-PIC/hadoop/etc/hadoop/httpfs-site.xml
new file mode 100644
index 0000000..4a718e1
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/httpfs-site.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+</configuration>
diff --git a/MSH-PIC/hadoop/etc/hadoop/kms-acls.xml b/MSH-PIC/hadoop/etc/hadoop/kms-acls.xml
new file mode 100644
index 0000000..cba69f4
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/kms-acls.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+ <!-- This file is hot-reloaded when it changes -->
+
+ <!-- KMS ACLs -->
+
+ <property>
+ <name>hadoop.kms.acl.CREATE</name>
+ <value>*</value>
+ <description>
+ ACL for create-key operations.
+ If the user is not in the GET ACL, the key material is not returned
+ as part of the response.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.DELETE</name>
+ <value>*</value>
+ <description>
+ ACL for delete-key operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.ROLLOVER</name>
+ <value>*</value>
+ <description>
+ ACL for rollover-key operations.
+ If the user is not in the GET ACL, the key material is not returned
+ as part of the response.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET</name>
+ <value>*</value>
+ <description>
+ ACL for get-key-version and get-current-key operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET_KEYS</name>
+ <value>*</value>
+ <description>
+ ACL for get-keys operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET_METADATA</name>
+ <value>*</value>
+ <description>
+ ACL for get-key-metadata and get-keys-metadata operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+ <value>*</value>
+ <description>
+ Complementary ACL for CREATE and ROLLOVER operations to allow the client
+ to provide the key material when creating or rolling a key.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GENERATE_EEK</name>
+ <value>*</value>
+ <description>
+ ACL for generateEncryptedKey CryptoExtension operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.DECRYPT_EEK</name>
+ <value>*</value>
+ <description>
+ ACL for decryptEncryptedKey CryptoExtension operations.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.MANAGEMENT</name>
+ <value>*</value>
+ <description>
+ default ACL for MANAGEMENT operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.GENERATE_EEK</name>
+ <value>*</value>
+ <description>
+ default ACL for GENERATE_EEK operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.DECRYPT_EEK</name>
+ <value>*</value>
+ <description>
+ default ACL for DECRYPT_EEK operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.READ</name>
+ <value>*</value>
+ <description>
+ default ACL for READ operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+
+</configuration>
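
As the comment notes, kms-acls.xml is hot-reloaded, and every ACL here is the permissive default "*". Should any operation need restricting, the value uses the same "users groups" AccessControlList format as hadoop-policy.xml; the snippet below is only an illustration with made-up principals:

# Example: allow key deletion only for the hdfs user and the kmsadmin group.
#   <name>hadoop.kms.acl.DELETE</name>
#   <value>hdfs kmsadmin</value>
# No restart needed -- the KMS picks up changes to kms-acls.xml automatically.
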
diff --git a/MSH-PIC/hadoop/etc/hadoop/kms-env.sh b/MSH-PIC/hadoop/etc/hadoop/kms-env.sh
new file mode 100644
index 0000000..44dfe6a
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/kms-env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password
+
+# The full path to any native libraries that need to be loaded
+# (for example, the location of natively compiled Tomcat Apache Portable
+# Runtime (APR) libraries)
+#
+# export JAVA_LIBRARY_PATH=${HOME}/lib/native
diff --git a/MSH-PIC/hadoop/etc/hadoop/kms-log4j.properties b/MSH-PIC/hadoop/etc/hadoop/kms-log4j.properties
new file mode 100644
index 0000000..9c189f2
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/kms-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
diff --git a/MSH-PIC/hadoop/etc/hadoop/kms-site.xml b/MSH-PIC/hadoop/etc/hadoop/kms-site.xml
new file mode 100644
index 0000000..a810ca4
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/kms-site.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+ <!-- KMS Backend KeyProvider -->
+
+ <property>
+ <name>hadoop.kms.key.provider.uri</name>
+ <value>jceks://file@/${user.home}/kms.keystore</value>
+ <description>
+ URI of the backing KeyProvider for the KMS.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+ <value>none</value>
+ <description>
+ If using the JavaKeyStoreProvider, the password for the keystore file.
+ </description>
+ </property>
+
+ <!-- KMS Cache -->
+
+ <property>
+ <name>hadoop.kms.cache.enable</name>
+ <value>true</value>
+ <description>
+ Whether the KMS will act as a cache for the backing KeyProvider.
+ When the cache is enabled, operations like getKeyVersion, getMetadata,
+ and getCurrentKey will sometimes return cached data without consulting
+ the backing KeyProvider. Cached values are flushed when keys are deleted
+ or modified.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.cache.timeout.ms</name>
+ <value>600000</value>
+ <description>
+ Expiry time for the KMS key version and key metadata cache, in
+ milliseconds. This affects getKeyVersion and getMetadata.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.current.key.cache.timeout.ms</name>
+ <value>30000</value>
+ <description>
+ Expiry time for the KMS current key cache, in milliseconds. This
+ affects getCurrentKey operations.
+ </description>
+ </property>
+
+ <!-- KMS Audit -->
+
+ <property>
+ <name>hadoop.kms.audit.aggregation.window.ms</name>
+ <value>10000</value>
+ <description>
+ Duplicate audit log events within the aggregation window (specified in
+ ms) are quashed to reduce log traffic. A single message for aggregated
+ events is printed at the end of the window, along with a count of the
+ number of aggregated events.
+ </description>
+ </property>
+
+ <!-- KMS Security -->
+
+ <property>
+ <name>hadoop.kms.authentication.type</name>
+ <value>simple</value>
+ <description>
+ Authentication type for the KMS. Can be either &quot;simple&quot;
+ or &quot;kerberos&quot;.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.keytab</name>
+ <value>${user.home}/kms.keytab</value>
+ <description>
+ Path to the keytab with credentials for the configured Kerberos principal.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.principal</name>
+ <value>HTTP/localhost</value>
+ <description>
+ The Kerberos principal to use for the HTTP endpoint.
+ The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.name.rules</name>
+ <value>DEFAULT</value>
+ <description>
+ Rules used to resolve Kerberos principal names.
+ </description>
+ </property>
+
+ <!-- Authentication cookie signature source -->
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider</name>
+ <value>random</value>
+ <description>
+ Indicates how the secret to sign the authentication cookies will be
+ stored. Options are 'random' (default), 'string' and 'zookeeper'.
+ If using a setup with multiple KMS instances, 'zookeeper' should be used.
+ </description>
+ </property>
+
+ <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+ <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+ <description>
+ The Zookeeper ZNode path where the KMS instances will store and retrieve
+ the secret from.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+ <value>#HOSTNAME#:#PORT#,...</value>
+ <description>
+ The Zookeeper connection string, a list of hostnames and port comma
+ separated.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+ <value>kerberos</value>
+ <description>
+ The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+ <value>/etc/hadoop/conf/kms.keytab</value>
+ <description>
+ The absolute path for the Kerberos keytab with the credentials to
+ connect to Zookeeper.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+ <value>kms/#HOSTNAME#</value>
+ <description>
+ The Kerberos service principal used to connect to Zookeeper.
+ </description>
+ </property>
+
+</configuration>
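
With the defaults above, the KMS serves keys from a JCEKS keystore in the service user's home directory over simple authentication, on the default port 16000 from kms-env.sh. A hedged way to exercise it from a client host (the kms-host name is a placeholder):

# List and create keys against this KMS; 16000 is the default KMS_HTTP_PORT.
hadoop key list -provider kms://http@kms-host:16000/kms
hadoop key create demo-key -provider kms://http@kms-host:16000/kms
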
diff --git a/MSH-PIC/hadoop/etc/hadoop/log4j.properties b/MSH-PIC/hadoop/etc/hadoop/log4j.properties
new file mode 100644
index 0000000..a7c7a9a
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/log4j.properties
@@ -0,0 +1,268 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=ERROR,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
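
Note that this copy sets hadoop.root.logger=ERROR,console (the _bak file below keeps the stock INFO default), so client commands are close to silent. When more detail is needed for a single run, the level can be overridden without editing the file, since the Hadoop launcher scripts honour the HADOOP_ROOT_LOGGER environment variable:

# One-off verbose client run despite the ERROR default in this file:
HADOOP_ROOT_LOGGER=INFO,console hdfs dfs -ls /
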
diff --git a/MSH-PIC/hadoop/etc/hadoop/log4j.properties_bak b/MSH-PIC/hadoop/etc/hadoop/log4j.properties_bak
new file mode 100644
index 0000000..7e2ffc7
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/log4j.properties_bak
@@ -0,0 +1,268 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ssZ} %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
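The commented-out appenders above (console, RFAS, RFAAUDIT, the HTTP request logs) are switched on by pointing the matching logger variable at them before a daemon starts; hadoop-daemon.sh later in this change reads HADOOP_ROOT_LOGGER, HADOOP_SECURITY_LOGGER and HDFS_AUDIT_LOGGER from the environment. A minimal sketch, assuming the start scripts from this tree and relative paths inside the Hadoop install:

    # route HDFS audit events to the RFAAUDIT rolling file instead of NullAppender
    export HDFS_AUDIT_LOGGER=INFO,RFAAUDIT
    export HADOOP_SECURITY_LOGGER=INFO,RFAS
    sbin/hadoop-daemon.sh --config etc/hadoop start namenode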
diff --git a/MSH-PIC/hadoop/etc/hadoop/mapred-env.cmd b/MSH-PIC/hadoop/etc/hadoop/mapred-env.cmd
new file mode 100644
index 0000000..f27943f
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/mapred-env.cmd
@@ -0,0 +1,20 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
+
diff --git a/MSH-PIC/hadoop/etc/hadoop/mapred-env.sh b/MSH-PIC/hadoop/etc/hadoop/mapred-env.sh
new file mode 100644
index 0000000..6be1e27
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/mapred-env.sh
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
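mapred-env.sh only pins the JobHistoryServer heap and the MapReduce root logger; the commented variables can be overridden in the same file. A hedged sketch (the log directory below is an assumption, not a path taken from this change):

    export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=2000
    export HADOOP_MAPRED_LOG_DIR=/home/tsg/olap/hadoop-2.7.1/logs/mapred
    export HADOOP_JHS_LOGGER=INFO,RFA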
diff --git a/MSH-PIC/hadoop/etc/hadoop/mapred-queues.xml.template b/MSH-PIC/hadoop/etc/hadoop/mapred-queues.xml.template
new file mode 100644
index 0000000..ce6cd20
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/mapred-queues.xml.template
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+ queues within queues - a feature called hierarchical queues. All queues are
+ defined within the 'queues' tag which is the top level element for this
+ XML document. The queue acls configured here for different queues are
+ checked for authorization only if the configuration property
+ mapreduce.cluster.acls.enabled is set to true. -->
+<queues>
+
+ <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+ <queue>
+
+ <!-- Name of a queue. Queue name cannot contain a ':' -->
+ <name>default</name>
+
+ <!-- properties for a queue, typically used by schedulers,
+ can be defined here -->
+ <properties>
+ </properties>
+
+ <!-- State of the queue. If running, the queue will accept new jobs.
+ If stopped, the queue will not accept new jobs. -->
+ <state>running</state>
+
+ <!-- Specifies the ACLs to check for submitting jobs to this queue.
+ If set to '*', it allows all users to submit jobs to the queue.
+ If set to ' '(i.e. space), no user will be allowed to do this
+ operation. The default value for any queue acl is ' '.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2
+
+ It is only used if authorization is enabled in Map/Reduce by setting
+ the configuration property mapreduce.cluster.acls.enabled to true.
+
+ Irrespective of this ACL configuration, the user who started the
+ cluster and cluster administrators configured via
+ mapreduce.cluster.administrators can do this operation. -->
+ <acl-submit-job> </acl-submit-job>
+
+ <!-- Specifies the ACLs to check for viewing and modifying jobs in this
+ queue. Modifications include killing jobs, tasks of jobs or changing
+ priorities.
+ If set to '*', it allows all users to view, modify jobs of the queue.
+ If set to ' '(i.e. space), no user will be allowed to do this
+ operation.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2
+
+ It is only used if authorization is enabled in Map/Reduce by setting
+ the configuration property mapreduce.cluster.acls.enabled to true.
+
+ Irrespective of this ACL configuration, the user who started the
+ cluster and cluster administrators configured via
+ mapreduce.cluster.administrators can do the above operations on all
+ the jobs in all the queues. The job owner can do all the above
+ operations on his/her job irrespective of this ACL configuration. -->
+ <acl-administer-jobs> </acl-administer-jobs>
+ </queue>
+
+ <!-- Here is a sample of a hierarchical queue configuration
+ where q2 is a child of q1. In this example, q2 is a leaf level
+ queue as it has no queues configured within it. Currently, ACLs
+ and state are only supported for the leaf level queues.
+ Note also the usage of properties for the queue q2.
+ <queue>
+ <name>q1</name>
+ <queue>
+ <name>q2</name>
+ <properties>
+ <property key="capacity" value="20"/>
+ <property key="user-limit" value="30"/>
+ </properties>
+ </queue>
+ </queue>
+ -->
+</queues>
diff --git a/MSH-PIC/hadoop/etc/hadoop/mapred-site.xml b/MSH-PIC/hadoop/etc/hadoop/mapred-site.xml
new file mode 100644
index 0000000..3aa03c8
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/mapred-site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>192.168.20.193:10020</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>192.168.20.193:19888</value>
+ </property>
+</configuration>
+
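mapred-site.xml switches MapReduce onto YARN and fixes the JobHistory RPC and web addresses on 192.168.20.193. A quick hedged check that the history server is reachable on those ports once it is running (the REST path below is the standard history-server info endpoint, not something defined in this repository):

    curl -s http://192.168.20.193:19888/ws/v1/history/info          # web/REST port
    nc -z 192.168.20.193 10020 && echo "jobhistory RPC reachable"   # RPC port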
diff --git a/MSH-PIC/hadoop/etc/hadoop/slaves b/MSH-PIC/hadoop/etc/hadoop/slaves
new file mode 100644
index 0000000..93932e8
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/slaves
@@ -0,0 +1,6 @@
+192.168.20.193
+192.168.20.194
+192.168.20.195
+192.168.20.193
+192.168.20.194
+192.168.20.195
diff --git a/MSH-PIC/hadoop/etc/hadoop/ssl-client.xml.example b/MSH-PIC/hadoop/etc/hadoop/ssl-client.xml.example
new file mode 100644
index 0000000..a50dce4
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/ssl-client.xml.example
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+<property>
+ <name>ssl.client.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload check interval, in milliseconds.
+ Default value is 10000 (10 seconds).
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.keypassword</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+</configuration>
diff --git a/MSH-PIC/hadoop/etc/hadoop/ssl-server.xml.example b/MSH-PIC/hadoop/etc/hadoop/ssl-server.xml.example
new file mode 100644
index 0000000..02d300c
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/ssl-server.xml.example
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+<property>
+ <name>ssl.server.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload check interval, in milliseconds.
+ Default value is 10000 (10 seconds).
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.password</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.keypassword</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+</configuration>
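Both SSL examples expect JKS keystores and truststores whose locations and passwords the operator fills in. A minimal keytool sketch for producing a matching server keystore and truststore; every path, alias and password here is a placeholder, not a value from this configuration:

    keytool -genkeypair -alias hadoop -keyalg RSA -keysize 2048 \
      -keystore /etc/hadoop/ssl/server.jks -storetype JKS \
      -dname "CN=$(hostname -f)" -storepass changeit -keypass changeit
    keytool -exportcert -alias hadoop -keystore /etc/hadoop/ssl/server.jks \
      -storepass changeit -file /tmp/hadoop.crt
    keytool -importcert -alias hadoop -file /tmp/hadoop.crt \
      -keystore /etc/hadoop/ssl/truststore.jks -storepass changeit -noprompt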
diff --git a/MSH-PIC/hadoop/etc/hadoop/yarn-env.cmd b/MSH-PIC/hadoop/etc/hadoop/yarn-env.cmd
new file mode 100644
index 0000000..d863c1e
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/yarn-env.cmd
@@ -0,0 +1,60 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem User for YARN daemons
+if not defined HADOOP_YARN_USER (
+ set HADOOP_YARN_USER=%yarn%
+)
+
+if not defined YARN_CONF_DIR (
+ set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+)
+
+if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+)
+
+if not defined YARN_LOG_DIR (
+ set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
+)
+
+if not defined YARN_LOGFILE (
+ set YARN_LOGFILE=yarn.log
+)
+
+@rem default policy file for service-level authorization
+if not defined YARN_POLICYFILE (
+ set YARN_POLICYFILE=hadoop-policy.xml
+)
+
+if not defined YARN_ROOT_LOGGER (
+ set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
+)
+
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
+if defined JAVA_LIBRARY_PATH (
+ set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file
diff --git a/MSH-PIC/hadoop/etc/hadoop/yarn-env.sh b/MSH-PIC/hadoop/etc/hadoop/yarn-env.sh
new file mode 100644
index 0000000..810ba1b
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/yarn-env.sh
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+export YARN_RESOURCEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9909:/home/tsg/olap/hadoop-2.7.1/monitor/yarn.yaml"
+
+export YARN_NODEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9910:/home/tsg/olap/hadoop-2.7.1/monitor/yarn.yaml"
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+# YARN_HEAPSIZE=1000
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Xmx2048m -Xms1024m"
+
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_TIMELINESERVER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_NODEMANAGER_HEAPSIZE=1000
+
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Xmx2048m -Xms1024m"
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+ YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
diff --git a/MSH-PIC/hadoop/etc/hadoop/yarn-site.xml b/MSH-PIC/hadoop/etc/hadoop/yarn-site.xml
new file mode 100644
index 0000000..366878b
--- /dev/null
+++ b/MSH-PIC/hadoop/etc/hadoop/yarn-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Declare the addresses of the two ResourceManagers -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+  <!-- Configure rm1 -->
+  <!-- Configure the rm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>192.168.20.193</value>
+ </property>
+
+  <!-- Configure the rm1 web application address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>192.168.20.193:8080</value>
+ </property>
+
+  <!-- Configure the rm1 scheduler port, default 8030 -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>192.168.20.193:8030</value>
+ </property>
+
+  <!-- Default port 8031 -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>192.168.20.193:8031</value>
+ </property>
+
+  <!-- Configure the rm1 applications manager interface address and port, default 8032 -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>192.168.20.193:8032</value>
+ </property>
+
+  <!-- Configure the rm1 admin port, default 8033 -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>192.168.20.193:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>192.168.20.193:23142</value>
+ </property>
+
+  <!-- Configure rm2 -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>192.168.20.194</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>192.168.20.194:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>192.168.20.194:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>192.168.20.194:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>192.168.20.194:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>192.168.20.194:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>192.168.20.194:23142</value>
+ </property>
+
+  <!-- Specify the addresses of the ZooKeeper quorum -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+
+  <!-- Enable automatic recovery: if the RM fails while applications are running, they are recovered on restart; default is false -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Enable NodeManager recovery, default is false -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Local filesystem directory in which the NodeManager stores its recovery state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+  <!-- RPC address of the NM; defaults to ${yarn.nodemanager.hostname}:0, an ephemeral port. Because that port changes after a cluster restart, a fixed port is set here to keep the NM-RM connection stable and support NM restart -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/logs/app-logs/</value>
+ </property>
+
+  <!-- Amount of physical memory, in MB, that the NM may allocate to containers; default 8192 -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>61440</value>
+ </property>
+
+  <!-- Minimum allocation for every container request at the RM, in MB; default 1024 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+  <!-- Maximum allocation for every container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb; default 8192 -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>61440</value>
+ </property>
+
+  <!-- Number of vcores that can be allocated to containers. The RM scheduler uses it when allocating resources; it does not limit the physical cores YARN containers use. Default 8; usually set to the total number of CPU cores on the server -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>48</value>
+ </property>
+
+  <!-- Minimum allocation for every container request at the RM, in vcores; default 1 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+  <!-- Maximum allocation for every container request at the RM, in vcores; default 32. Usually set slightly below yarn.nodemanager.resource.cpu-vcores, and the slots requested by a job should not exceed this value -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>48</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+  <!-- Number of ApplicationMaster attempts; defaults to 2 once HA is configured, and can be increased in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://192.168.20.193:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
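With yarn.resourcemanager.ha.rm-ids set to rsm1,rsm2 and recovery state kept in ZooKeeper, one ResourceManager is active and the other standby at any time. A quick way to confirm which is which after start-up:

    yarn rmadmin -getServiceState rsm1   # expected: active or standby
    yarn rmadmin -getServiceState rsm2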
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh b/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh
new file mode 100644
index 0000000..4ec61f4
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
+
+if [ $HAS_JN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
+ set_log jnRes_sum JournalNode
+fi
+
+sleep 60
+done
+
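The dae-*.sh scripts in this directory all follow the same pattern: loop forever, grep the process list every 60 seconds, restart the daemon if it is missing, and record the event through set_log. They are meant to be left running in the background; how they are supervised is not shown in this change, but a minimal hedged launch looks like:

    cd /home/tsg/olap/hadoop-2.7.1/sbin
    nohup ./dae-hdfsjournal.sh > /dev/null 2>&1 &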
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh b/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh
new file mode 100644
index 0000000..57f6519
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
+HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_NN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
+ set_log nnRes_sum NameNode
+fi
+
+if [ $HAS_ZKFC -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
+ set_log zkfcRes_sum DFSZKFailoverController
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh b/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh
new file mode 100644
index 0000000..d504768
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_DN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
+ set_log dnRes_sum DataNode
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh b/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh
new file mode 100644
index 0000000..f732d6f
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_HISTORY=`ps -ef | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l`
+
+if [ $HAS_HISTORY -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/mr-jobhistory-daemon.sh start historyserver > /dev/null
+  set_log historyRes_sum JobHistoryServer
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh b/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh
new file mode 100644
index 0000000..0cb98c2
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_RM=`ps -ef | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l`
+
+if [ $HAS_RM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
+  set_log rmRes_sum ResourceManager
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnworker.sh b/MSH-PIC/hadoop/sbin/dae-yarnworker.sh
new file mode 100644
index 0000000..a2db47f
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnworker.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+  echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+  echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service failure detected - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NM=`ps -ef | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l`
+
+if [ $HAS_NM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+ set_log nmRes_sum NodeManager
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/distribute-exclude.sh b/MSH-PIC/hadoop/sbin/distribute-exclude.sh
new file mode 100644
index 0000000..66fc14a
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/distribute-exclude.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ------------------------------------------------------------------
+#
+# The purpose of this script is to distribute the exclude file (see
+# "dfs.hosts.exclude" in hdfs-site.xml).
+#
+# Input of the script is a local exclude file. The exclude file
+# will be distributed to all the namenodes. The location on the namenodes
+# is determined by the configuration "dfs.hosts.exclude" in hdfs-site.xml
+# (this value is read from the local copy of hdfs-site.xml and must be same
+# on all the namenodes).
+#
+# The user running this script needs write permissions on the target
+# directory on namenodes.
+#
+# After this command, run refresh-namenodes.sh so that namenodes start
+# using the new exclude file.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$1" = '' ] ; then
+  echo "Error: please specify local exclude file as a first argument"
+ exit 1
+else
+ excludeFilenameLocal=$1
+fi
+
+if [ ! -f "$excludeFilenameLocal" ] ; then
+ echo "Error: exclude file [$excludeFilenameLocal] does not exist."
+ exit 1
+fi
+
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
+
+if [ "$excludeFilenameRemote" = '' ] ; then
+ echo \
+ "Error: hdfs getconf -excludeFile returned empty string, " \
+ "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
+ "configuration and on all namenodes"
+ exit 1
+fi
+
+echo "Copying exclude file [$excludeFilenameRemote] to namenodes:"
+
+for namenode in $namenodes ; do
+ echo " [$namenode]"
+ scp "$excludeFilenameLocal" "$namenode:$excludeFilenameRemote"
+ if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+done
+
+if [ "$errorFlag" = '1' ] ; then
+ echo "Error: transfer of exclude file failed, see error messages above."
+ exit 1
+else
+ echo "Transfer of exclude file to all namenodes succeeded."
+fi
+
+# eof
diff --git a/MSH-PIC/hadoop/sbin/hadoop-daemon.sh b/MSH-PIC/hadoop/sbin/hadoop-daemon.sh
new file mode 100644
index 0000000..6a4cd69
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/hadoop-daemon.sh
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a Hadoop command as a daemon.
+#
+# Environment Variables
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+# HADOOP_LOG_DIR Where log files are stored. PWD by default.
+# HADOOP_MASTER host:path where hadoop code should be rsync'd from
+# HADOOP_PID_DIR The pid files are stored. /tmp by default.
+# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# get arguments
+
+#default value
+hadoopScript="$HADOOP_PREFIX"/bin/hadoop
+if [ "--script" = "$1" ]
+ then
+ shift
+ hadoopScript=$1
+ shift
+fi
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ starting_secure_dn="true"
+fi
+
+#Determine if we're starting a privileged NFS, if so, redefine the appropriate variables
+if [ "$command" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+ export HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+ export HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+ export HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+ starting_privileged_nfs="true"
+fi
+
+if [ "$HADOOP_IDENT_STRING" = "" ]; then
+ export HADOOP_IDENT_STRING="$USER"
+fi
+
+
+# get log directory
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+ export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
+fi
+
+if [ ! -w "$HADOOP_LOG_DIR" ] ; then
+ mkdir -p "$HADOOP_LOG_DIR"
+ chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
+fi
+
+if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PID_DIR=/tmp
+fi
+
+# some variables
+export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
+log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$HADOOP_NICENESS" = "" ]; then
+ export HADOOP_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ [ -w "$HADOOP_PID_DIR" ] || mkdir -p "$HADOOP_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$HADOOP_MASTER" != "" ]; then
+ echo rsync from $HADOOP_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_PREFIX"
+ case $command in
+ namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc)
+ if [ -z "$HADOOP_HDFS_HOME" ]; then
+ hdfsScript="$HADOOP_PREFIX"/bin/hdfs
+ else
+ hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
+ fi
+ nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ ;;
+ (*)
+ nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ ;;
+ esac
+ echo $! > $pid
+ sleep 1
+ head "$log"
+ # capture the ulimit output
+ if [ "true" = "$starting_secure_dn" ]; then
+ echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
+ # capture the ulimit info for the appropriate user
+ su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
+ elif [ "true" = "$starting_privileged_nfs" ]; then
+ echo "ulimit -a for privileged nfs user $HADOOP_PRIVILEGED_NFS_USER" >> $log
+ su --shell=/bin/bash $HADOOP_PRIVILEGED_NFS_USER -c 'ulimit -a' >> $log 2>&1
+ else
+ echo "ulimit -a for user $USER" >> $log
+ ulimit -a >> $log 2>&1
+ fi
+ sleep 3;
+ if ! ps -p $! > /dev/null ; then
+ exit 1
+ fi
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $HADOOP_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
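hadoop-daemon.sh is the single-host start/stop wrapper that the dae-*.sh watchdogs above call (piping yes into it only to answer any prompt). Typical direct invocations, matching the usage string at the top of the script:

    sbin/hadoop-daemon.sh --config etc/hadoop start datanode
    sbin/hadoop-daemon.sh --config etc/hadoop stop datanode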
diff --git a/MSH-PIC/hadoop/sbin/hadoop-daemons.sh b/MSH-PIC/hadoop/sbin/hadoop-daemons.sh
new file mode 100644
index 0000000..181d7ac
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/hadoop-daemons.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Hadoop command on all slave hosts.
+
+usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
diff --git a/MSH-PIC/hadoop/sbin/hdfs-config.cmd b/MSH-PIC/hadoop/sbin/hdfs-config.cmd
new file mode 100644
index 0000000..f3aa733
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/hdfs-config.cmd
@@ -0,0 +1,43 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem included in all the hdfs scripts with source command
+@rem should not be executed directly
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+if exist %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd (
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+) else if exist %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd %*
+) else if exist %HADOOP_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_HOME%\libexec\hadoop-config.cmd %*
+) else (
+ echo Hadoop common not found.
+)
+
+:eof
diff --git a/MSH-PIC/hadoop/sbin/hdfs-config.sh b/MSH-PIC/hadoop/sbin/hdfs-config.sh
new file mode 100644
index 0000000..2aabf53
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/hdfs-config.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hdfs scripts with source command
+# should not be executed directly
+
+bin=`which "$0"`
+bin=`dirname "${bin}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
+ . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh
+elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
+ . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh
+elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
+ . "$HADOOP_HOME"/libexec/hadoop-config.sh
+else
+ echo "Hadoop common not found."
+ exit
+fi
diff --git a/MSH-PIC/hadoop/sbin/httpfs.sh b/MSH-PIC/hadoop/sbin/httpfs.sh
new file mode 100644
index 0000000..a593b67
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/httpfs.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+ ls=`ls -ld "${PRG}"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "${PRG}"`/"$link"
+ fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+source ${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/httpfs-config.sh
+
+# The Java System property 'httpfs.http.port' is not used by HttpFS itself;
+# it is only used in Tomcat's server.xml configuration file
+#
+print "Using CATALINA_OPTS: ${CATALINA_OPTS}"
+
+catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}";
+catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}";
+catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}";
+catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}";
+catalina_opts="${catalina_opts} -Dhttpfs.admin.port=${HTTPFS_ADMIN_PORT}";
+catalina_opts="${catalina_opts} -Dhttpfs.http.port=${HTTPFS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dhttpfs.http.hostname=${HTTPFS_HTTP_HOSTNAME}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.file=${HTTPFS_SSL_KEYSTORE_FILE}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.pass=${HTTPFS_SSL_KEYSTORE_PASS}";
+
+print "Adding to CATALINA_OPTS: ${catalina_opts}"
+
+export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
+
+# A bug in catalina.sh means CATALINA_OPTS is not used when stopping the server,
+# so pass the options through JAVA_OPTS instead.
+if [ "${1}" = "stop" ]; then
+ export JAVA_OPTS=${CATALINA_OPTS}
+fi
+
+if [ "${HTTPFS_SILENT}" != "true" ]; then
+ exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
+else
+ exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
+fi
+
diff --git a/MSH-PIC/hadoop/sbin/kms.sh b/MSH-PIC/hadoop/sbin/kms.sh
new file mode 100644
index 0000000..f6ef6a5
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/kms.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+ ls=`ls -ld "${PRG}"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "${PRG}"`/"$link"
+ fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+KMS_SILENT=${KMS_SILENT:-true}
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}"
+source ${HADOOP_LIBEXEC_DIR}/kms-config.sh
+
+
+if [ "x$JAVA_LIBRARY_PATH" = "x" ]; then
+ JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/"
+else
+ JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/:${JAVA_LIBRARY_PATH}"
+fi
+
+# The Java System property 'kms.http.port' is not used by KMS itself;
+# it is only used in Tomcat's server.xml configuration file
+#
+
+# Mask the trustStorePassword
+KMS_SSL_TRUSTSTORE_PASS=`echo $CATALINA_OPTS | grep -o 'trustStorePassword=[^ ]*' | awk -F'=' '{print $2}'`
+CATALINA_OPTS_DISP=`echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/'`
+print "Using CATALINA_OPTS: ${CATALINA_OPTS_DISP}"
+
+catalina_opts="-Dkms.home.dir=${KMS_HOME}";
+catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}";
+catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
+catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";
+catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}";
+catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}";
+catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}";
+catalina_opts="${catalina_opts} -Djava.library.path=${JAVA_LIBRARY_PATH}";
+
+print "Adding to CATALINA_OPTS: ${catalina_opts}"
+print "Found KMS_SSL_KEYSTORE_PASS: `echo ${KMS_SSL_KEYSTORE_PASS} | sed 's/./*/g'`"
+
+export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
+
+# A bug in catalina.sh means CATALINA_OPTS is not used when stopping the server,
+# so pass the options through JAVA_OPTS instead.
+if [ "${1}" = "stop" ]; then
+ export JAVA_OPTS=${CATALINA_OPTS}
+fi
+
+# If SSL is enabled, populate the passwords into ssl-server.xml before starting Tomcat
+if [ ! "${KMS_SSL_KEYSTORE_PASS}" = "" ] || [ ! "${KMS_SSL_TRUSTSTORE_PASS}" = "" ]; then
+ # Set a KEYSTORE_PASS if not already set
+ KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
+ cat ${CATALINA_BASE}/conf/ssl-server.xml.conf \
+ | sed 's/_kms_ssl_keystore_pass_/'${KMS_SSL_KEYSTORE_PASS}'/g' \
+ | sed 's/_kms_ssl_truststore_pass_/'${KMS_SSL_TRUSTSTORE_PASS}'/g' > ${CATALINA_BASE}/conf/ssl-server.xml
+fi
+
+exec ${KMS_CATALINA_HOME}/bin/catalina.sh "$@"
diff --git a/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh b/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh
new file mode 100644
index 0000000..7585c9a
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Environment Variables
+#
+# HADOOP_JHS_LOGGER Hadoop JobSummary logger.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_MAPRED_HOME}/conf.
+# HADOOP_MAPRED_PID_DIR Where the pid files are stored. /tmp by default.
+# HADOOP_MAPRED_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] (start|stop) <mapred-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+ . $HADOOP_LIBEXEC_DIR/mapred-config.sh
+fi
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
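+# Rotate $1, keeping at most $2 (default 5) old copies: $1.N-1 is shifted to $1.N
+# and the live log becomes $1.1.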
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ "$HADOOP_MAPRED_IDENT_STRING" = "" ]; then
+ export HADOOP_MAPRED_IDENT_STRING="$USER"
+fi
+
+export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-${HADOOP_PREFIX}}
+export HADOOP_MAPRED_LOGFILE=mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_MAPRED_ROOT_LOGGER=${HADOOP_MAPRED_ROOT_LOGGER:-INFO,RFA}
+export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
+
+if [ -f "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/mapred-env.sh"
+fi
+
+mkdir -p "$HADOOP_MAPRED_LOG_DIR"
+chown $HADOOP_MAPRED_IDENT_STRING $HADOOP_MAPRED_LOG_DIR
+
+if [ "$HADOOP_MAPRED_PID_DIR" = "" ]; then
+ HADOOP_MAPRED_PID_DIR=/tmp
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_MAPRED_IDENT_STRING"
+
+log=$HADOOP_MAPRED_LOG_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_MAPRED_PID_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command.pid
+
+HADOOP_MAPRED_STOP_TIMEOUT=${HADOOP_MAPRED_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$HADOOP_MAPRED_NICENESS" = "" ]; then
+ export HADOOP_MAPRED_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$HADOOP_MAPRED_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_MAPRED_HOME"
+ nohup nice -n $HADOOP_MAPRED_NICENESS "$HADOOP_MAPRED_HOME"/bin/mapred --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $HADOOP_MAPRED_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $HADOOP_MAPRED_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
diff --git a/MSH-PIC/hadoop/sbin/refresh-namenodes.sh b/MSH-PIC/hadoop/sbin/refresh-namenodes.sh
new file mode 100644
index 0000000..d3f6759
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/refresh-namenodes.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ------------------------------------------------------------------
+# This script refreshes all namenodes; it is a simple wrapper
+# for dfsadmin to support multiple namenodes.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
+if [ "$?" != '0' ] ; then errorFlag='1' ;
+else
+ for namenode in $namenodes ; do
+ echo "Refreshing namenode [$namenode]"
+ "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
+ if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+ done
+fi
+
+if [ "$errorFlag" = '1' ] ; then
+ echo "Error: refresh of namenodes failed, see error messages above."
+ exit 1
+else
+ echo "Refresh of namenodes done."
+fi
+
+
+# eof
diff --git a/MSH-PIC/hadoop/sbin/slaves.sh b/MSH-PIC/hadoop/sbin/slaves.sh
new file mode 100644
index 0000000..016392f
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/slaves.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# HADOOP_SLAVES File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/slaves.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
+##
+
+usage="Usage: slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Where to start the script; see hadoop-config.sh
+# (it sets up the variables based on command line options)
+if [ "$HADOOP_SLAVE_NAMES" != '' ] ; then
+ SLAVE_NAMES=$HADOOP_SLAVE_NAMES
+else
+ SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
+ SLAVE_NAMES=$(cat "$SLAVE_FILE" | sed 's/#.*$//;/^$/d')
+fi
+
+# start the daemons
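+# ${@// /\\ } escapes spaces in each argument so they survive ssh's extra round of word splitting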
+for slave in $SLAVE_NAMES ; do
+ ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
+ sleep $HADOOP_SLAVE_SLEEP
+ fi
+done
+
+wait
diff --git a/MSH-PIC/hadoop/sbin/start-all.cmd b/MSH-PIC/hadoop/sbin/start-all.cmd
new file mode 100644
index 0000000..9f65b5d
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Start all hadoop daemons. Run this on master node.
+
+echo This script is Deprecated. Instead use start-dfs.cmd and start-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem start yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\start-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\start-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/start-all.sh b/MSH-PIC/hadoop/sbin/start-all.sh
new file mode 100644
index 0000000..3124328
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-all.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all hadoop daemons. Run this on master node.
+
+echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# start hdfs daemons if hdfs is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# start yarn daemons if yarn is present
+if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
+ "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+fi
diff --git a/MSH-PIC/hadoop/sbin/start-balancer.sh b/MSH-PIC/hadoop/sbin/start-balancer.sh
new file mode 100644
index 0000000..2c14a59
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-balancer.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# Start balancer daemon.
+
+"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
diff --git a/MSH-PIC/hadoop/sbin/start-dfs.cmd b/MSH-PIC/hadoop/sbin/start-dfs.cmd
new file mode 100644
index 0000000..9f20e5a
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+start "Apache Hadoop Distribution" hadoop namenode
+start "Apache Hadoop Distribution" hadoop datanode
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/start-dfs.sh b/MSH-PIC/hadoop/sbin/start-dfs.sh
new file mode 100644
index 0000000..a8c2b98
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-dfs.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop dfs daemons.
+# Optionally upgrade or roll back dfs state.
+# Run this on master node.
+
+usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# get arguments
+if [[ $# -ge 1 ]]; then
+ startOpt="$1"
+ shift
+ case "$startOpt" in
+ -upgrade)
+ nameStartOpt="$startOpt"
+ ;;
+ -rollback)
+ dataStartOpt="$startOpt"
+ ;;
+ *)
+ echo $usage
+ exit 1
+ ;;
+ esac
+fi
+
+#Add other possible options
+nameStartOpt="$nameStartOpt $@"
+
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+
+echo "Starting namenodes on [$NAMENODES]"
+
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" start namenode $nameStartOpt
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
+if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ echo \
+ "Attempting to start secure cluster, skipping datanodes. " \
+ "Run start-secure-dns.sh as root to complete startup."
+else
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --script "$bin/hdfs" start datanode $dataStartOpt
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+
+if [ -n "$SECONDARY_NAMENODES" ]; then
+ echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$SECONDARY_NAMENODES" \
+ --script "$bin/hdfs" start secondarynamenode
+fi
+
+#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
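+ # strip the qjournal:// scheme, journal id and ports, leaving a space-separated host list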
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Starting journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" start journalnode ;;
+esac
+
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+ echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" start zkfc
+fi
+
+# eof
diff --git a/MSH-PIC/hadoop/sbin/start-secure-dns.sh b/MSH-PIC/hadoop/sbin/start-secure-dns.sh
new file mode 100644
index 0000000..7ddf687
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-secure-dns.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run as root to start secure datanodes in a security-enabled cluster.
+
+usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+else
+ echo $usage
+fi
diff --git a/MSH-PIC/hadoop/sbin/start-yarn.cmd b/MSH-PIC/hadoop/sbin/start-yarn.cmd
new file mode 100644
index 0000000..989510b
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo starting yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start resourceManager
+start "Apache Hadoop Distribution" yarn resourcemanager
+@rem start nodeManager
+start "Apache Hadoop Distribution" yarn nodemanager
+@rem start proxyserver
+@rem start "Apache Hadoop Distribution" yarn proxyserver
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/start-yarn.sh b/MSH-PIC/hadoop/sbin/start-yarn.sh
new file mode 100644
index 0000000..40b77fb
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/start-yarn.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all yarn daemons. Run this on master node.
+
+echo "starting yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# start resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
+# start nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager
+# start proxyserver
+#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver
diff --git a/MSH-PIC/hadoop/sbin/stop-all.cmd b/MSH-PIC/hadoop/sbin/stop-all.cmd
new file mode 100644
index 0000000..1d22c79
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Stop all hadoop daemons. Run this on master node.
+
+echo This script is Deprecated. Instead use stop-dfs.cmd and stop-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem stop yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/stop-all.sh b/MSH-PIC/hadoop/sbin/stop-all.sh
new file mode 100644
index 0000000..9a2fe98
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-all.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all hadoop daemons. Run this on master node.
+
+echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# stop hdfs daemons if hdfs is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# stop yarn daemons if yarn is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
+fi
diff --git a/MSH-PIC/hadoop/sbin/stop-balancer.sh b/MSH-PIC/hadoop/sbin/stop-balancer.sh
new file mode 100644
index 0000000..df82456
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-balancer.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# Stop balancer daemon.
+# Run this on the machine where the balancer is running
+
+"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
diff --git a/MSH-PIC/hadoop/sbin/stop-dfs.cmd b/MSH-PIC/hadoop/sbin/stop-dfs.cmd
new file mode 100644
index 0000000..f0cf015
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop namenode"
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop datanode"
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/stop-dfs.sh b/MSH-PIC/hadoop/sbin/stop-dfs.sh
new file mode 100644
index 0000000..6a622fa
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-dfs.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
+if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ echo \
+ "Attempting to stop secure cluster, skipping datanodes. " \
+ "Run stop-secure-dns.sh as root to complete shutdown."
+else
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --script "$bin/hdfs" stop datanode
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+
+if [ -n "$SECONDARY_NAMENODES" ]; then
+ echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$SECONDARY_NAMENODES" \
+ --script "$bin/hdfs" stop secondarynamenode
+fi
+
+#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Stopping journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" stop journalnode ;;
+esac
+
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+ echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" stop zkfc
+fi
+# eof
diff --git a/MSH-PIC/hadoop/sbin/stop-secure-dns.sh b/MSH-PIC/hadoop/sbin/stop-secure-dns.sh
new file mode 100644
index 0000000..fdd47c3
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-secure-dns.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run as root to stop secure datanodes in a security-enabled cluster.
+
+usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+else
+ echo $usage
+fi
diff --git a/MSH-PIC/hadoop/sbin/stop-yarn.cmd b/MSH-PIC/hadoop/sbin/stop-yarn.cmd
new file mode 100644
index 0000000..0914337
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo stopping yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop resourceManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn resourcemanager"
+@rem stop nodeManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn nodemanager"
+@rem stop proxy server
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn proxyserver"
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/stop-yarn.sh b/MSH-PIC/hadoop/sbin/stop-yarn.sh
new file mode 100644
index 0000000..a8498ef
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-yarn.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all yarn daemons. Run this on master node.
+
+echo "stopping yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# stop resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop resourcemanager
+# stop nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR stop nodemanager
+# stop proxy server
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop proxyserver
diff --git a/MSH-PIC/hadoop/sbin/yarn-daemon.sh b/MSH-PIC/hadoop/sbin/yarn-daemon.sh
new file mode 100644
index 0000000..fbfa71d
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/yarn-daemon.sh
@@ -0,0 +1,161 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a yarn command as a daemon.
+#
+# Environment Variables
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+# YARN_LOG_DIR Where log files are stored. PWD by default.
+# YARN_MASTER host:path where hadoop code should be rsync'd from
+# YARN_PID_DIR Where the pid files are stored. /tmp by default.
+# YARN_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# YARN_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: yarn-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <yarn-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$YARN_IDENT_STRING" = "" ]; then
+ export YARN_IDENT_STRING="$USER"
+fi
+
+# get log directory
+if [ "$YARN_LOG_DIR" = "" ]; then
+ export YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+
+if [ ! -w "$YARN_LOG_DIR" ] ; then
+ mkdir -p "$YARN_LOG_DIR"
+ chown $YARN_IDENT_STRING $YARN_LOG_DIR
+fi
+
+if [ "$YARN_PID_DIR" = "" ]; then
+ YARN_PID_DIR=/tmp
+fi
+
+# some variables
+export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA}
+log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
+pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$YARN_NICENESS" = "" ]; then
+ export YARN_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ [ -w "$YARN_PID_DIR" ] || mkdir -p "$YARN_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$YARN_MASTER" != "" ]; then
+ echo rsync from $YARN_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$HADOOP_YARN_HOME"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_YARN_HOME"
+ nohup nice -n $YARN_NICENESS "$HADOOP_YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1
+ head "$log"
+ # capture the ulimit output
+ echo "ulimit -a" >> $log
+ ulimit -a >> $log 2>&1
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $YARN_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
diff --git a/MSH-PIC/hadoop/sbin/yarn-daemons.sh b/MSH-PIC/hadoop/sbin/yarn-daemons.sh
new file mode 100644
index 0000000..a7858e4
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/yarn-daemons.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Yarn command on all slave hosts.
+
+usage="Usage: yarn-daemons.sh [--config confdir] [--hosts hostlistfile] [start
+|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
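+# run "cd $HADOOP_YARN_HOME; yarn-daemon.sh --config $YARN_CONF_DIR <args>" on every host in the slaves file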
+exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$HADOOP_YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
+
diff --git a/MSH-PIC/hbase/bin/alter-hbase-table.sh b/MSH-PIC/hbase/bin/alter-hbase-table.sh
new file mode 100644
index 0000000..a174368
--- /dev/null
+++ b/MSH-PIC/hbase/bin/alter-hbase-table.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
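+# Adjust existing HBase tables via the hbase shell: set a 300 second TTL on the
+# tsg_galaxy:job_result column families and switch the knowledge_base_hos_bucket
+# tables to SYNC_WAL durability.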
+
+source /etc/profile
+
+hbase shell <<EOF
+
+disable "tsg_galaxy:job_result"
+alter "tsg_galaxy:job_result",NAME=>'detail',TTL=>'300'
+alter "tsg_galaxy:job_result",NAME=>'statistics',TTL=>'300'
+alter "tsg_galaxy:job_result",NAME=>'field_discovery',TTL=>'300'
+enable "tsg_galaxy:job_result"
+
+alter 'knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_filename_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_time_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_partfile_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+
+EOF
+
diff --git a/MSH-PIC/hbase/bin/considerAsDead.sh b/MSH-PIC/hbase/bin/considerAsDead.sh
new file mode 100644
index 0000000..ae1b8d8
--- /dev/null
+++ b/MSH-PIC/hbase/bin/considerAsDead.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+
+usage="Usage: considerAsDead.sh --hostname serverName"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. $bin/hbase-config.sh
+
+shift
+deadhost=$@
+
+remote_cmd="cd ${HBASE_HOME}; $bin/hbase-daemon.sh --config ${HBASE_CONF_DIR} restart"
+
+zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+zkrs=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+if [ "$zkrs" == "null" ]; then zkrs="rs"; fi
+
+zkrs="$zparent/$zkrs"
+online_regionservers=`$bin/hbase zkcli ls $zkrs 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+for rs in $online_regionservers
+do
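+ # region server znodes are named host,port,startcode; compare only the hostname part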
+ rs_parts=(${rs//,/ })
+ hostname=${rs_parts[0]}
+ echo $deadhost
+ echo $hostname
+ if [ "$deadhost" == "$hostname" ]; then
+ znode="$zkrs/$rs"
+ echo "ZNode Deleting:" $znode
+ $bin/hbase zkcli delete $znode > /dev/null 2>&1
+ sleep 1
+ ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /"
+ fi
+done
diff --git a/MSH-PIC/hbase/bin/create-hbase-table.sh b/MSH-PIC/hbase/bin/create-hbase-table.sh
new file mode 100644
index 0000000..b83754f
--- /dev/null
+++ b/MSH-PIC/hbase/bin/create-hbase-table.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
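+# Create the HBase namespaces (tsg, dos, tsg_galaxy) and the tables used for
+# report results, DDoS traffic baselines, RADIUS/framed-IP relations and the
+# GTP-C knowledge base.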
+
+source /etc/profile
+
+hbase shell <<EOF
+
+create_namespace 'tsg'
+create_namespace 'dos'
+create_namespace 'tsg_galaxy'
+
+create 'tsg:report_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
+
+create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Flood'
+
+create 'tsg_galaxy:relation_account_framedip', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
+create 'tsg_galaxy:relation_framedip_account', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
+
+create 'tsg_galaxy:recommendation_app_cip', {NAME => 'common', VERSIONS => 1}
+
+create 'tsg_galaxy:relation_user_teid',{NAME=>'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'}
+create 'tsg_galaxy:gtpc_knowledge_base',{NAME => 'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'},SPLITS => ['1','2','3']
+
+EOF
diff --git a/MSH-PIC/hbase/bin/create-phoenix-table.sh b/MSH-PIC/hbase/bin/create-phoenix-table.sh
new file mode 100644
index 0000000..57b8ea3
--- /dev/null
+++ b/MSH-PIC/hbase/bin/create-phoenix-table.sh
@@ -0,0 +1,394 @@
+#!/bin/bash
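+# Create the Phoenix schemas plus the views/tables that expose the HBase tables
+# above to SQL queries through sqlline.py.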
+
+source /etc/profile
+
+phoenix_path=/home/tsg/olap/phoenix-hbase-2.2-5.1.2-bin/bin
+
+$phoenix_path/sqlline.py<<EOF
+
+CREATE schema IF NOT EXISTS "tsg_galaxy";
+CREATE schema IF NOT EXISTS "tsg";
+
+CREATE view "tsg"."report_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);
+
+CREATE view IF NOT EXISTS "tsg_galaxy"."relation_account_framedip"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."vsys_id" UNSIGNED_INT,
+"radius"."account" VARCHAR,
+"radius"."framed_ip" VARCHAR,
+"radius"."first_found_time" UNSIGNED_LONG,
+"radius"."last_update_time" UNSIGNED_LONG,
+"radius"."acct_status_type" UNSIGNED_INT);
+
+CREATE view "tsg_galaxy"."recommendation_app_cip"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."app_label" VARCHAR,
+"common"."client_ip_list" VARCHAR,
+"common"."last_update_time" UNSIGNED_LONG);
+
+CREATE view IF NOT EXISTS "tsg_galaxy"."gtpc_knowledge_base"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."vsys_id" UNSIGNED_INT,
+"gtp"."teid" UNSIGNED_LONG,
+"gtp"."uplink_teid" UNSIGNED_LONG,
+"gtp"."downlink_teid" UNSIGNED_LONG,
+"gtp"."apn" VARCHAR,
+"gtp"."phone_number" VARCHAR,
+"gtp"."imsi" VARCHAR,
+"gtp"."imei" VARCHAR,
+"gtp"."msg_type" UNSIGNED_INT,
+"gtp"."last_update_time" UNSIGNED_LONG);
+
+CREATE table IF NOT EXISTS "tsg_galaxy"."job_result"(
+ROWKEY VARCHAR PRIMARY KEY,
+"detail"."is_failed" BOOLEAN,
+"detail"."is_canceled" BOOLEAN,
+"detail"."is_done" BOOLEAN,
+"detail"."done_progress" UNSIGNED_FLOAT,
+"detail"."last_query_time" UNSIGNED_LONG,
+"detail"."duration_time" UNSIGNED_LONG,
+"detail"."count" UNSIGNED_LONG,
+"detail"."job_property" VARCHAR,
+"statistics"."result" VARCHAR,
+"field_discovery"."common_recv_time" VARCHAR,
+"field_discovery"."common_log_id" VARCHAR,
+"field_discovery"."common_policy_id" VARCHAR,
+"field_discovery"."common_subscriber_id" VARCHAR,
+"field_discovery"."common_imei" VARCHAR,
+"field_discovery"."common_imsi" VARCHAR,
+"field_discovery"."common_phone_number" VARCHAR,
+"field_discovery"."common_client_ip" VARCHAR,
+"field_discovery"."common_internal_ip" VARCHAR,
+"field_discovery"."common_client_port" VARCHAR,
+"field_discovery"."common_l4_protocol" VARCHAR,
+"field_discovery"."common_address_type" VARCHAR,
+"field_discovery"."common_server_ip" VARCHAR,
+"field_discovery"."common_server_port" VARCHAR,
+"field_discovery"."common_external_ip" VARCHAR,
+"field_discovery"."common_action" VARCHAR,
+"field_discovery"."common_direction" VARCHAR,
+"field_discovery"."common_entrance_id" VARCHAR,
+"field_discovery"."common_sled_ip" VARCHAR,
+"field_discovery"."common_client_location" VARCHAR,
+"field_discovery"."common_client_asn" VARCHAR,
+"field_discovery"."common_server_location" VARCHAR,
+"field_discovery"."common_server_asn" VARCHAR,
+"field_discovery"."common_server_fqdn" VARCHAR,
+"field_discovery"."common_server_domain" VARCHAR,
+"field_discovery"."common_sessions" VARCHAR,
+"field_discovery"."common_c2s_pkt_num" VARCHAR,
+"field_discovery"."common_s2c_pkt_num" VARCHAR,
+"field_discovery"."common_c2s_byte_num" VARCHAR,
+"field_discovery"."common_s2c_byte_num" VARCHAR,
+"field_discovery"."common_c2s_pkt_diff" VARCHAR,
+"field_discovery"."common_s2c_pkt_diff" VARCHAR,
+"field_discovery"."common_c2s_byte_diff" VARCHAR,
+"field_discovery"."common_s2c_byte_diff" VARCHAR,
+"field_discovery"."common_service" VARCHAR,
+"field_discovery"."common_schema_type" VARCHAR,
+"field_discovery"."common_vsys_id" VARCHAR,
+"field_discovery"."common_t_vsys_id" VARCHAR,
+"field_discovery"."common_flags" VARCHAR,
+"field_discovery"."common_flags_identify_info" VARCHAR,
+"field_discovery"."common_user_tags" VARCHAR,
+"field_discovery"."common_sub_action" VARCHAR,
+"field_discovery"."common_user_region" VARCHAR,
+"field_discovery"."common_shaping_rule_ids" VARCHAR,
+"field_discovery"."common_device_id" VARCHAR,
+"field_discovery"."common_egress_link_id" VARCHAR,
+"field_discovery"."common_ingress_link_id" VARCHAR,
+"field_discovery"."common_isp" VARCHAR,
+"field_discovery"."common_device_tag" VARCHAR,
+"field_discovery"."common_data_center" VARCHAR,
+"field_discovery"."common_device_group" VARCHAR,
+"field_discovery"."common_app_behavior" VARCHAR,
+"field_discovery"."common_encapsulation" VARCHAR,
+"field_discovery"."common_app_label" VARCHAR,
+"field_discovery"."common_tunnels" VARCHAR,
+"field_discovery"."common_protocol_label" VARCHAR,
+"field_discovery"."common_app_id" VARCHAR,
+"field_discovery"."common_app_full_path" VARCHAR,
+"field_discovery"."common_userdefine_app_name" VARCHAR,
+"field_discovery"."common_app_identify_info" VARCHAR,
+"field_discovery"."common_app_surrogate_id" VARCHAR,
+"field_discovery"."common_l7_protocol" VARCHAR,
+"field_discovery"."common_service_category" VARCHAR,
+"field_discovery"."common_start_time" VARCHAR,
+"field_discovery"."common_end_time" VARCHAR,
+"field_discovery"."common_establish_latency_ms" VARCHAR,
+"field_discovery"."common_con_duration_ms" VARCHAR,
+"field_discovery"."common_stream_dir" VARCHAR,
+"field_discovery"."common_address_list" VARCHAR,
+"field_discovery"."common_has_dup_traffic" VARCHAR,
+"field_discovery"."common_stream_error" VARCHAR,
+"field_discovery"."common_stream_trace_id" VARCHAR,
+"field_discovery"."common_link_info_c2s" VARCHAR,
+"field_discovery"."common_link_info_s2c" VARCHAR,
+"field_discovery"."common_packet_capture_file" VARCHAR,
+"field_discovery"."common_tunnel_endpoint_a_desc" VARCHAR,
+"field_discovery"."common_tunnel_endpoint_b_desc" VARCHAR,
+"field_discovery"."common_c2s_ipfrag_num" VARCHAR,
+"field_discovery"."common_s2c_ipfrag_num" VARCHAR,
+"field_discovery"."common_c2s_tcp_lostlen" VARCHAR,
+"field_discovery"."common_s2c_tcp_lostlen" VARCHAR,
+"field_discovery"."common_c2s_tcp_unorder_num" VARCHAR,
+"field_discovery"."common_s2c_tcp_unorder_num" VARCHAR,
+"field_discovery"."common_c2s_pkt_retrans" VARCHAR,
+"field_discovery"."common_s2c_pkt_retrans" VARCHAR,
+"field_discovery"."common_c2s_byte_retrans" VARCHAR,
+"field_discovery"."common_s2c_byte_retrans" VARCHAR,
+"field_discovery"."common_tcp_client_isn" VARCHAR,
+"field_discovery"."common_tcp_server_isn" VARCHAR,
+"field_discovery"."common_first_ttl" VARCHAR,
+"field_discovery"."common_processing_time" VARCHAR,
+"field_discovery"."common_ingestion_time" VARCHAR,
+"field_discovery"."common_mirrored_pkts" VARCHAR,
+"field_discovery"."common_mirrored_bytes" VARCHAR,
+"field_discovery"."http_url" VARCHAR,
+"field_discovery"."http_host" VARCHAR,
+"field_discovery"."http_domain" VARCHAR,
+"field_discovery"."http_request_line" VARCHAR,
+"field_discovery"."http_response_line" VARCHAR,
+"field_discovery"."http_request_header" VARCHAR,
+"field_discovery"."http_response_header" VARCHAR,
+"field_discovery"."http_request_content" VARCHAR,
+"field_discovery"."http_request_content_length" VARCHAR,
+"field_discovery"."http_request_content_type" VARCHAR,
+"field_discovery"."http_response_content" VARCHAR,
+"field_discovery"."http_response_content_length" VARCHAR,
+"field_discovery"."http_response_content_type" VARCHAR,
+"field_discovery"."http_request_body" VARCHAR,
+"field_discovery"."http_response_body" VARCHAR,
+"field_discovery"."http_request_body_key" VARCHAR,
+"field_discovery"."http_response_body_key" VARCHAR,
+"field_discovery"."http_proxy_flag" VARCHAR,
+"field_discovery"."http_sequence" VARCHAR,
+"field_discovery"."http_snapshot" VARCHAR,
+"field_discovery"."http_cookie" VARCHAR,
+"field_discovery"."http_referer" VARCHAR,
+"field_discovery"."http_user_agent" VARCHAR,
+"field_discovery"."http_content_length" VARCHAR,
+"field_discovery"."http_content_type" VARCHAR,
+"field_discovery"."http_set_cookie" VARCHAR,
+"field_discovery"."http_version" VARCHAR,
+"field_discovery"."http_response_latency_ms" VARCHAR,
+"field_discovery"."http_action_file_size" VARCHAR,
+"field_discovery"."http_session_duration_ms" VARCHAR,
+"field_discovery"."mail_protocol_type" VARCHAR,
+"field_discovery"."mail_account" VARCHAR,
+"field_discovery"."mail_from_cmd" VARCHAR,
+"field_discovery"."mail_to_cmd" VARCHAR,
+"field_discovery"."mail_from" VARCHAR,
+"field_discovery"."mail_to" VARCHAR,
+"field_discovery"."mail_cc" VARCHAR,
+"field_discovery"."mail_bcc" VARCHAR,
+"field_discovery"."mail_subject" VARCHAR,
+"field_discovery"."mail_subject_charset" VARCHAR,
+"field_discovery"."mail_content" VARCHAR,
+"field_discovery"."mail_content_charset" VARCHAR,
+"field_discovery"."mail_attachment_name" VARCHAR,
+"field_discovery"."mail_attachment_name_charset" VARCHAR,
+"field_discovery"."mail_attachment_content" VARCHAR,
+"field_discovery"."mail_eml_file" VARCHAR,
+"field_discovery"."mail_snapshot" VARCHAR,
+"field_discovery"."dns_message_id" VARCHAR,
+"field_discovery"."dns_qr" VARCHAR,
+"field_discovery"."dns_opcode" VARCHAR,
+"field_discovery"."dns_aa" VARCHAR,
+"field_discovery"."dns_tc" VARCHAR,
+"field_discovery"."dns_rd" VARCHAR,
+"field_discovery"."dns_ra" VARCHAR,
+"field_discovery"."dns_rcode" VARCHAR,
+"field_discovery"."dns_qdcount" VARCHAR,
+"field_discovery"."dns_ancount" VARCHAR,
+"field_discovery"."dns_nscount" VARCHAR,
+"field_discovery"."dns_arcount" VARCHAR,
+"field_discovery"."dns_qname" VARCHAR,
+"field_discovery"."dns_qtype" VARCHAR,
+"field_discovery"."dns_qclass" VARCHAR,
+"field_discovery"."dns_cname" VARCHAR,
+"field_discovery"."dns_sub" VARCHAR,
+"field_discovery"."dns_rr" VARCHAR,
+"field_discovery"."dns_response_latency_ms" VARCHAR,
+"field_discovery"."ssl_version" VARCHAR,
+"field_discovery"."ssl_sni" VARCHAR,
+"field_discovery"."ssl_san" VARCHAR,
+"field_discovery"."ssl_cn" VARCHAR,
+"field_discovery"."ssl_pinningst" VARCHAR,
+"field_discovery"."ssl_intercept_state" VARCHAR,
+"field_discovery"."ssl_passthrough_reason" VARCHAR,
+"field_discovery"."ssl_server_side_latency" VARCHAR,
+"field_discovery"."ssl_client_side_latency" VARCHAR,
+"field_discovery"."ssl_server_side_version" VARCHAR,
+"field_discovery"."ssl_client_side_version" VARCHAR,
+"field_discovery"."ssl_cert_verify" VARCHAR,
+"field_discovery"."ssl_error" VARCHAR,
+"field_discovery"."ssl_con_latency_ms" VARCHAR,
+"field_discovery"."ssl_ja3_fingerprint" VARCHAR,
+"field_discovery"."ssl_ja3_hash" VARCHAR,
+"field_discovery"."ssl_ja3s_fingerprint" VARCHAR,
+"field_discovery"."ssl_ja3s_hash" VARCHAR,
+"field_discovery"."ssl_cert_issuer" VARCHAR,
+"field_discovery"."ssl_cert_subject" VARCHAR,
+"field_discovery"."dtls_cookie" VARCHAR,
+"field_discovery"."dtls_version" VARCHAR,
+"field_discovery"."dtls_sni" VARCHAR,
+"field_discovery"."dtls_san" VARCHAR,
+"field_discovery"."dtls_cn" VARCHAR,
+"field_discovery"."dtls_con_latency_ms" VARCHAR,
+"field_discovery"."dtls_ja3_fingerprint" VARCHAR,
+"field_discovery"."dtls_ja3_hash" VARCHAR,
+"field_discovery"."dtls_cert_issuer" VARCHAR,
+"field_discovery"."dtls_cert_subject" VARCHAR,
+"field_discovery"."quic_version" VARCHAR,
+"field_discovery"."quic_sni" VARCHAR,
+"field_discovery"."quic_user_agent" VARCHAR,
+"field_discovery"."ftp_account" VARCHAR,
+"field_discovery"."ftp_url" VARCHAR,
+"field_discovery"."ftp_content" VARCHAR,
+"field_discovery"."ftp_link_type" VARCHAR,
+"field_discovery"."bgp_type" VARCHAR,
+"field_discovery"."bgp_as_num" VARCHAR,
+"field_discovery"."bgp_route" VARCHAR,
+"field_discovery"."voip_calling_account" VARCHAR,
+"field_discovery"."voip_called_account" VARCHAR,
+"field_discovery"."voip_calling_number" VARCHAR,
+"field_discovery"."voip_called_number" VARCHAR,
+"field_discovery"."streaming_media_url" VARCHAR,
+"field_discovery"."streaming_media_protocol" VARCHAR,
+"field_discovery"."app_extra_info" VARCHAR,
+"field_discovery"."sip_call_id" VARCHAR,
+"field_discovery"."sip_originator_description" VARCHAR,
+"field_discovery"."sip_responder_description" VARCHAR,
+"field_discovery"."sip_user_agent" VARCHAR,
+"field_discovery"."sip_server" VARCHAR,
+"field_discovery"."sip_originator_sdp_connect_ip" VARCHAR,
+"field_discovery"."sip_originator_sdp_media_port" VARCHAR,
+"field_discovery"."sip_originator_sdp_media_type" VARCHAR,
+"field_discovery"."sip_originator_sdp_content" VARCHAR,
+"field_discovery"."sip_responder_sdp_connect_ip" VARCHAR,
+"field_discovery"."sip_responder_sdp_media_port" VARCHAR,
+"field_discovery"."sip_responder_sdp_media_type" VARCHAR,
+"field_discovery"."sip_responder_sdp_content" VARCHAR,
+"field_discovery"."sip_duration_s" VARCHAR,
+"field_discovery"."sip_bye" VARCHAR,
+"field_discovery"."rtp_payload_type_c2s" VARCHAR,
+"field_discovery"."rtp_payload_type_s2c" VARCHAR,
+"field_discovery"."rtp_pcap_path" VARCHAR,
+"field_discovery"."rtp_originator_dir" VARCHAR,
+"field_discovery"."ssh_version" VARCHAR,
+"field_discovery"."ssh_auth_success" VARCHAR,
+"field_discovery"."ssh_client_version" VARCHAR,
+"field_discovery"."ssh_server_version" VARCHAR,
+"field_discovery"."ssh_cipher_alg" VARCHAR,
+"field_discovery"."ssh_mac_alg" VARCHAR,
+"field_discovery"."ssh_compression_alg" VARCHAR,
+"field_discovery"."ssh_kex_alg" VARCHAR,
+"field_discovery"."ssh_host_key_alg" VARCHAR,
+"field_discovery"."ssh_host_key" VARCHAR,
+"field_discovery"."ssh_hassh" VARCHAR,
+"field_discovery"."stratum_cryptocurrency" VARCHAR,
+"field_discovery"."stratum_mining_pools" VARCHAR,
+"field_discovery"."stratum_mining_program" VARCHAR,
+"field_discovery"."rdp_cookie" VARCHAR,
+"field_discovery"."rdp_security_protocol" VARCHAR,
+"field_discovery"."rdp_client_channels" VARCHAR,
+"field_discovery"."rdp_keyboard_layout" VARCHAR,
+"field_discovery"."rdp_client_version" VARCHAR,
+"field_discovery"."rdp_client_name" VARCHAR,
+"field_discovery"."rdp_client_product_id" VARCHAR,
+"field_discovery"."rdp_desktop_width" VARCHAR,
+"field_discovery"."rdp_desktop_height" VARCHAR,
+"field_discovery"."rdp_requested_color_depth" VARCHAR,
+"field_discovery"."rdp_certificate_type" VARCHAR,
+"field_discovery"."rdp_certificate_count" VARCHAR,
+"field_discovery"."rdp_certificate_permanent" VARCHAR,
+"field_discovery"."rdp_encryption_level" VARCHAR,
+"field_discovery"."rdp_encryption_method" VARCHAR,
+"field_discovery"."doh_url" VARCHAR,
+"field_discovery"."doh_host" VARCHAR,
+"field_discovery"."doh_request_line" VARCHAR,
+"field_discovery"."doh_response_line" VARCHAR,
+"field_discovery"."doh_cookie" VARCHAR,
+"field_discovery"."doh_referer" VARCHAR,
+"field_discovery"."doh_user_agent" VARCHAR,
+"field_discovery"."doh_content_length" VARCHAR,
+"field_discovery"."doh_content_type" VARCHAR,
+"field_discovery"."doh_set_cookie" VARCHAR,
+"field_discovery"."doh_version" VARCHAR,
+"field_discovery"."doh_message_id" VARCHAR,
+"field_discovery"."doh_qr" VARCHAR,
+"field_discovery"."doh_opcode" VARCHAR,
+"field_discovery"."doh_aa" VARCHAR,
+"field_discovery"."doh_tc" VARCHAR,
+"field_discovery"."doh_rd" VARCHAR,
+"field_discovery"."doh_ra" VARCHAR,
+"field_discovery"."doh_rcode" VARCHAR,
+"field_discovery"."doh_qdcount" VARCHAR,
+"field_discovery"."doh_ancount" VARCHAR,
+"field_discovery"."doh_nscount" VARCHAR,
+"field_discovery"."doh_arcount" VARCHAR,
+"field_discovery"."doh_qname" VARCHAR,
+"field_discovery"."doh_qtype" VARCHAR,
+"field_discovery"."doh_qclass" VARCHAR,
+"field_discovery"."doh_cname" VARCHAR,
+"field_discovery"."doh_sub" VARCHAR,
+"field_discovery"."doh_rr" VARCHAR,
+"field_discovery"."radius_packet_type" VARCHAR,
+"field_discovery"."radius_account" VARCHAR,
+"field_discovery"."radius_nas_ip" VARCHAR,
+"field_discovery"."radius_framed_ip" VARCHAR,
+"field_discovery"."radius_session_timeout" VARCHAR,
+"field_discovery"."radius_idle_timeout" VARCHAR,
+"field_discovery"."radius_acct_status_type" VARCHAR,
+"field_discovery"."radius_acct_terminate_cause" VARCHAR,
+"field_discovery"."radius_event_timestamp" VARCHAR,
+"field_discovery"."radius_service_type" VARCHAR,
+"field_discovery"."radius_nas_port" VARCHAR,
+"field_discovery"."radius_framed_protocol" VARCHAR,
+"field_discovery"."radius_callback_number" VARCHAR,
+"field_discovery"."radius_callback_id" VARCHAR,
+"field_discovery"."radius_termination_action" VARCHAR,
+"field_discovery"."radius_called_station_id" VARCHAR,
+"field_discovery"."radius_calling_station_id" VARCHAR,
+"field_discovery"."radius_acct_delay_time" VARCHAR,
+"field_discovery"."radius_acct_session_id" VARCHAR,
+"field_discovery"."radius_acct_multi_session_id" VARCHAR,
+"field_discovery"."radius_acct_input_octets" VARCHAR,
+"field_discovery"."radius_acct_output_octets" VARCHAR,
+"field_discovery"."radius_acct_input_packets" VARCHAR,
+"field_discovery"."radius_acct_output_packets" VARCHAR,
+"field_discovery"."radius_acct_session_time" VARCHAR,
+"field_discovery"."radius_acct_link_count" VARCHAR,
+"field_discovery"."radius_acct_interim_interval" VARCHAR,
+"field_discovery"."radius_acct_authentic" VARCHAR,
+"field_discovery"."gtp_version" VARCHAR,
+"field_discovery"."gtp_apn" VARCHAR,
+"field_discovery"."gtp_imei" VARCHAR,
+"field_discovery"."gtp_imsi" VARCHAR,
+"field_discovery"."gtp_phone_number" VARCHAR,
+"field_discovery"."gtp_uplink_teid" VARCHAR,
+"field_discovery"."gtp_downlink_teid" VARCHAR,
+"field_discovery"."gtp_msg_type" VARCHAR,
+"field_discovery"."gtp_end_user_ipv4" VARCHAR,
+"field_discovery"."gtp_end_user_ipv6" VARCHAR,
+"field_discovery"."start_time" VARCHAR,
+"field_discovery"."end_time" VARCHAR,
+"field_discovery"."log_id" VARCHAR,
+"field_discovery"."profile_id" VARCHAR,
+"field_discovery"."vsys_id" VARCHAR,
+"field_discovery"."attack_type" VARCHAR,
+"field_discovery"."severity" VARCHAR,
+"field_discovery"."conditions" VARCHAR,
+"field_discovery"."destination_ip" VARCHAR,
+"field_discovery"."destination_country" VARCHAR,
+"field_discovery"."source_ip_list" VARCHAR,
+"field_discovery"."source_country_list" VARCHAR,
+"field_discovery"."session_rate" VARCHAR,
+"field_discovery"."packet_rate" VARCHAR,
+"field_discovery"."bit_rate" VARCHAR);
+
+!quit
+
+EOF
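+
+# Optional sanity check (a sketch, reusing the sqlline path above):
+#   echo '!tables' | $phoenix_path/sqlline.py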
+
diff --git a/MSH-PIC/hbase/bin/dae-hmaster.sh b/MSH-PIC/hbase/bin/dae-hmaster.sh
new file mode 100644
index 0000000..41b6343
--- /dev/null
+++ b/MSH-PIC/hbase/bin/dae-hmaster.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hbase-2.2.3
+
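+# set_log: keep a restart counter in $RES_SUM_FILE/masterRes_sum and append a
+# line to restart.log on the first start and on every subsequent restart.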
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/masterRes_sum
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/masterRes_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/masterRes_sum
+
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - HBase HMaster服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - HBase HMaster服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
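+# Watchdog loop: every 60 seconds check via jps whether an HMaster process is
+# running; if not, start one with hbase-daemon.sh and log the restart.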
+while true ; do
+
+HAS_MASTER=`jps | grep -w HMaster | grep -v grep |wc -l`
+if [ "$HAS_MASTER" -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/hbase-daemon.sh start master
+ set_log
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hbase/bin/dae-hregion.sh b/MSH-PIC/hbase/bin/dae-hregion.sh
new file mode 100644
index 0000000..e0dedb5
--- /dev/null
+++ b/MSH-PIC/hbase/bin/dae-hregion.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hbase-2.2.3
+
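+# set_log: keep a restart counter in $RES_SUM_FILE/regionRes_sum and append a
+# line to restart.log on the first start and on every subsequent restart.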
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/regionRes_sum
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/regionRes_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/regionRes_sum
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - HBase HRegionServer服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - HBase HRegionServer服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
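+# Watchdog loop: every 60 seconds check via jps whether an HRegionServer process
+# is running; if not, start one with hbase-daemon.sh and log the restart.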
+while true ; do
+
+HAS_REGIONSERVER=`jps | grep -w HRegionServer | grep -v grep |wc -l`
+if [ "$HAS_REGIONSERVER" -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/hbase-daemon.sh start regionserver
+ set_log
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hbase/bin/draining_servers.rb b/MSH-PIC/hbase/bin/draining_servers.rb
new file mode 100644
index 0000000..a8e20f0
--- /dev/null
+++ b/MSH-PIC/hbase/bin/draining_servers.rb
@@ -0,0 +1,156 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper
+# Deprecated in 2.0, and will be removed in 3.0. Use Admin decommission
+# API instead.
+
+require 'optparse'
+include Java
+
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.zookeeper.ZKUtil
+java_import org.apache.hadoop.hbase.zookeeper.ZNodePaths
+java_import org.slf4j.LoggerFactory
+
+# Name of this script
+NAME = 'draining_servers'.freeze
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+ opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+ opts.separator 'Add, remove or list servers in draining mode. Can accept either a hostname to drain all region servers ' \
+ 'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given separated by space'
+ opts.on('-h', '--help', 'Display usage information') do
+ puts opts
+ exit
+ end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+ serverInfos = admin.getClusterStatus.getServers
+ servers = []
+ for server in serverInfos
+ servers << server.getServerName
+ end
+ servers
+end
+
+def getServerNames(hostOrServers, config)
+ ret = []
+ connection = ConnectionFactory.createConnection(config)
+
+ for hostOrServer in hostOrServers
+ # check whether it is already serverName. No need to connect to cluster
+ parts = hostOrServer.split(',')
+ if parts.size == 3
+ ret << hostOrServer
+ else
+ admin = connection.getAdmin unless admin
+ servers = getServers(admin)
+
+ hostOrServer = hostOrServer.tr(':', ',')
+ for server in servers
+ ret << server if server.start_with?(hostOrServer)
+ end
+ end
+ end
+
+ admin.close if admin
+ connection.close
+ ret
+end
+
+def addServers(_options, hostOrServers)
+ config = HBaseConfiguration.create
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ for server in servers
+ node = ZNodePaths.joinZNode(parentZnode, server)
+ ZKUtil.createAndFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close
+ end
+end
+
+def removeServers(_options, hostOrServers)
+ config = HBaseConfiguration.create
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ for server in servers
+ node = ZNodePaths.joinZNode(parentZnode, server)
+ ZKUtil.deleteNodeFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close
+ end
+end
+
+# list servers in draining mode
+def listServers(_options)
+ config = HBaseConfiguration.create
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+ servers.each { |server| puts server }
+ ensure
+ zkw.close
+ end
+end
+
+hostOrServers = ARGV[1..ARGV.size]
+
+# Create a logger and save it to ruby global
+$LOG = LoggerFactory.getLogger(NAME)
+case ARGV[0]
+when 'add'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ addServers(options, hostOrServers)
+when 'remove'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ removeServers(options, hostOrServers)
+when 'list'
+ listServers(options)
+else
+ puts optparse
+ exit 3
+end
diff --git a/MSH-PIC/hbase/bin/get-active-master.rb b/MSH-PIC/hbase/bin/get-active-master.rb
new file mode 100644
index 0000000..d8c96fe
--- /dev/null
+++ b/MSH-PIC/hbase/bin/get-active-master.rb
@@ -0,0 +1,38 @@
+#!/usr/bin/env hbase-jruby
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Prints the hostname of the machine running the active master.
+
+include Java
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.ServerName
+java_import org.apache.hadoop.hbase.zookeeper.ZKWatcher
+java_import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker
+
+# disable debug/info logging on this script for clarity
+log_level = org.apache.log4j.Level::ERROR
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+
+config = HBaseConfiguration.create
+
+zk = ZKWatcher.new(config, 'get-active-master', nil)
+begin
+ puts MasterAddressTracker.getMasterAddress(zk).getHostname
+ensure
+ zk.close
+end
diff --git a/MSH-PIC/hbase/bin/graceful_stop.sh b/MSH-PIC/hbase/bin/graceful_stop.sh
new file mode 100644
index 0000000..89e3dd9
--- /dev/null
+++ b/MSH-PIC/hbase/bin/graceful_stop.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Move regions off a server then stop it. Optionally restart and reload.
+# Turn off the balancer before running this script.
+function usage {
+ echo "Usage: graceful_stop.sh [--config <conf-dir>] [-e] [--restart [--reload]] [--thrift] \
+[--rest] [-nob |--nobalancer ] <hostname>"
+ echo " thrift If we should stop/start thrift before/after the hbase stop/start"
+ echo " rest If we should stop/start rest before/after the hbase stop/start"
+ echo " restart If we should restart after graceful stop"
+ echo " reload Move offloaded regions back on to the restarted server"
+ echo " n|noack Enable noAck mode in RegionMover. This is a best effort mode for \
+moving regions"
+ echo " maxthreads xx Limit the number of threads used by the region mover. Default value is 1."
+ echo " movetimeout xx Timeout for moving regions. If regions are not moved by the timeout value,\
+exit with error. Default value is INT_MAX."
+ echo " hostname Hostname of server we are to stop"
+ echo " e|failfast Set -e so exit immediately if any command exits with non-zero status"
+ echo " nob| nobalancer Do not manage balancer states. This is only used as optimization in \
+rolling_restart.sh to avoid multiple calls to hbase shell"
+ exit 1
+}
+
+if [ $# -lt 1 ]; then
+ usage
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+# Get arguments
+restart=
+reload=
+noack=
+thrift=
+rest=
+movetimeout=2147483647
+maxthreads=1
+failfast=
+nob=false
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ --thrift) thrift=true; shift;;
+ --rest) rest=true; shift;;
+ --restart) restart=true; shift;;
+ --reload) reload=true; shift;;
+ --failfast | -e) failfast=true; shift;;
+ --noack | -n) noack="--noack"; shift;;
+ --maxthreads) shift; maxthreads=$1; shift;;
+ --movetimeout) shift; movetimeout=$1; shift;;
+ --nobalancer | -nob) nob=true; shift;;
+ --) shift; break;;
+ -*) usage ;;
+ *) break;; # terminate while loop
+ esac
+done
+
+# "$@" contains the rest. Must be at least the hostname left.
+if [ $# -lt 1 ]; then
+ usage
+fi
+
+# Emit a log line w/ ISO 8601 date prefixed
+log() {
+ echo `date +%Y-%m-%dT%H:%M:%S` $1
+}
+
+# See if we should set fail fast before we do anything.
+if [ "$failfast" != "" ]; then
+ log "Set failfast, will exit immediately if any command exits with non-zero status"
+ set -e
+fi
+
+hostname=$1
+filename="/tmp/$hostname"
+
+local=
+localhostname=`/bin/hostname`
+
+if [ "$localhostname" == "$hostname" ]; then
+ local=true
+fi
+
+if [ "$nob" == "true" ]; then
+ log "[ $0 ] skipping disabling balancer -nob argument is used"
+ HBASE_BALANCER_STATE=false
+else
+ log "Disabling load balancer"
+ HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | tail -1)
+ log "Previous balancer state was $HBASE_BALANCER_STATE"
+fi
+
+log "Unloading $hostname region(s)"
+HBASE_NOEXEC=true "$bin"/hbase --config ${HBASE_CONF_DIR} org.apache.hadoop.hbase.util.RegionMover \
+--filename $filename --maxthreads $maxthreads $noack --operation "unload" --timeout $movetimeout \
+--regionserverhost $hostname
+log "Unloaded $hostname region(s)"
+
+# Stop the server(s). Have to put hostname into its own little file for hbase-daemons.sh
+hosts="/tmp/$(basename $0).$$.tmp"
+echo $hostname >> $hosts
+if [ "$thrift" != "" ]; then
+ log "Stopping thrift server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop thrift
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop thrift
+ fi
+fi
+if [ "$rest" != "" ]; then
+ log "Stopping rest server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop rest
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop rest
+ fi
+fi
+log "Stopping regionserver on $hostname"
+if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop regionserver
+else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop regionserver
+fi
+if [ "$restart" != "" ]; then
+ log "Restarting regionserver on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start regionserver
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start regionserver
+ fi
+ if [ "$thrift" != "" ]; then
+ log "Restarting thrift server on $hostname"
+ # -b 0.0.0.0 says listen on all interfaces rather than just default.
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start thrift -b 0.0.0.0
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start thrift -b 0.0.0.0
+ fi
+ fi
+ if [ "$rest" != "" ]; then
+ log "Restarting rest server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start rest
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start rest
+ fi
+ fi
+ if [ "$reload" != "" ]; then
+ log "Reloading $hostname region(s)"
+ HBASE_NOEXEC=true "$bin"/hbase --config ${HBASE_CONF_DIR} \
+ org.apache.hadoop.hbase.util.RegionMover --filename $filename --maxthreads $maxthreads $noack \
+ --operation "load" --timeout $movetimeout --regionserverhost $hostname
+ log "Reloaded $hostname region(s)"
+ fi
+fi
+
+# Restore balancer state
+if [ "$HBASE_BALANCER_STATE" != "false" ] && [ "$nob" != "true" ]; then
+ log "Restoring balancer state to $HBASE_BALANCER_STATE"
+ echo "balance_switch $HBASE_BALANCER_STATE" | "$bin"/hbase --config ${HBASE_CONF_DIR} shell &> /dev/null
+else
+ log "[ $0 ] skipping restoring balancer"
+fi
+
+# Cleanup tmp files.
+trap "rm -f "/tmp/$(basename $0).*.tmp" &> /dev/null" EXIT
diff --git a/MSH-PIC/hbase/bin/hbase b/MSH-PIC/hbase/bin/hbase
new file mode 100644
index 0000000..cca5b60
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase
@@ -0,0 +1,687 @@
+#! /usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# The hbase command script. Based on the hadoop command script putting
+# in hbase classes, libs and configurations ahead of hadoop's.
+#
+# TODO: Narrow the amount of duplicated code.
+#
+# Environment Variables:
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HBASE_CLASSPATH Extra Java CLASSPATH entries.
+#
+# HBASE_CLASSPATH_PREFIX Extra Java CLASSPATH entries that should be
+# prefixed to the system classpath.
+#
+# HBASE_HEAPSIZE The maximum amount of heap to use.
+# Default is unset and uses the JVMs default setting
+# (usually 1/4th of the available memory).
+#
+# HBASE_LIBRARY_PATH HBase additions to JAVA_LIBRARY_PATH for adding
+# native libraries.
+#
+# HBASE_OPTS Extra Java runtime options.
+#
+# HBASE_CONF_DIR Alternate conf dir. Default is ${HBASE_HOME}/conf.
+#
+# HBASE_ROOT_LOGGER The root appender. Default is INFO,console
+#
+# JRUBY_HOME JRuby path: $JRUBY_HOME/lib/jruby.jar should exist.
+# Defaults to the jar packaged with HBase.
+#
+# JRUBY_OPTS Extra options (eg '--1.9') passed to hbase.
+# Empty by default.
+#
+# HBASE_SHELL_OPTS Extra options passed to the hbase shell.
+# Empty by default.
+#
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+# Detect if we are in hbase sources dir
+in_dev_env=false
+if [ -d "${HBASE_HOME}/target" ]; then
+ in_dev_env=true
+fi
+
+# Detect if we are in the omnibus tarball
+in_omnibus_tarball="false"
+if [ -f "${HBASE_HOME}/bin/hbase-daemons.sh" ]; then
+ in_omnibus_tarball="true"
+fi
+
+read -d '' options_string << EOF
+Options:
+ --config DIR Configuration directory to use. Default: ./conf
+ --hosts HOSTS Override the list in 'regionservers' file
+ --auth-as-server Authenticate to ZooKeeper using servers configuration
+ --internal-classpath Skip attempting to use client facing jars (WARNING: unstable results between versions)
+EOF
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ echo "Usage: hbase [<options>] <command> [<args>]"
+ echo "$options_string"
+ echo ""
+ echo "Commands:"
+ echo "Some commands take arguments. Pass no args or -h for usage."
+ echo " shell Run the HBase shell"
+ echo " hbck Run the HBase 'fsck' tool. Defaults read-only hbck1."
+ echo " Pass '-j /path/to/HBCK2.jar' to run hbase-2.x HBCK2."
+ echo " snapshot Tool for managing snapshots"
+ if [ "${in_omnibus_tarball}" = "true" ]; then
+ echo " wal Write-ahead-log analyzer"
+ echo " hfile Store file analyzer"
+ echo " zkcli Run the ZooKeeper shell"
+ echo " master Run an HBase HMaster node"
+ echo " regionserver Run an HBase HRegionServer node"
+ echo " zookeeper Run a ZooKeeper server"
+ echo " rest Run an HBase REST server"
+ echo " thrift Run the HBase Thrift server"
+ echo " thrift2 Run the HBase Thrift2 server"
+ echo " clean Run the HBase clean up script"
+ fi
+ echo " classpath Dump hbase CLASSPATH"
+ echo " mapredcp Dump CLASSPATH entries required by mapreduce"
+ echo " pe Run PerformanceEvaluation"
+ echo " ltt Run LoadTestTool"
+ echo " canary Run the Canary tool"
+ echo " version Print the version"
+ echo " completebulkload Run BulkLoadHFiles tool"
+ echo " regionsplitter Run RegionSplitter tool"
+ echo " rowcounter Run RowCounter tool"
+ echo " cellcounter Run CellCounter tool"
+ echo " pre-upgrade Run Pre-Upgrade validator tool"
+ echo " hbtop Run HBTop tool"
+ echo " CLASSNAME Run the class named CLASSNAME"
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+JAVA=$JAVA_HOME/bin/java
+
+# override default settings for this command, if applicable
+if [ -f "$HBASE_HOME/conf/hbase-env-$COMMAND.sh" ]; then
+ . "$HBASE_HOME/conf/hbase-env-$COMMAND.sh"
+fi
+
+add_size_suffix() {
+ # add an 'm' suffix if the argument is missing one, otherwise use what's there
+ local val="$1"
+ local lastchar=${val: -1}
+ if [[ "mMgG" == *$lastchar* ]]; then
+ echo $val
+ else
+ echo ${val}m
+ fi
+}
+
+if [[ -n "$HBASE_HEAPSIZE" ]]; then
+ JAVA_HEAP_MAX="-Xmx$(add_size_suffix $HBASE_HEAPSIZE)"
+fi
+
+if [[ -n "$HBASE_OFFHEAPSIZE" ]]; then
+ JAVA_OFFHEAP_MAX="-XX:MaxDirectMemorySize=$(add_size_suffix $HBASE_OFFHEAPSIZE)"
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+ORIG_IFS=$IFS
+IFS=
+
+# CLASSPATH initially contains $HBASE_CONF_DIR
+CLASSPATH="${HBASE_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+add_to_cp_if_exists() {
+ if [ -d "$@" ]; then
+ CLASSPATH=${CLASSPATH}:"$@"
+ fi
+}
+
+# For releases, add hbase & webapps to CLASSPATH
+# Webapps must come first else it messes up Jetty
+if [ -d "$HBASE_HOME/hbase-webapps" ]; then
+ add_to_cp_if_exists "${HBASE_HOME}"
+fi
+#add if we are in a dev environment
+if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then
+ if [ "$COMMAND" = "thrift" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-thrift/target"
+ elif [ "$COMMAND" = "thrift2" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-thrift/target"
+ elif [ "$COMMAND" = "rest" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-rest/target"
+ else
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target"
+ # Needed for GetJavaProperty check below
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target/classes"
+ fi
+fi
+
+#If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+# Allow this functionality to be disabled
+if [ "$HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP" != "true" ] ; then
+ HADOOP_IN_PATH=$(PATH="${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH" which hadoop 2>/dev/null)
+fi
+
+# Add libs to CLASSPATH
+declare shaded_jar
+
+if [ "${INTERNAL_CLASSPATH}" != "true" ]; then
+ # find our shaded jars
+ declare shaded_client
+ declare shaded_client_byo_hadoop
+ declare shaded_mapreduce
+ for f in "${HBASE_HOME}"/lib/shaded-clients/hbase-shaded-client*.jar; do
+ if [[ "${f}" =~ byo-hadoop ]]; then
+ shaded_client_byo_hadoop="${f}"
+ else
+ shaded_client="${f}"
+ fi
+ done
+ for f in "${HBASE_HOME}"/lib/shaded-clients/hbase-shaded-mapreduce*.jar; do
+ shaded_mapreduce="${f}"
+ done
+
+ # If command can use our shaded client, use it
+ declare -a commands_in_client_jar=("classpath" "version" "hbtop")
+ for c in "${commands_in_client_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_client_byo_hadoop}"
+ else
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_client}"
+ fi
+ break
+ fi
+ done
+
+ # If command needs our shaded mapreduce, use it
+ # N.B "mapredcp" is not included here because in the shaded case it skips our built classpath
+ declare -a commands_in_mr_jar=("hbck" "snapshot" "canary" "regionsplitter" "pre-upgrade")
+ for c in "${commands_in_mr_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ break
+ fi
+ done
+
+ # Some commands specifically only can use shaded mapreduce when we'll get a full hadoop classpath at runtime
+ if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ declare -a commands_in_mr_need_hadoop=("backup" "restore" "rowcounter" "cellcounter")
+ for c in "${commands_in_mr_need_hadoop[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ break
+ fi
+ done
+ fi
+fi
+
+
+if [ -n "${shaded_jar}" ] && [ -f "${shaded_jar}" ]; then
+ CLASSPATH="${CLASSPATH}:${shaded_jar}"
+# fall through to grabbing all the lib jars and hope we're in the omnibus tarball
+#
+# N.B. shell specifically can't rely on the shaded artifacts because RSGroups is only
+# available as non-shaded
+#
+# N.B. pe and ltt can't easily rely on shaded artifacts because they live in hbase-mapreduce:test-jar
+# and need some other jars that haven't been relocated. Currently enumerating that list
+# is too hard to be worth it.
+#
+else
+ for f in $HBASE_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ # make it easier to check for shaded/not later on.
+ shaded_jar=""
+fi
+for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/*.jar; do
+ if [[ ! "${f}" =~ ^.*/htrace-core-3.*\.jar$ ]] && \
+ [ "${f}" != "htrace-core.jar$" ] && \
+ [[ ! "${f}" =~ ^.*/slf4j-log4j.*$ ]]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ fi
+done
+
+# default log directory & file
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+if [ "$HBASE_LOGFILE" = "" ]; then
+ HBASE_LOGFILE='hbase.log'
+fi
+
+function append_path() {
+ if [ -z "$1" ]; then
+ echo "$2"
+ else
+ echo "$1:$2"
+ fi
+}
+
+JAVA_PLATFORM=""
+
+# if HBASE_LIBRARY_PATH is defined lets use it as first or second option
+if [ "$HBASE_LIBRARY_PATH" != "" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "$HBASE_LIBRARY_PATH")
+fi
+
+#If configured and available, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ # If built hbase, temporarily add hbase-server*.jar to classpath for GetJavaProperty
+ # Exclude hbase-server*-tests.jar
+ temporary_cp=
+ for f in "${HBASE_HOME}"/lib/hbase-server*.jar; do
+ if [[ ! "${f}" =~ ^.*\-tests\.jar$ ]]; then
+ temporary_cp=":$f"
+ fi
+ done
+ HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH="$CLASSPATH${temporary_cp}" "${HADOOP_IN_PATH}" \
+ org.apache.hadoop.hbase.util.GetJavaProperty java.library.path)
+ if [ -n "$HADOOP_JAVA_LIBRARY_PATH" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "${JAVA_LIBRARY_PATH}" "$HADOOP_JAVA_LIBRARY_PATH")
+ fi
+ CLASSPATH=$(append_path "${CLASSPATH}" "$(${HADOOP_IN_PATH} classpath 2>/dev/null)")
+else
+ # Otherwise, if we're providing Hadoop we should include htrace 3 if we were built with a version that needs it.
+ for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/htrace-core-3*.jar "${HBASE_HOME}"/lib/client-facing-thirdparty/htrace-core.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ # Some commands require special handling when using shaded jars. For these cases, we rely on hbase-shaded-mapreduce
+ # instead of hbase-shaded-client* because we make use of some IA.Private classes that aren't in the latter. However,
+ # we don't invoke them using the "hadoop jar" command so we need to ensure there are some Hadoop classes available
+ # when we're not doing runtime hadoop classpath lookup.
+ #
+ # luckily the set of classes we need are those packaged in the shaded-client.
+ for c in "${commands_in_mr_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ] && [ -n "${shaded_jar}" ]; then
+ CLASSPATH="${CLASSPATH}:${shaded_client:?We couldn\'t find the shaded client jar even though we did find the shaded MR jar. for command ${COMMAND} we need both. please use --internal-classpath as a workaround.}"
+ break
+ fi
+ done
+fi
+
+# Add user-specified CLASSPATH last
+if [ "$HBASE_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HBASE_CLASSPATH}
+fi
+
+# Add user-specified CLASSPATH prefix first
+if [ "$HBASE_CLASSPATH_PREFIX" != "" ]; then
+ CLASSPATH=${HBASE_CLASSPATH_PREFIX}:${CLASSPATH}
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ HBASE_HOME=`cygpath -d "$HBASE_HOME"`
+ HBASE_LOG_DIR=`cygpath -d "$HBASE_LOG_DIR"`
+fi
+
+if [ -d "${HBASE_HOME}/build/native" -o -d "${HBASE_HOME}/lib/native" ]; then
+ if [ -z $JAVA_PLATFORM ]; then
+ JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
+ fi
+ if [ -d "$HBASE_HOME/build/native" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "${HBASE_HOME}/build/native/${JAVA_PLATFORM}/lib")
+ fi
+
+ if [ -d "${HBASE_HOME}/lib/native" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "${HBASE_HOME}/lib/native/${JAVA_PLATFORM}")
+ fi
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+#Set the right GC options based on the what we are running
+declare -a server_cmds=("master" "regionserver" "thrift" "thrift2" "rest" "avro" "zookeeper")
+for cmd in ${server_cmds[@]}; do
+ if [[ $cmd == $COMMAND ]]; then
+ server=true
+ break
+ fi
+done
+
+if [[ $server ]]; then
+ HBASE_OPTS="$HBASE_OPTS $SERVER_GC_OPTS"
+else
+ HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS"
+fi
+
+if [ "$AUTH_AS_SERVER" == "true" ] || [ "$COMMAND" = "hbck" ]; then
+ if [ -n "$HBASE_SERVER_JAAS_OPTS" ]; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_SERVER_JAAS_OPTS"
+ else
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
+ fi
+fi
+
+# check if the command needs jline
+declare -a jline_cmds=("zkcli" "org.apache.hadoop.hbase.zookeeper.ZKMainServer")
+for cmd in "${jline_cmds[@]}"; do
+ if [[ $cmd == "$COMMAND" ]]; then
+ jline_needed=true
+ break
+ fi
+done
+
+# for jruby
+# (1) for the commands which need jruby (see jruby_cmds defined below)
+# A. when JRUBY_HOME is specified explicitly, eg. export JRUBY_HOME=/usr/local/share/jruby
+# CLASSPATH and HBASE_OPTS are updated according to JRUBY_HOME specified
+# B. when JRUBY_HOME is not specified explicitly
+# add jruby packaged with HBase to CLASSPATH
+# (2) for other commands, do nothing
+
+# check if the command needs jruby
+declare -a jruby_cmds=("shell" "org.jruby.Main")
+for cmd in "${jruby_cmds[@]}"; do
+ if [[ $cmd == "$COMMAND" ]]; then
+ jruby_needed=true
+ break
+ fi
+done
+
+add_maven_deps_to_classpath() {
+ f="${HBASE_HOME}/hbase-build-configuration/target/$1"
+
+ if [ ! -f "${f}" ]; then
+ echo "As this is a development environment, we need ${f} to be generated from maven (command: mvn install -DskipTests)"
+ exit 1
+ fi
+ CLASSPATH=${CLASSPATH}:$(cat "${f}")
+}
+
+#Add the development env class path stuff
+if $in_dev_env; then
+ add_maven_deps_to_classpath "cached_classpath.txt"
+
+ if [[ $jline_needed ]]; then
+ add_maven_deps_to_classpath "cached_classpath_jline.txt"
+ elif [[ $jruby_needed ]]; then
+ add_maven_deps_to_classpath "cached_classpath_jruby.txt"
+ fi
+fi
+
+# the command needs jruby
+if [[ $jruby_needed ]]; then
+ if [ "$JRUBY_HOME" != "" ]; then # JRUBY_HOME is specified explicitly, eg. export JRUBY_HOME=/usr/local/share/jruby
+ # add jruby.jar into CLASSPATH
+ CLASSPATH="$JRUBY_HOME/lib/jruby.jar:$CLASSPATH"
+
+ # add jruby to HBASE_OPTS
+ HBASE_OPTS="$HBASE_OPTS -Djruby.home=$JRUBY_HOME -Djruby.lib=$JRUBY_HOME/lib"
+
+ else # JRUBY_HOME is not specified explicitly
+ if ! $in_dev_env; then # not in dev environment
+ # add jruby packaged with HBase to CLASSPATH
+ JRUBY_PACKAGED_WITH_HBASE="$HBASE_HOME/lib/ruby/*.jar"
+ for jruby_jar in $JRUBY_PACKAGED_WITH_HBASE; do
+ CLASSPATH=$jruby_jar:$CLASSPATH;
+ done
+ fi
+ fi
+fi
+
+# figure out which class to run
+if [ "$COMMAND" = "shell" ] ; then
+ #find the hbase ruby sources
+ if [ -d "$HBASE_HOME/lib/ruby" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby"
+ else
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby"
+ fi
+ HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS"
+ CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb"
+elif [ "$COMMAND" = "hbck" ] ; then
+ # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck.
+ case "${1}" in
+ -j)
+ # Found -j parameter. Add arg to CLASSPATH and set CLASS to HBCK2.
+ shift
+ JAR="${1}"
+ if [ ! -f "${JAR}" ]; then
+ echo "${JAR} file not found!"
+ echo "Usage: hbase [<options>] hbck -jar /path/to/HBCK2.jar [<args>]"
+ exit 1
+ fi
+ CLASSPATH="${JAR}:${CLASSPATH}";
+ CLASS="org.apache.hbase.HBCK2"
+ shift # past argument=value
+ ;;
+ *)
+ CLASS='org.apache.hadoop.hbase.util.HBaseFsck'
+ ;;
+ esac
+elif [ "$COMMAND" = "wal" ] ; then
+ CLASS='org.apache.hadoop.hbase.wal.WALPrettyPrinter'
+elif [ "$COMMAND" = "hfile" ] ; then
+ CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
+elif [ "$COMMAND" = "zkcli" ] ; then
+ CLASS="org.apache.hadoop.hbase.zookeeper.ZKMainServer"
+ for f in $HBASE_HOME/lib/zkcli/*.jar; do
+ CLASSPATH="${CLASSPATH}:$f";
+ done
+elif [ "$COMMAND" = "upgrade" ] ; then
+ echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
+ echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."
+ exit 1
+elif [ "$COMMAND" = "snapshot" ] ; then
+ SUBCOMMAND=$1
+ shift
+ if [ "$SUBCOMMAND" = "create" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.CreateSnapshot"
+ elif [ "$SUBCOMMAND" = "info" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
+ elif [ "$SUBCOMMAND" = "export" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.ExportSnapshot"
+ else
+ echo "Usage: hbase [<options>] snapshot <subcommand> [<args>]"
+ echo "$options_string"
+ echo ""
+ echo "Subcommands:"
+ echo " create Create a new snapshot of a table"
+ echo " info Tool for dumping snapshot information"
+ echo " export Export an existing snapshot"
+ exit 1
+ fi
+elif [ "$COMMAND" = "master" ] ; then
+ CLASS='org.apache.hadoop.hbase.master.HMaster'
+ if [ "$1" != "stop" ] && [ "$1" != "clear" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS"
+ fi
+elif [ "$COMMAND" = "regionserver" ] ; then
+ CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS $HBASE_REGIONSERVER_JMX_OPTS"
+ fi
+elif [ "$COMMAND" = "thrift" ] ; then
+ CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
+ fi
+elif [ "$COMMAND" = "thrift2" ] ; then
+ CLASS='org.apache.hadoop.hbase.thrift2.ThriftServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
+ fi
+elif [ "$COMMAND" = "rest" ] ; then
+ CLASS='org.apache.hadoop.hbase.rest.RESTServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REST_OPTS"
+ fi
+elif [ "$COMMAND" = "zookeeper" ] ; then
+ CLASS='org.apache.hadoop.hbase.zookeeper.HQuorumPeer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_ZOOKEEPER_OPTS"
+ fi
+elif [ "$COMMAND" = "clean" ] ; then
+ case $1 in
+ --cleanZk|--cleanHdfs|--cleanAll)
+ matches="yes" ;;
+ *) ;;
+ esac
+ if [ $# -ne 1 -o "$matches" = "" ]; then
+ echo "Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)"
+ echo "Options: "
+ echo " --cleanZk cleans hbase related data from zookeeper."
+ echo " --cleanHdfs cleans hbase related data from hdfs."
+ echo " --cleanAll cleans hbase related data from both zookeeper and hdfs."
+ exit 1;
+ fi
+ "$bin"/hbase-cleanup.sh --config ${HBASE_CONF_DIR} $@
+ exit $?
+elif [ "$COMMAND" = "mapredcp" ] ; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ if [ "${INTERNAL_CLASSPATH}" != "true" ] && [ -f "${shaded_jar}" ]; then
+ echo -n "${shaded_jar}"
+ for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/*.jar; do
+ if [[ ! "${f}" =~ ^.*/htrace-core-3.*\.jar$ ]] && \
+ [ "${f}" != "htrace-core.jar$" ] && \
+ [[ ! "${f}" =~ ^.*/slf4j-log4j.*$ ]]; then
+ echo -n ":${f}"
+ fi
+ done
+ echo ""
+ exit 0
+ fi
+ CLASS='org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool'
+elif [ "$COMMAND" = "classpath" ] ; then
+ echo "$CLASSPATH"
+ exit 0
+elif [ "$COMMAND" = "pe" ] ; then
+ CLASS='org.apache.hadoop.hbase.PerformanceEvaluation'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_PE_OPTS"
+elif [ "$COMMAND" = "ltt" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.LoadTestTool'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_LTT_OPTS"
+elif [ "$COMMAND" = "canary" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.CanaryTool'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_CANARY_OPTS"
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.VersionInfo'
+elif [ "$COMMAND" = "regionsplitter" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.RegionSplitter'
+elif [ "$COMMAND" = "rowcounter" ] ; then
+ CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
+elif [ "$COMMAND" = "cellcounter" ] ; then
+ CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
+elif [ "$COMMAND" = "pre-upgrade" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.PreUpgradeValidator'
+elif [ "$COMMAND" = "completebulkload" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.BulkLoadHFilesTool'
+elif [ "$COMMAND" = "hbtop" ] ; then
+ CLASS='org.apache.hadoop.hbase.hbtop.HBTop'
+ if [ -n "${shaded_jar}" ] ; then
+ for f in "${HBASE_HOME}"/lib/hbase-hbtop*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ for f in "${HBASE_HOME}"/lib/commons-lang3*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ fi
+
+ HBASE_OPTS="${HBASE_OPTS} -Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+else
+ CLASS=$COMMAND
+fi
+
+# Have the JVM dump heap if we run out of memory. Files are written to the launch
+# directory and are named like the following: java_pid21612.hprof. Apparently it
+# doesn't 'cost' anything to have this flag enabled. It's a 1.6 flag only. See:
+# http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better
+HBASE_OPTS="$HBASE_OPTS -Dhbase.log.dir=$HBASE_LOG_DIR"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.log.file=$HBASE_LOGFILE"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.id.str=$HBASE_IDENT_STRING"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.root.logger=${HBASE_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+ export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH"
+fi
+
+# Enable security logging on the master and regionserver only
+if [ "$COMMAND" = "master" ] || [ "$COMMAND" = "regionserver" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,RFAS}"
+else
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,NullAppender}"
+fi
+
+HEAP_SETTINGS="$JAVA_HEAP_MAX $JAVA_OFFHEAP_MAX"
+# by now if we're running a command it means we need logging
+for f in ${HBASE_HOME}/lib/client-facing-thirdparty/slf4j-log4j*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+done
+
+# Exec unless HBASE_NOEXEC is set.
+export CLASSPATH
+if [ "${DEBUG}" = "true" ]; then
+ echo "classpath=${CLASSPATH}" >&2
+ HBASE_OPTS="${HBASE_OPTS} -Xdiag"
+fi
+
+if [ "${HBASE_NOEXEC}" != "" ]; then
+ "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@"
+else
+ export JVM_PID="$$"
+ exec "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@"
+fi
diff --git a/MSH-PIC/hbase/bin/hbase-cleanup.sh b/MSH-PIC/hbase/bin/hbase-cleanup.sh
new file mode 100644
index 0000000..3a764df
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-cleanup.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# cleans hbase related data from zookeeper and hdfs if no hbase process is alive.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+
+usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)"
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+
+case $1 in
+ --cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)
+ matches="yes" ;;
+ *) ;;
+esac
+if [ $# -ne 1 -o "$matches" = "" ]; then
+ echo $usage
+ exit 1;
+fi
+
+format_option=$1;
+
+zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+hrootdir=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir`
+if [ "$hrootdir" == "null" ]; then hrootdir="file:///tmp/hbase-${USER}/hbase"; fi
+
+check_for_znodes() {
+ command=$1;
+ case $command in
+ regionservers)
+ zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+ if [ "$zchild" == "null" ]; then zchild="rs"; fi
+ ;;
+ backupmasters)
+ zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.backup.masters`
+ if [ "$zchild" == "null" ]; then zchild="backup-masters"; fi
+ ;;
+ esac
+ znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+ if [ "$znodes" != "" ]; then
+ echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data."
+ echo #force a newline
+ exit 1;
+ else
+ echo -n "All ZNode(s) of $command are expired."
+ fi
+ echo #force a newline
+}
+
+execute_zk_command() {
+ command=$1;
+ "$bin"/hbase zkcli $command 2>&1
+}
+
+execute_hdfs_command() {
+ command=$1;
+ "$bin"/hbase org.apache.hadoop.fs.FsShell $command 2>&1
+}
+
+execute_clean_acls() {
+ command=$1;
+ "$bin"/hbase org.apache.hadoop.hbase.zookeeper.ZkAclReset $command 2>&1
+}
+
+clean_up() {
+ case $1 in
+ --cleanZk)
+ execute_zk_command "rmr ${zparent}";
+ ;;
+ --cleanHdfs)
+ execute_hdfs_command "-rm -R ${hrootdir}"
+ ;;
+ --cleanAll)
+ execute_zk_command "rmr ${zparent}";
+ execute_hdfs_command "-rm -R ${hrootdir}"
+ ;;
+ --cleanAcls)
+ execute_clean_acls;
+ ;;
+ *)
+ ;;
+ esac
+}
+
+check_znode_exists() {
+ command=$1
+ "$bin"/hbase zkcli stat $command 2>&1 | grep "Node does not exist\|Connection refused"
+}
+
+check_znode_exists $zparent
+if [ $? -ne 0 ]; then
+ # make sure the online region server(s) znode(s) have been deleted before continuing
+ check_for_znodes regionservers
+ # make sure the backup master(s) znode(s) has been deleted before continuing
+ check_for_znodes backupmasters
+ # make sure the master znode has been deleted before continuing
+ zmaster=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master`
+ if [ "$zmaster" == "null" ]; then zmaster="master"; fi
+ zmaster=$zparent/$zmaster
+ check_znode_exists $zmaster
+ if [ $? -ne 0 ]; then
+ echo -n "Master ZNode is not expired. Exiting without cleaning hbase data."
+ echo #force a new line
+ exit 1
+ else
+ echo "Active Master ZNode also expired."
+ fi
+ echo #force a newline
+else
+ echo "HBase parent znode ${zparent} does not exist."
+fi
+
+# cleans zookeeper and/or hdfs data.
+clean_up $format_option
diff --git a/MSH-PIC/hbase/bin/hbase-common.sh b/MSH-PIC/hbase/bin/hbase-common.sh
new file mode 100644
index 0000000..0a474f7
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-common.sh
@@ -0,0 +1,41 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# Shared function to wait for a process to end. Takes the pid and the command name as parameters
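+# Illustrative call, as made by the stop handler in hbase-daemon.sh:
+#   waitForProcessEnd "$pidToKill" "$command"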
+waitForProcessEnd() {
+ pidKilled=$1
+ commandName=$2
+ processedAt=`date +%s`
+ while kill -0 $pidKilled > /dev/null 2>&1;
+ do
+ echo -n "."
+ sleep 1;
+ # if process persists more than $HBASE_STOP_TIMEOUT (default 1200 sec) no mercy
+ if [ $(( `date +%s` - $processedAt )) -gt ${HBASE_STOP_TIMEOUT:-1200} ]; then
+ break;
+ fi
+ done
+ # process still there : kill -9
+ if kill -0 $pidKilled > /dev/null 2>&1; then
+ echo -n force stopping $commandName with kill -9 $pidKilled
+ $JAVA_HOME/bin/jstack -l $pidKilled > "$logout" 2>&1
+ kill -9 $pidKilled > /dev/null 2>&1
+ fi
+ # Add a CR after we're done w/ dots.
+ echo
+}
diff --git a/MSH-PIC/hbase/bin/hbase-config.cmd b/MSH-PIC/hbase/bin/hbase-config.cmd
new file mode 100644
index 0000000..5c1f186
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-config.cmd
@@ -0,0 +1,78 @@
+@rem/*
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem included in all the hbase scripts with source command
+@rem should not be executable directly
+@rem also should not be passed any arguments, since we need original $*
+@rem Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
+
+@rem Make sure java environment is set
+@rem
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+if "%1" == "--config" (
+ set HBASE_CONF_DIR=%2
+ shift
+ shift
+)
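+@rem Illustrative call, as forwarded by hbase.cmd (the conf path is a placeholder):
+@rem   call hbase-config.cmd --config C:\hbase\conf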
+
+@rem the root of the hbase installation
+if not defined HBASE_HOME (
+ set HBASE_HOME=%HBASE_BIN_PATH%\..
+)
+
+@rem Allow alternate hbase conf dir location.
+if not defined HBASE_CONF_DIR (
+ set HBASE_CONF_DIR=%HBASE_HOME%\conf
+)
+
+@rem List of hbase regions servers.
+if not defined HBASE_REGIONSERVERS (
+ set HBASE_REGIONSERVERS=%HBASE_CONF_DIR%\regionservers
+)
+
+@rem List of hbase secondary masters.
+if not defined HBASE_BACKUP_MASTERS (
+ set HBASE_BACKUP_MASTERS=%HBASE_CONF_DIR%\backup-masters
+)
+
+@rem Source the hbase-env.sh. Will have JAVA_HOME defined.
+if exist "%HBASE_CONF_DIR%\hbase-env.cmd" (
+ call "%HBASE_CONF_DIR%\hbase-env.cmd"
+)
+
+if not defined JAVA_HOME (
+ echo Warning: JAVA_HOME environment variable is not set. Defaulting to c:\apps\java
+ set JAVA_HOME=c:\apps\java
+)
+
+if not exist "%JAVA_HOME%\bin\java.exe" (
+ echo Error: JAVA_HOME is incorrectly set or could not find java at the location %JAVA_HOME%\bin\
+ exit /B 2
+)
+
+set JAVA="%JAVA_HOME%\bin\java"
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
\ No newline at end of file
diff --git a/MSH-PIC/hbase/bin/hbase-config.sh b/MSH-PIC/hbase/bin/hbase-config.sh
new file mode 100644
index 0000000..1054751
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-config.sh
@@ -0,0 +1,170 @@
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# included in all the hbase scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+# Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
+
+# resolve links - "${BASH_SOURCE-$0}" may be a softlink
+
+this="${BASH_SOURCE-$0}"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin">/dev/null; pwd`
+this="$bin/$script"
+
+# the root of the hbase installation
+if [ -z "$HBASE_HOME" ]; then
+ export HBASE_HOME=`dirname "$this"`/..
+fi
+
+# check to see if the conf dir or hbase home are given as optional arguments
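+# These options are consumed from the calling script's command line, e.g.
+# (paths are placeholders):
+#   bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver
+#   bin/hbase --debug shell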
+while [ $# -gt 1 ]
+do
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ HBASE_CONF_DIR=$confdir
+ elif [ "--hosts" = "$1" ]
+ then
+ shift
+ hosts=$1
+ shift
+ HBASE_REGIONSERVERS=$hosts
+ elif [ "--auth-as-server" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ AUTH_AS_SERVER="true"
+ elif [ "--autostart-window-size" = "$1" ]
+ then
+ shift
+ AUTOSTART_WINDOW_SIZE=$(( $1 + 0 ))
+ if [ $AUTOSTART_WINDOW_SIZE -lt 0 ]; then
+      echo "Invalid value for --autostart-window-size, should be a non-negative integer"
+ exit 1
+ fi
+ shift
+ elif [ "--autostart-window-retry-limit" = "$1" ]
+ then
+ shift
+ AUTOSTART_WINDOW_RETRY_LIMIT=$(( $1 + 0 ))
+ if [ $AUTOSTART_WINDOW_RETRY_LIMIT -lt 0 ]; then
+      echo "Invalid value for --autostart-window-retry-limit, should be a non-negative integer"
+ exit 1
+ fi
+ shift
+ elif [ "--internal-classpath" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ INTERNAL_CLASSPATH="true"
+ elif [ "--debug" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ DEBUG="true"
+ else
+ # Presume we are at end of options and break
+ break
+ fi
+done
+
+# Allow alternate hbase conf dir location.
+HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
+# List of hbase regions servers.
+HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}"
+# List of hbase secondary masters.
+HBASE_BACKUP_MASTERS="${HBASE_BACKUP_MASTERS:-$HBASE_CONF_DIR/backup-masters}"
+if [ -n "$HBASE_JMX_BASE" ] && [ -z "$HBASE_JMX_OPTS" ]; then
+ HBASE_JMX_OPTS="$HBASE_JMX_BASE"
+fi
+# Thrift JMX opts
+if [ -n "$HBASE_JMX_OPTS" ] && [ -z "$HBASE_THRIFT_JMX_OPTS" ]; then
+ HBASE_THRIFT_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10103"
+fi
+# Thrift opts
+if [ -z "$HBASE_THRIFT_OPTS" ]; then
+ export HBASE_THRIFT_OPTS="$HBASE_THRIFT_JMX_OPTS"
+fi
+
+# REST JMX opts
+if [ -n "$HBASE_JMX_OPTS" ] && [ -z "$HBASE_REST_JMX_OPTS" ]; then
+ HBASE_REST_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10105"
+fi
+# REST opts
+if [ -z "$HBASE_REST_OPTS" ]; then
+ export HBASE_REST_OPTS="$HBASE_REST_JMX_OPTS"
+fi
+
+# Source the hbase-env.sh. Will have JAVA_HOME defined.
+# HBASE-7817 - Source the hbase-env.sh only if it has not already been done. HBASE_ENV_INIT keeps track of it.
+if [ -z "$HBASE_ENV_INIT" ] && [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
+ . "${HBASE_CONF_DIR}/hbase-env.sh"
+ export HBASE_ENV_INIT="true"
+fi
+
+# Verify if hbase has the mlock agent
+if [ "$HBASE_REGIONSERVER_MLOCK" = "true" ]; then
+ MLOCK_AGENT="$HBASE_HOME/lib/native/libmlockall_agent.so"
+ if [ ! -f "$MLOCK_AGENT" ]; then
+ cat 1>&2 <<EOF
+Unable to find mlockall_agent, hbase must be compiled with -Pnative
+EOF
+ exit 1
+ fi
+ if [ -z "$HBASE_REGIONSERVER_UID" ] || [ "$HBASE_REGIONSERVER_UID" == "$USER" ]; then
+ HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -agentpath:$MLOCK_AGENT"
+ else
+ HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -agentpath:$MLOCK_AGENT=user=$HBASE_REGIONSERVER_UID"
+ fi
+fi
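+# Illustrative hbase-env.sh settings for the mlock agent (values are placeholders):
+#   export HBASE_REGIONSERVER_MLOCK=true
+#   export HBASE_REGIONSERVER_UID=hbase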
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
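+# The default of 4 can be overridden from the environment before starting a daemon, e.g.:
+#   MALLOC_ARENA_MAX=2 bin/hbase-daemon.sh start regionserver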
+
+# Now having JAVA_HOME defined is required
+if [ -z "$JAVA_HOME" ]; then
+ cat 1>&2 <<EOF
++======================================================================+
+| Error: JAVA_HOME is not set |
++----------------------------------------------------------------------+
+| Please download the latest Sun JDK from the Sun Java web site |
+| > http://www.oracle.com/technetwork/java/javase/downloads |
+| |
+| HBase requires Java 1.8 or later. |
++======================================================================+
+EOF
+ exit 1
+fi
diff --git a/MSH-PIC/hbase/bin/hbase-daemon.sh b/MSH-PIC/hbase/bin/hbase-daemon.sh
new file mode 100644
index 0000000..0e55665
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-daemon.sh
@@ -0,0 +1,371 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Runs a Hadoop hbase command as a daemon.
+#
+# Environment Variables
+#
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_LOG_DIR Where log files are stored. PWD by default.
+#   HBASE_PID_DIR      Where the pid files are stored. /tmp by default.
+#   HBASE_IDENT_STRING   A string representing this instance of hbase. $USER by default
+# HBASE_NICENESS The scheduling priority for daemons. Defaults to 0.
+# HBASE_STOP_TIMEOUT Time, in seconds, after which we kill -9 the server if it has not stopped.
+# Default 1200 seconds.
+#
+# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh
+
+usage="Usage: hbase-daemon.sh [--config <conf-dir>]\
+ [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>]\
+ (start|stop|restart|autostart|autorestart|foreground_start) <hbase-command> \
+ <args...>"
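+# Illustrative invocations (the conf path is a placeholder):
+#   bin/hbase-daemon.sh start master
+#   bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver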
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+# get arguments
+startStop=$1
+shift
+
+command=$1
+shift
+
+hbase_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv -f "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv -f "$log" "$log.$num";
+ fi
+}
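+# Illustrative call (the optional second argument is the number of rotated logs to keep, default 5):
+#   hbase_rotate_log "$HBASE_LOGOUT" 5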
+
+cleanAfterRun() {
+ if [ -f ${HBASE_PID} ]; then
+ # If the process is still running time to tear it down.
+ kill -9 `cat ${HBASE_PID}` > /dev/null 2>&1
+ rm -f ${HBASE_PID} > /dev/null 2>&1
+ fi
+
+ if [ -f ${HBASE_ZNODE_FILE} ]; then
+ if [ "$command" = "master" ]; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS" $bin/hbase master clear > /dev/null 2>&1
+ else
+ #call ZK to delete the node
+ ZNODE=`cat ${HBASE_ZNODE_FILE}`
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS" $bin/hbase zkcli delete ${ZNODE} > /dev/null 2>&1
+ fi
+ rm ${HBASE_ZNODE_FILE}
+ fi
+}
+
+check_before_start(){
+  # check that the process is not already running
+ mkdir -p "$HBASE_PID_DIR"
+ if [ -f $HBASE_PID ]; then
+ if kill -0 `cat $HBASE_PID` > /dev/null 2>&1; then
+ echo $command running as process `cat $HBASE_PID`. Stop it first.
+ exit 1
+ fi
+ fi
+}
+
+wait_until_done ()
+{
+ p=$1
+ cnt=${HBASE_SLAVE_TIMEOUT:-300}
+ origcnt=$cnt
+ while kill -0 $p > /dev/null 2>&1; do
+ if [ $cnt -gt 1 ]; then
+ cnt=`expr $cnt - 1`
+ sleep 1
+ else
+ echo "Process did not complete after $origcnt seconds, killing."
+ kill -9 $p
+ exit 1
+ fi
+ done
+ return 0
+}
+
+# get log directory
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+mkdir -p "$HBASE_LOG_DIR"
+
+if [ "$HBASE_PID_DIR" = "" ]; then
+ HBASE_PID_DIR=/tmp
+fi
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+ export HBASE_IDENT_STRING="$USER"
+fi
+
+# Some variables
+# Work out the java location so we can print the version into the log.
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME
+export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log
+
+if [ -z "${HBASE_ROOT_LOGGER}" ]; then
+#export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"INFO,RFA"}
+export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"ERROR,RFA"}
+fi
+
+if [ -z "${HBASE_SECURITY_LOGGER}" ]; then
+#export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"INFO,RFAS"}
+export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"ERROR,RFAS"}
+fi
+
+HBASE_LOGOUT=${HBASE_LOGOUT:-"$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out"}
+HBASE_LOGGC=${HBASE_LOGGC:-"$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.gc"}
+HBASE_LOGLOG=${HBASE_LOGLOG:-"${HBASE_LOG_DIR}/${HBASE_LOGFILE}"}
+HBASE_PID=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid
+export HBASE_ZNODE_FILE=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.znode
+export HBASE_AUTOSTART_FILE=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.autostart
+
+if [ -n "$SERVER_GC_OPTS" ]; then
+ export SERVER_GC_OPTS=${SERVER_GC_OPTS/"-Xloggc:<FILE-PATH>"/"-Xloggc:${HBASE_LOGGC}"}
+fi
+if [ -n "$CLIENT_GC_OPTS" ]; then
+ export CLIENT_GC_OPTS=${CLIENT_GC_OPTS/"-Xloggc:<FILE-PATH>"/"-Xloggc:${HBASE_LOGGC}"}
+fi
+
+# Set default scheduling priority
+if [ "$HBASE_NICENESS" = "" ]; then
+ export HBASE_NICENESS=0
+fi
+
+thiscmd="$bin/$(basename ${BASH_SOURCE-$0})"
+args=$@
+
+case $startStop in
+
+(start)
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ hbase_rotate_log $HBASE_LOGGC
+ echo running $command, logging to $HBASE_LOGOUT
+ $thiscmd --config "${HBASE_CONF_DIR}" \
+ foreground_start $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ disown -h -r
+ sleep 1; head "${HBASE_LOGOUT}"
+ ;;
+
+(autostart)
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ hbase_rotate_log $HBASE_LOGGC
+ echo running $command, logging to $HBASE_LOGOUT
+ nohup $thiscmd --config "${HBASE_CONF_DIR}" --autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} \
+ internal_autostart $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ ;;
+
+(autorestart)
+ echo running $command, logging to $HBASE_LOGOUT
+ # stop the command
+ $thiscmd --config "${HBASE_CONF_DIR}" stop $command $args &
+ wait_until_done $!
+ # wait a user-specified sleep period
+ sp=${HBASE_RESTART_SLEEP:-3}
+ if [ $sp -gt 0 ]; then
+ sleep $sp
+ fi
+
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ nohup $thiscmd --config "${HBASE_CONF_DIR}" --autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} \
+ internal_autostart $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ ;;
+
+(foreground_start)
+ trap cleanAfterRun SIGHUP SIGINT SIGTERM EXIT
+ if [ "$HBASE_NO_REDIRECT_LOG" != "" ]; then
+ # NO REDIRECT
+ echo "`date` Starting $command on `hostname`"
+ echo "`ulimit -a`"
+ # in case the parent shell gets the kill make sure to trap signals.
+ # Only one will get called. Either the trap or the flow will go through.
+ nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ $command "$@" start &
+ else
+ echo "`date` Starting $command on `hostname`" >> ${HBASE_LOGLOG}
+ echo "`ulimit -a`" >> "$HBASE_LOGLOG" 2>&1
+ # in case the parent shell gets the kill make sure to trap signals.
+ # Only one will get called. Either the trap or the flow will go through.
+ nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ $command "$@" start >> ${HBASE_LOGOUT} 2>&1 &
+ fi
+ # Add to the command log file vital stats on our environment.
+ hbase_pid=$!
+ echo $hbase_pid > ${HBASE_PID}
+ wait $hbase_pid
+ ;;
+
+(internal_autostart)
+ ONE_HOUR_IN_SECS=3600
+ autostartWindowStartDate=`date +%s`
+ autostartCount=0
+ touch "$HBASE_AUTOSTART_FILE"
+
+ # keep starting the command until asked to stop. Reloop on software crash
+ while true
+ do
+ hbase_rotate_log $HBASE_LOGGC
+ if [ -f $HBASE_PID ] && kill -0 "$(cat "$HBASE_PID")" > /dev/null 2>&1 ; then
+ wait "$(cat "$HBASE_PID")"
+ else
+      # if the autostart file no longer exists, the stop command (or an operator) removed it, so stop autostarting
+ if [ ! -f "$HBASE_AUTOSTART_FILE" ]; then
+        echo "`date` HBase appears to have been stopped and the autostart file removed. Exiting autostart process" >> ${HBASE_LOGOUT}
+ exit 1
+ fi
+
+ echo "`date` Autostarting hbase $command service. Attempt no: $(( $autostartCount + 1))" >> ${HBASE_LOGLOG}
+ touch "$HBASE_AUTOSTART_FILE"
+ $thiscmd --config "${HBASE_CONF_DIR}" foreground_start $command $args
+ autostartCount=$(( $autostartCount + 1 ))
+
+ # HBASE-6504 - only take the first line of the output in case verbose gc is on
+ distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+
+ if [ "$distMode" != 'false' ]; then
+ #if the cluster is being stopped then do not restart it again.
+ zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+ if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+ zkrunning=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.state`
+ if [ "$zkrunning" == "null" ]; then zkrunning="running"; fi
+ zkFullRunning=$zparent/$zkrunning
+ $bin/hbase zkcli stat $zkFullRunning 2>&1 | grep "Node does not exist" 1>/dev/null 2>&1
+
+ #grep returns 0 if it found something, 1 otherwise
+ if [ $? -eq 0 ]; then
+ echo "`date` hbase znode does not exist. Exiting Autostart process" >> ${HBASE_LOGOUT}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+
+ #If ZooKeeper cannot be found, then do not restart
+ $bin/hbase zkcli stat $zkFullRunning 2>&1 | grep Exception | grep ConnectionLoss 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "`date` zookeeper not found. Exiting Autostart process" >> ${HBASE_LOGOUT}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+ fi
+ fi
+
+ curDate=`date +%s`
+ autostartWindowReset=false
+
+    # reset the autostart window if the elapsed time exceeds the window size
+ if [ $AUTOSTART_WINDOW_SIZE -gt 0 ] && [ $(( $curDate - $autostartWindowStartDate )) -gt $(( $AUTOSTART_WINDOW_SIZE * $ONE_HOUR_IN_SECS )) ]; then
+ echo "Resetting Autorestart window size: $autostartWindowStartDate" >> ${HBASE_LOGOUT}
+ autostartWindowStartDate=$curDate
+ autostartWindowReset=true
+ autostartCount=0
+ fi
+
+    # kill autostart if the retry limit is exceeded within the given window size (window size other than 0)
+ if ! $autostartWindowReset && [ $AUTOSTART_WINDOW_RETRY_LIMIT -gt 0 ] && [ $autostartCount -gt $AUTOSTART_WINDOW_RETRY_LIMIT ]; then
+      echo "`date` Autostart window retry limit: $AUTOSTART_WINDOW_RETRY_LIMIT exceeded for given window size: $AUTOSTART_WINDOW_SIZE hours. Exiting..." >> ${HBASE_LOGLOG}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+
+ # wait for shutdown hook to complete
+ sleep 20
+ done
+ ;;
+
+(stop)
+ echo running $command, logging to $HBASE_LOGOUT
+ rm -f "$HBASE_AUTOSTART_FILE"
+ if [ -f $HBASE_PID ]; then
+ pidToKill=`cat $HBASE_PID`
+ # kill -0 == see if the PID exists
+ if kill -0 $pidToKill > /dev/null 2>&1; then
+ echo -n stopping $command
+ echo "`date` Terminating $command" >> $HBASE_LOGLOG
+ kill $pidToKill > /dev/null 2>&1
+ waitForProcessEnd $pidToKill $command
+ else
+ retval=$?
+ echo no $command to stop because kill -0 of pid $pidToKill failed with status $retval
+ fi
+ else
+ echo no $command to stop because no pid file $HBASE_PID
+ fi
+ rm -f $HBASE_PID
+ ;;
+
+(restart)
+ echo running $command, logging to $HBASE_LOGOUT
+ # stop the command
+ $thiscmd --config "${HBASE_CONF_DIR}" stop $command $args &
+ wait_until_done $!
+ # wait a user-specified sleep period
+ sp=${HBASE_RESTART_SLEEP:-3}
+ if [ $sp -gt 0 ]; then
+ sleep $sp
+ fi
+ # start the command
+ $thiscmd --config "${HBASE_CONF_DIR}" start $command $args &
+ wait_until_done $!
+ ;;
+
+(*)
+ echo $usage
+ exit 1
+ ;;
+esac
diff --git a/MSH-PIC/hbase/bin/hbase-daemons.sh b/MSH-PIC/hbase/bin/hbase-daemons.sh
new file mode 100644
index 0000000..b1785f6
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-daemons.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a hbase command on all slave hosts.
+# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh
+
+usage="Usage: hbase-daemons.sh [--config <hbase-confdir>] [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>] \
+ [--hosts regionserversfile] [autostart|autorestart|restart|start|stop] command args..."
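+# Illustrative invocations:
+#   bin/hbase-daemons.sh --hosts conf/regionservers start regionserver
+#   bin/hbase-daemons.sh start zookeeper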
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. $bin/hbase-config.sh
+
+if [[ "$1" = "autostart" || "$1" = "autorestart" ]]
+then
+ autostart_args="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT}"
+fi
+
+remote_cmd="$bin/hbase-daemon.sh --config ${HBASE_CONF_DIR} ${autostart_args} $@"
+args="--hosts ${HBASE_REGIONSERVERS} --config ${HBASE_CONF_DIR} $remote_cmd"
+
+command=$2
+case $command in
+ (zookeeper)
+ exec "$bin/zookeepers.sh" $args
+ ;;
+ (master-backup)
+ exec "$bin/master-backup.sh" $args
+ ;;
+ (*)
+ exec "$bin/regionservers.sh" $args
+ ;;
+esac
diff --git a/MSH-PIC/hbase/bin/hbase-jruby b/MSH-PIC/hbase/bin/hbase-jruby
new file mode 100644
index 0000000..37bce46
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase-jruby
@@ -0,0 +1,22 @@
+#!/bin/bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+
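+# Runs a JRuby script with the HBase classpath, e.g. (the script name is a placeholder):
+#   bin/hbase-jruby my_script.rb arg1 arg2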
+`dirname $0`/hbase org.jruby.Main $*
+
diff --git a/MSH-PIC/hbase/bin/hbase.cmd b/MSH-PIC/hbase/bin/hbase.cmd
new file mode 100644
index 0000000..fbeb1f8
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hbase.cmd
@@ -0,0 +1,469 @@
+@echo off
+@rem/*
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+@rem
+@rem The hbase command script. Based on the hadoop command script putting
+@rem in hbase classes, libs and configurations ahead of hadoop's.
+@rem
+@rem TODO: Narrow the amount of duplicated code.
+@rem
+@rem Environment Variables:
+@rem
+@rem   JAVA_HOME        The java implementation to use.
+@rem
+@rem HBASE_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HBASE_HEAPSIZE The maximum amount of heap to use.
+@rem                    Default is unset and uses the JVM's default setting
+@rem (usually 1/4th of the available memory).
+@rem
+@rem HBASE_OPTS Extra Java runtime options.
+@rem
+@rem HBASE_CONF_DIR Alternate conf dir. Default is ${HBASE_HOME}/conf.
+@rem
+@rem HBASE_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+@rem JRUBY_HOME JRuby path: $JRUBY_HOME\lib\jruby.jar should exist.
+@rem Defaults to the jar packaged with HBase.
+@rem
+@rem JRUBY_OPTS Extra options (eg '--1.9') passed to hbase.
+@rem Empty by default.
+@rem HBASE_SHELL_OPTS Extra options passed to the hbase shell.
+@rem Empty by default.
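+@rem
+@rem Illustrative overrides (values are placeholders):
+@rem   set HBASE_HEAPSIZE=4096
+@rem   set HBASE_OPTS=-XX:+HeapDumpOnOutOfMemoryError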
+
+
+setlocal enabledelayedexpansion
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+rem This will set HBASE_HOME, etc.
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call "%hbase-config-script%" %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+rem Detect if we are in hbase sources dir
+set in_dev_env=false
+
+if exist "%HBASE_HOME%\target" set in_dev_env=true
+
+rem --service is an internal option. Used by MSI setup to install HBase as a Windows service
+if "%1" == "--service" (
+ set service_entry=true
+ shift
+)
+
+set hbase-command=%1
+shift
+
+@rem if no args specified, show usage
+if "%hbase-command%"=="" (
+ goto :print_usage
+ endlocal
+ goto :eof
+)
+
+set JAVA_HEAP_MAX=
+set JAVA_OFFHEAP_MAX=
+
+rem check envvars which might override default args
+if defined HBASE_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%HBASE_HEAPSIZE%m
+)
+
+if defined HBASE_OFFHEAPSIZE (
+ set JAVA_OFFHEAP_MAX=-XX:MaxDirectMemory=%HBASE_OFFHEAPSIZE%m
+)
+
+set CLASSPATH=%HBASE_CONF_DIR%;%JAVA_HOME%\lib\tools.jar
+
+rem Add maven target directory
+set cached_classpath_filename=%HBASE_HOME%\hbase-build-configuration\target\cached_classpath.txt
+if "%in_dev_env%"=="true" (
+
+ rem adding maven main classes to classpath
+ for /f %%i in ('dir /b "%HBASE_HOME%\hbase-*"') do (
+ if exist %%i\target\classes set CLASSPATH=!CLASSPATH!;%%i\target\classes
+ )
+
+ rem adding maven test classes to classpath
+ rem For developers, add hbase classes to CLASSPATH
+ for /f %%i in ('dir /b "%HBASE_HOME%\hbase-*"') do (
+ if exist %%i\target\test-classes set CLASSPATH=!CLASSPATH!;%%i\target\test-classes
+ )
+
+ if not exist "%cached_classpath_filename%" (
+ echo "As this is a development environment, we need %cached_classpath_filename% to be generated from maven (command: mvn install -DskipTests)"
+ goto :eof
+ )
+
+ for /f "delims=" %%i in ('type "%cached_classpath_filename%"') do set CLASSPATH=%CLASSPATH%;%%i
+)
+
+@rem For releases add hbase webapps to CLASSPATH
+@rem Webapps must come first else it messes up Jetty
+if exist "%HBASE_HOME%\hbase-webapps" (
+ set CLASSPATH=%CLASSPATH%;%HBASE_HOME%
+)
+
+if exist "%HBASE_HOME%\target\hbase-webapps" (
+ set CLASSPATH=%CLASSPATH%;%HBASE_HOME%\target
+)
+
+for /F %%f in ('dir /b "%HBASE_HOME%\hbase*.jar" 2^>nul') do (
+ if not "%%f:~-11"=="sources.jar" (
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\%%f
+ )
+)
+
+@rem Add libs to CLASSPATH
+if exist "%HBASE_HOME%\lib" (
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\*
+)
+
+@rem Add user-specified CLASSPATH last
+if defined HBASE_CLASSPATH (
+ set CLASSPATH=%CLASSPATH%;%HBASE_CLASSPATH%
+)
+
+@rem Default log directory and file
+if not defined HBASE_LOG_DIR (
+ set HBASE_LOG_DIR=%HBASE_HOME%\logs
+)
+
+if not defined HBASE_LOGFILE (
+ set HBASE_LOGFILE=hbase.log
+)
+
+set JAVA_PLATFORM=
+
+rem If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+set PATH=%PATH%;"%HADOOP_HOME%\bin"
+set HADOOP_IN_PATH=hadoop.cmd
+
+if exist "%HADOOP_HOME%\bin\%HADOOP_IN_PATH%" (
+ set hadoopCpCommand=call %HADOOP_IN_PATH% classpath 2^>nul
+ for /f "eol= delims=" %%i in ('!hadoopCpCommand!') do set CLASSPATH_FROM_HADOOP=%%i
+ if defined CLASSPATH_FROM_HADOOP (
+ set CLASSPATH=%CLASSPATH%;!CLASSPATH_FROM_HADOOP!
+ )
+ set HADOOP_CLASSPATH=%CLASSPATH%
+
+ set hadoopJLPCommand=call %HADOOP_IN_PATH% org.apache.hadoop.hbase.util.GetJavaProperty java.library.path 2^>nul
+ for /f "eol= delims=" %%i in ('!hadoopJLPCommand!') do set HADOOP_JAVA_LIBRARY_PATH=%%i
+ if not defined JAVA_LIBRARY_PATH (
+ set JAVA_LIBRARY_PATH=!HADOOP_JAVA_LIBRARY_PATH!
+ ) else (
+ set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;!HADOOP_JAVA_LIBRARY_PATH!
+ )
+)
+
+if exist "%HBASE_HOME%\build\native" (
+ set platformCommand=call %JAVA% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName
+ for /f %%i in ('!platformCommand!') do set JAVA_PLATFORM=%%i
+ set _PATH_TO_APPEND=%HBASE_HOME%\build\native\!JAVA_PLATFORM!;%HBASE_HOME%\build\native\!JAVA_PLATFORM!\lib
+ if not defined JAVA_LIBRARY_PATH (
+ set JAVA_LIBRARY_PATH=!_PATH_TO_APPEND!
+ ) else (
+ set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;!_PATH_TO_APPEND!
+ )
+)
+
+rem This loop would set %hbase-command-arguments%
+set _hbasearguments=
+:MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hbasearguments (
+ set _hbasearguments=%1
+ ) else (
+ set _hbasearguments=!_hbasearguments! %1
+ )
+ shift
+goto :MakeCmdArgsLoop
+:EndLoop
+
+set hbase-command-arguments=%_hbasearguments%
+
+@rem figure out which class to run
+set corecommands=shell master regionserver thrift thrift2 rest avro hlog wal hbck hfile zookeeper zkcli mapredcp
+for %%i in ( %corecommands% ) do (
+ if "%hbase-command%"=="%%i" set corecommand=true
+)
+
+if defined corecommand (
+ call :%hbase-command% %hbase-command-arguments%
+) else (
+ if "%hbase-command%" == "classpath" (
+ echo %CLASSPATH%
+ goto :eof
+ )
+ if "%hbase-command%" == "version" (
+ set CLASS=org.apache.hadoop.hbase.util.VersionInfo
+ ) else (
+ set CLASS=%hbase-command%
+ )
+)
+
+if not defined HBASE_IDENT_STRING (
+ set HBASE_IDENT_STRING=%USERNAME%
+)
+
+@rem Set the right GC options based on what we are running
+set servercommands=master regionserver thrift thrift2 rest avro zookeeper
+for %%i in ( %servercommands% ) do (
+ if "%hbase-command%"=="%%i" set servercommand=true
+)
+
+if "%servercommand%" == "true" (
+ set HBASE_OPTS=%HBASE_OPTS% %SERVER_GC_OPTS%
+) else (
+ set HBASE_OPTS=%HBASE_OPTS% %CLIENT_GC_OPTS%
+)
+
+@rem If HBase is run as a windows service, configure logging
+if defined service_entry (
+ set HBASE_LOG_PREFIX=hbase-%hbase-command%-%COMPUTERNAME%
+ set HBASE_LOGFILE=!HBASE_LOG_PREFIX!.log
+ if not defined HBASE_ROOT_LOGGER (
+ set HBASE_ROOT_LOGGER=INFO,DRFA
+ )
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ set loggc=!HBASE_LOG_DIR!\!HBASE_LOG_PREFIX!.gc
+ set loglog=!HBASE_LOG_DIR!\!HBASE_LOGFILE!
+
+ if "%HBASE_USE_GC_LOGFILE%" == "true" (
+ set HBASE_OPTS=%HBASE_OPTS% -Xloggc:"!loggc!"
+ )
+)
+
+@rem for jruby
+@rem (1) for the commands which need jruby (see jruby-commands defined below)
+@rem A. when JRUBY_HOME is defined
+@rem CLASSPATH and HBASE_OPTS are updated according to JRUBY_HOME defined
+@rem B. when JRUBY_HOME is not defined
+@rem add jruby packaged with HBase to CLASSPATH
+@rem (2) for other commands, do nothing
+
+@rem check if the command needs jruby
+set jruby-commands=shell org.jruby.Main
+for %%i in ( %jruby-commands% ) do (
+ if "%hbase-command%"=="%%i" set jruby-needed=true
+)
+
+@rem the command needs jruby
+if defined jruby-needed (
+ @rem JRUBY_HOME is defined
+ if defined JRUBY_HOME (
+ set CLASSPATH=%JRUBY_HOME%\lib\jruby.jar;%CLASSPATH%
+ set HBASE_OPTS=%HBASE_OPTS% -Djruby.home="%JRUBY_HOME%" -Djruby.lib="%JRUBY_HOME%\lib"
+ )
+
+ @rem JRUBY_HOME is not defined
+ if not defined JRUBY_HOME (
+ @rem in dev environment
+ if "%in_dev_env%"=="true" (
+ set cached_classpath_jruby_filename=%HBASE_HOME%\hbase-build-configuration\target\cached_classpath_jruby.txt
+ if not exist "!cached_classpath_jruby_filename!" (
+ echo "As this is a development environment, we need !cached_classpath_jruby_filename! to be generated from maven (command: mvn install -DskipTests)"
+ goto :eof
+ )
+ for /f "delims=" %%i in ('type "!cached_classpath_jruby_filename!"') do set CLASSPATH=%%i;%CLASSPATH%
+ )
+
+ @rem not in dev environment
+ if "%in_dev_env%"=="false" (
+ @rem add jruby packaged with HBase to CLASSPATH
+ set JRUBY_PACKAGED_WITH_HBASE=%HBASE_HOME%\lib\ruby\*
+ if defined jruby-needed (
+ set CLASSPATH=!JRUBY_PACKAGED_WITH_HBASE!;!CLASSPATH!
+ )
+ )
+ )
+)
+
+@rem Have JVM dump heap if we run out of memory. Files will be 'launch directory'
+@rem and are named like the following: java_pid21612.hprof. Apparently it does not
+@rem 'cost' to have this flag enabled. It's a 1.6 flag only. See:
+@rem http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.log.dir="%HBASE_LOG_DIR%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.log.file="%HBASE_LOGFILE%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.home.dir="%HBASE_HOME%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.id.str="%HBASE_IDENT_STRING%"
+set HBASE_OPTS=%HBASE_OPTS% -XX:OnOutOfMemoryError="taskkill /F /PID %p"
+
+if not defined HBASE_ROOT_LOGGER (
+ set HBASE_ROOT_LOGGER=INFO,console
+)
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.root.logger="%HBASE_ROOT_LOGGER%"
+
+if defined JAVA_LIBRARY_PATH (
+ set HBASE_OPTS=%HBASE_OPTS% -Djava.library.path="%JAVA_LIBRARY_PATH%"
+)
+
+rem Enable security logging on the master and regionserver only
+if not defined HBASE_SECURITY_LOGGER (
+ set HBASE_SECURITY_LOGGER=INFO,NullAppender
+ if "%hbase-command%"=="master" (
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ )
+ if "%hbase-command%"=="regionserver" (
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ )
+)
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.security.logger="%HBASE_SECURITY_LOGGER%"
+
+set HEAP_SETTINGS=%JAVA_HEAP_MAX% %JAVA_OFFHEAP_MAX%
+set java_arguments=%HEAP_SETTINGS% %HBASE_OPTS% -classpath "%CLASSPATH%" %CLASS% %hbase-command-arguments%
+
+if defined service_entry (
+ call :makeServiceXml %java_arguments%
+) else (
+ call %JAVA% %java_arguments%
+)
+
+endlocal
+goto :eof
+
+:shell
+ rem find the hbase ruby sources
+ if exist "%HBASE_HOME%\lib\ruby" (
+ set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources="%HBASE_HOME%\lib\ruby"
+ ) else (
+ set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources="%HBASE_HOME%\hbase-shell\src\main\ruby"
+ )
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_SHELL_OPTS%
+
+ set CLASS=org.jruby.Main -X+O %JRUBY_OPTS% "%HBASE_HOME%\bin\hirb.rb"
+ goto :eof
+
+:master
+ set CLASS=org.apache.hadoop.hbase.master.HMaster
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_MASTER_OPTS%
+ )
+ goto :eof
+
+:regionserver
+ set CLASS=org.apache.hadoop.hbase.regionserver.HRegionServer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_REGIONSERVER_OPTS%
+ )
+ goto :eof
+
+:thrift
+ set CLASS=org.apache.hadoop.hbase.thrift.ThriftServer
+ if NOT "%1" == "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_THRIFT_OPTS%
+ )
+ goto :eof
+
+:thrift2
+ set CLASS=org.apache.hadoop.hbase.thrift2.ThriftServer
+ if NOT "%1" == "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_THRIFT_OPTS%
+ )
+ goto :eof
+
+:rest
+ set CLASS=org.apache.hadoop.hbase.rest.RESTServer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_REST_OPTS%
+ )
+ goto :eof
+
+:avro
+ set CLASS=org.apache.hadoop.hbase.avro.AvroServer
+ if NOT "%1"== "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_AVRO_OPTS%
+ )
+ goto :eof
+
+:zookeeper
+ set CLASS=org.apache.hadoop.hbase.zookeeper.HQuorumPeer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_ZOOKEEPER_OPTS%
+ )
+ goto :eof
+
+:hbck
+ set CLASS=org.apache.hadoop.hbase.util.HBaseFsck
+ goto :eof
+
+:wal
+ set CLASS=org.apache.hadoop.hbase.wal.WALPrettyPrinter
+ goto :eof
+
+:hfile
+ set CLASS=org.apache.hadoop.hbase.io.hfile.HFile
+ goto :eof
+
+:zkcli
+ set CLASS=org.apache.hadoop.hbase.zookeeper.ZKMainServer
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\zkcli\*
+ goto :eof
+
+:mapredcp
+ set CLASS=org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool
+ goto :eof
+
+:makeServiceXml
+ set arguments=%*
+ @echo ^<service^>
+ @echo ^<id^>%hbase-command%^</id^>
+ @echo ^<name^>%hbase-command%^</name^>
+ @echo ^<description^>This service runs Isotope %hbase-command%^</description^>
+ @echo ^<executable^>%JAVA%^</executable^>
+ @echo ^<arguments^>%arguments%^</arguments^>
+ @echo ^</service^>
+ goto :eof
+
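+@rem Illustrative invocations of this script:
+@rem   hbase shell
+@rem   hbase version
+@rem   hbase classpath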
+:print_usage
+ echo Usage: hbase [^<options^>] ^<command^> [^<args^>]
+  echo where ^<command^> is an option from one of these categories:
+ echo Options:
+  echo   --config DIR     Configuration directory to use. Default: ./conf
+ echo.
+ echo Commands:
+  echo  Some commands take arguments. Pass no args or -h for usage.
+ echo shell Run the HBase shell
+ echo hbck Run the hbase 'fsck' tool
+ echo wal Write-ahead-log analyzer
+ echo hfile Store file analyzer
+ echo zkcli Run the ZooKeeper shell
+ echo master Run an HBase HMaster node
+ echo regionserver Run an HBase HRegionServer node
+ echo zookeeper Run a ZooKeeper server
+ echo rest Run an HBase REST server
+ echo thrift Run the HBase Thrift server
+ echo thrift2 Run the HBase Thrift2 server
+ echo classpath Dump hbase CLASSPATH
+ echo mapredcp Dump CLASSPATH entries required by mapreduce
+ echo version Print the version
+ echo CLASSNAME Run the class named CLASSNAME
+ goto :eof
diff --git a/MSH-PIC/hbase/bin/hirb.rb b/MSH-PIC/hbase/bin/hirb.rb
new file mode 100644
index 0000000..e857db7
--- /dev/null
+++ b/MSH-PIC/hbase/bin/hirb.rb
@@ -0,0 +1,264 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports
+# and hbase commands and then loads jirb. Outputs a banner that tells user
+# where to find help, shell version, and loads up a custom hirb.
+#
+# In noninteractive mode, runs commands from stdin until completion or an error.
+# On success will exit with status 0, on any problem will exit non-zero. Callers
+# should only rely on "not equal to 0", because the current error exit code of 1
+# will likely be updated to differentiate e.g. invalid commands, incorrect args,
+# permissions, etc.
+
+# TODO: Interrupt a table creation or a connection to a bad master. Currently
+# has to time out. Below we've set down the retries for rpc and hbase but
+# still can be annoying (And there seem to be times when we'll retry for
+# ever regardless)
+# TODO: Add support for listing and manipulating catalog tables, etc.
+# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes
+
+# Run the java magic include and import basic HBase types that will help ease
+# hbase hacking.
+include Java
+
+# Some goodies for hirb. Should these be left up to the user's discretion?
+require 'irb/completion'
+require 'pathname'
+
+# Add the directory names in hbase.jruby.sources commandline option
+# to the ruby load path so I can load up my HBase ruby modules
+sources = java.lang.System.getProperty('hbase.ruby.sources')
+$LOAD_PATH.unshift Pathname.new(sources)
+
+#
+# FIXME: Switch args processing to getopt
+#
+# See if there are args for this shell. If any, read and then strip from ARGV
+# so they don't go through to irb. Output shell 'usage' if user types '--help'
+cmdline_help = <<HERE # HERE document output as shell usage
+Usage: shell [OPTIONS] [SCRIPTFILE [ARGUMENTS]]
+
+ -d | --debug Set DEBUG log levels.
+ -h | --help This help.
+ -n | --noninteractive Do not run within an IRB session and exit with non-zero
+ status on first error.
+ -Dkey=value Pass hbase-*.xml Configuration overrides. For example, to
+ use an alternate zookeeper ensemble, pass:
+ -Dhbase.zookeeper.quorum=zookeeper.example.org
+ For faster fail, pass the below and vary the values:
+ -Dhbase.client.retries.number=7
+ -Dhbase.ipc.client.connect.max.retries=3
+HERE
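+# Illustrative invocations (host and script names are placeholders):
+#   hbase shell -n my_script.rb
+#   hbase shell -Dhbase.zookeeper.quorum=zookeeper.example.org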
+
+# Takes a configuration and an arg that is expected to be in key=value format.
+# If c is empty, creates one and returns it
+def add_to_configuration(c, arg)
+ kv = arg.split('=')
+ kv.length == 2 || (raise "Expected parameter #{kv} in key=value format")
+ c = org.apache.hadoop.hbase.HBaseConfiguration.create if c.nil?
+ c.set(kv[0], kv[1])
+ c
+end
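+# Illustrative call (the key and value are placeholders):
+#   conf = add_to_configuration(nil, 'hbase.client.retries.number=7')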
+
+found = []
+script2run = nil
+log_level = org.apache.log4j.Level::ERROR
+@shell_debug = false
+interactive = true
+_configuration = nil
+D_ARG = '-D'
+while (arg = ARGV.shift)
+ if arg == '-h' || arg == '--help'
+ puts cmdline_help
+ exit
+ elsif arg == D_ARG
+ argValue = ARGV.shift || (raise "#{D_ARG} takes a 'key=value' parameter")
+ _configuration = add_to_configuration(_configuration, argValue)
+ found.push(arg)
+ found.push(argValue)
+ elsif arg.start_with? D_ARG
+ _configuration = add_to_configuration(_configuration, arg[2..-1])
+ found.push(arg)
+ elsif arg == '-d' || arg == '--debug'
+ log_level = org.apache.log4j.Level::DEBUG
+ $fullBackTrace = true
+ @shell_debug = true
+ found.push(arg)
+ puts 'Setting DEBUG log level...'
+ elsif arg == '-n' || arg == '--noninteractive'
+ interactive = false
+ found.push(arg)
+ elsif arg == '-r' || arg == '--return-values'
+ warn '[INFO] the -r | --return-values option is ignored. we always behave '\
+ 'as though it was given.'
+ found.push(arg)
+ else
+    # Presume it's a script. Save it off for running later below
+ # after we've set up some environment.
+ script2run = arg
+ found.push(arg)
+ # Presume that any other args are meant for the script.
+ break
+ end
+end
+
+# Delete all processed args
+found.each { |arg| ARGV.delete(arg) }
+# Make sure debug flag gets back to IRB
+ARGV.unshift('-d') if @shell_debug
+
+# Set logging level to avoid verboseness
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+
+# Require HBase now after setting log levels
+require 'hbase_constants'
+
+# Load hbase shell
+require 'shell'
+
+# Require formatter
+require 'shell/formatter'
+
+# Setup the HBase module. Create a configuration.
+@hbase = _configuration.nil? ? Hbase::Hbase.new : Hbase::Hbase.new(_configuration)
+
+# Setup console
+@shell = Shell::Shell.new(@hbase, interactive)
[email protected] = @shell_debug
+
+# Add commands to this namespace
+# TODO avoid polluting main namespace by using a binding
[email protected]_commands(self)
+
+# Add help command
+def help(command = nil)
+ @shell.help(command)
+end
+
+# Backwards compatibility method
+def tools
+ @shell.help_group('tools')
+end
+
+# Debugging method
+def debug
+ if @shell_debug
+ @shell_debug = false
+ conf.back_trace_limit = 0
+ log_level = org.apache.log4j.Level::ERROR
+ else
+ @shell_debug = true
+ conf.back_trace_limit = 100
+ log_level = org.apache.log4j.Level::DEBUG
+ end
+ org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+ org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+ debug?
+end
+
+def debug?
+ puts "Debug mode is #{@shell_debug ? 'ON' : 'OFF'}\n\n"
+ nil
+end
+
+# Include hbase constants
+include HBaseConstants
+
+# If script2run, try running it. If we're in interactive mode, will go on to run the shell unless
+# script calls 'exit' or 'exit 0' or 'exit errcode'.
+load(script2run) if script2run
+
+if interactive
+ # Output a banner message that tells users where to go for help
+ @shell.print_banner
+
+ require 'irb'
+ require 'irb/hirb'
+
+ module IRB
+ def self.start(ap_path = nil)
+ $0 = File.basename(ap_path, '.rb') if ap_path
+
+ IRB.setup(ap_path)
+ @CONF[:IRB_NAME] = 'hbase'
+ @CONF[:AP_NAME] = 'hbase'
+ @CONF[:BACK_TRACE_LIMIT] = 0 unless $fullBackTrace
+
+ hirb = if @CONF[:SCRIPT]
+ HIRB.new(nil, @CONF[:SCRIPT])
+ else
+ HIRB.new
+ end
+
+ @CONF[:IRB_RC].call(hirb.context) if @CONF[:IRB_RC]
+ @CONF[:MAIN_CONTEXT] = hirb.context
+
+ catch(:IRB_EXIT) do
+ hirb.eval_input
+ end
+ end
+ end
+
+ IRB.start
+else
+ begin
+ # Noninteractive mode: if there is input on stdin, do a simple REPL.
+ # XXX Note that this purposefully uses STDIN and not Kernel.gets
+ # in order to maintain compatibility with previous behavior where
+ # a user could pass in script2run and then still pipe commands on
+ # stdin.
+ require 'irb/ruby-lex'
+ require 'irb/workspace'
+ workspace = IRB::WorkSpace.new(binding)
+ scanner = RubyLex.new
+
+ # RubyLex claims to take an IO but really wants an InputMethod
+ module IOExtensions
+ def encoding
+ external_encoding
+ end
+ end
+ IO.include IOExtensions
+
+ scanner.set_input(STDIN)
+ scanner.each_top_level_statement do |statement, linenum|
+ puts(workspace.evaluate(nil, statement, 'stdin', linenum))
+ end
+ # XXX We're catching Exception on purpose, because we want to include
+ # unwrapped java exceptions, syntax errors, eval failures, etc.
+ rescue Exception => exception
+ message = exception.to_s
+ # exception unwrapping in shell means we'll have to handle Java exceptions
+ # as a special case in order to format them properly.
+ if exception.is_a? java.lang.Exception
+ $stderr.puts 'java exception'
+ message = exception.get_message
+ end
+ # Include the 'ERROR' string to try to make transition easier for scripts that
+ # may have already been relying on grepping output.
+ puts "ERROR #{exception.class}: #{message}"
+    if $fullBackTrace
+      # re-raising will include a backtrace and exit.
+ raise exception
+ else
+ exit 1
+ end
+ end
+end
diff --git a/MSH-PIC/hbase/bin/local-master-backup.sh b/MSH-PIC/hbase/bin/local-master-backup.sh
new file mode 100644
index 0000000..b0aa2f7
--- /dev/null
+++ b/MSH-PIC/hbase/bin/local-master-backup.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+# This is used for starting multiple masters on the same machine.
+# run it from hbase-dir/ just like 'bin/hbase'
+# Supports up to 10 masters (limitation = overlapping ports)
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin" >/dev/null && pwd`
+
+if [ $# -lt 2 ]; then
+ S=`basename "${BASH_SOURCE-$0}"`
+ echo "Usage: $S [--config <conf-dir>] [--autostart-window-size <window size in hours>]"
+ echo " [--autostart-window-retry-limit <retry count limit for autostart>] [autostart|start|stop] offset(s)"
+ echo " e.g. $S start 1"
+ exit
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# sanity check: make sure your master opts don't use ports [i.e. JMX/DBG]
+export HBASE_MASTER_OPTS=" "
+
+run_master () {
+ DN=$2
+ export HBASE_IDENT_STRING="$USER-$DN"
+ HBASE_MASTER_ARGS="\
+ -D hbase.master.port=`expr 16000 + $DN` \
+ -D hbase.master.info.port=`expr 16010 + $DN` \
+ -D hbase.regionserver.port=`expr 16020 + $DN` \
+ -D hbase.regionserver.info.port=`expr 16030 + $DN` \
+ --backup"
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" --autostart-window-size "${AUTOSTART_WINDOW_SIZE}" --autostart-window-retry-limit "${AUTOSTART_WINDOW_RETRY_LIMIT}" $1 master $HBASE_MASTER_ARGS
+}
+
+cmd=$1
+shift;
+
+for i in $*
+do
+ if [[ "$i" =~ ^[0-9]+$ ]]; then
+ run_master $cmd $i
+ else
+ echo "Invalid argument"
+ fi
+done
diff --git a/MSH-PIC/hbase/bin/local-regionservers.sh b/MSH-PIC/hbase/bin/local-regionservers.sh
new file mode 100644
index 0000000..97e5eed
--- /dev/null
+++ b/MSH-PIC/hbase/bin/local-regionservers.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+# This is used for starting multiple regionservers on the same machine.
+# run it from hbase-dir/ just like 'bin/hbase'
+# Supports up to 10 regionservers (limitation = overlapping ports)
+# For supporting more instances select different values (e.g. 16200, 16300)
+# for HBASE_RS_BASE_PORT and HBASE_RS_INFO_BASE_PORT below
+if [ -z "$HBASE_RS_BASE_PORT" ]; then
+ HBASE_RS_BASE_PORT=16020
+fi
+if [ -z "$HBASE_RS_INFO_BASE_PORT" ]; then
+ HBASE_RS_INFO_BASE_PORT=16030
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin" >/dev/null && pwd`
+
+if [ $# -lt 2 ]; then
+ S=`basename "${BASH_SOURCE-$0}"`
+ echo "Usage: $S [--config <conf-dir>] [--autostart-window-size <window size in hours>]"
+ echo " [--autostart-window-retry-limit <retry count limit for autostart>] [autostart|start|stop] offset(s)"
+ echo " e.g. $S start 1 2"
+ exit
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# sanity check: make sure your regionserver opts don't use ports [i.e. JMX/DBG]
+export HBASE_REGIONSERVER_OPTS=" "
+
+run_regionserver () {
+ DN=$2
+ export HBASE_IDENT_STRING="$USER-$DN"
+ HBASE_REGIONSERVER_ARGS="\
+ -Dhbase.regionserver.port=`expr "$HBASE_RS_BASE_PORT" + "$DN"` \
+ -Dhbase.regionserver.info.port=`expr "$HBASE_RS_INFO_BASE_PORT" + "$DN"`"
+
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" \
+ --autostart-window-size "${AUTOSTART_WINDOW_SIZE}" \
+ --autostart-window-retry-limit "${AUTOSTART_WINDOW_RETRY_LIMIT}" \
+ "$1" regionserver "$HBASE_REGIONSERVER_ARGS"
+}
+
+cmd=$1
+shift;
+
+for i in "$@"
+do
+ if [[ "$i" =~ ^[0-9]+$ ]]; then
+ run_regionserver "$cmd" "$i"
+ else
+ echo "Invalid argument"
+ fi
+done
diff --git a/MSH-PIC/hbase/bin/master-backup.sh b/MSH-PIC/hbase/bin/master-backup.sh
new file mode 100644
index 0000000..feca4ab
--- /dev/null
+++ b/MSH-PIC/hbase/bin/master-backup.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all backup master hosts.
+#
+# Environment Variables
+#
+# HBASE_BACKUP_MASTERS File naming remote hosts.
+# Default is ${HBASE_CONF_DIR}/backup-masters
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: $0 [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+# If the master backup file is specified in the command line,
+# then it takes precedence over the definition in
+# hbase-env.sh. Save it here.
+HOSTLIST=$HBASE_BACKUP_MASTERS
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HBASE_BACKUP_MASTERS" = "" ]; then
+ export HOSTLIST="${HBASE_CONF_DIR}/backup-masters"
+ else
+ export HOSTLIST="${HBASE_BACKUP_MASTERS}"
+ fi
+fi
+
+
+args=${@// /\\ }
+args=${args/master-backup/master}
+
+if [ -f $HOSTLIST ]; then
+ for hmaster in `cat "$HOSTLIST"`; do
+ ssh $HBASE_SSH_OPTS $hmaster $"$args --backup" \
+ 2>&1 | sed "s/^/$hmaster: /" &
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
diff --git a/MSH-PIC/hbase/bin/region_mover.rb b/MSH-PIC/hbase/bin/region_mover.rb
new file mode 100644
index 0000000..6756145
--- /dev/null
+++ b/MSH-PIC/hbase/bin/region_mover.rb
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Moves regions. Will confirm region access in current location and will
+# not move a new region until successful confirm of region loading in new
+# location. Presumes the balancer is disabled when we run (not harmful if it's
+# on, but this script and the balancer will end up fighting each other).
+$BIN = File.dirname(__FILE__)
+exec "#{$BIN}/hbase org.apache.hadoop.hbase.util.RegionMover #{ARGV.join(' ')}"
diff --git a/MSH-PIC/hbase/bin/region_status.rb b/MSH-PIC/hbase/bin/region_status.rb
new file mode 100644
index 0000000..abd19dd
--- /dev/null
+++ b/MSH-PIC/hbase/bin/region_status.rb
@@ -0,0 +1,150 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# View the current status of all regions on an HBase cluster. This is
+# predominantly used to determine whether all the regions in META have been
+# onlined yet on startup.
+#
+# To use this script, run:
+#
+# ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb [wait] [--table <table_name>]
+
+require 'optparse'
+
+usage = "Usage: ./hbase org.jruby.Main region_status.rb [wait]" \
+        " [--table <table_name>]\n"
+OptionParser.new do |o|
+ o.banner = usage
+ o.on('-t', '--table TABLENAME', 'Only process TABLENAME') do |tablename|
+ $tablename = tablename
+ end
+ o.on('-h', '--help', 'Display help message') { puts o; exit }
+ o.parse!
+end
+
+SHOULD_WAIT = ARGV[0] == 'wait'
+if ARGV[0] && !SHOULD_WAIT
+ print usage
+ exit 1
+end
+
+require 'java'
+
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.MasterNotRunningException
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.client.Table
+java_import org.apache.hadoop.hbase.client.Scan
+java_import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
+java_import org.apache.hadoop.hbase.util.Bytes
+java_import org.apache.hadoop.hbase.HRegionInfo
+java_import org.apache.hadoop.hbase.MetaTableAccessor
+java_import org.apache.hadoop.hbase.HTableDescriptor
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+
+# disable debug logging on this script for clarity
+log_level = org.apache.log4j.Level::ERROR
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+
+config = HBaseConfiguration.create
+config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
+connection = ConnectionFactory.createConnection(config)
+# wait until the master is running
+admin = nil
+loop do
+ begin
+ admin = connection.getAdmin
+ break
+ rescue MasterNotRunningException => e
+    print "Waiting for master to start...\n"
+ sleep 1
+ end
+end
+
+meta_count = 0
+server_count = 0
+
+# scan META to see how many regions we should have
+if $tablename.nil?
+ scan = Scan.new
+else
+ tableNameMetaPrefix = $tablename + HConstants::META_ROW_DELIMITER.chr
+ scan = Scan.new(
+ (tableNameMetaPrefix + HConstants::META_ROW_DELIMITER.chr).to_java_bytes
+ )
+end
+scan.setCacheBlocks(false)
+scan.setCaching(10)
+scan.setFilter(FirstKeyOnlyFilter.new)
+INFO = 'info'.to_java_bytes
+REGION_INFO = 'regioninfo'.to_java_bytes
+scan.addColumn INFO, REGION_INFO
+table = nil
+scanner = nil
+iter = nil
+loop do
+ begin
+ table = connection.getTable(TableName.valueOf('hbase:meta'))
+ scanner = table.getScanner(scan)
+ iter = scanner.iterator
+ break
+ rescue IOException => ioe
+ print "Exception trying to scan META: #{ioe}"
+ sleep 1
+ end
+end
+while iter.hasNext
+ result = iter.next
+ rowid = Bytes.toString(result.getRow)
+ rowidStr = java.lang.String.new(rowid)
+ if !$tablename.nil? && !rowidStr.startsWith(tableNameMetaPrefix)
+ # Gone too far, break
+ break
+ end
+ region = MetaTableAccessor.getHRegionInfo(result)
+ unless region.isOffline
+ # only include regions that should be online
+ meta_count += 1
+ end
+end
+scanner.close
+# If we're trying to see the status of all HBase tables, we need to include the
+# hbase:meta table, that is not included in our scan
+meta_count += 1 if $tablename.nil?
+
+# query the master to see how many regions are on region servers
+$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
+loop do
+ if $tablename.nil?
+ server_count = admin.getClusterStatus.getRegionsCount
+ else
+ connection = ConnectionFactory.createConnection(config)
+ server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
+ end
+ print "Region Status: #{server_count} / #{meta_count}\n"
+ if SHOULD_WAIT && server_count < meta_count
+ # continue this loop until server & meta count match
+ sleep 10
+ else
+ break
+ end
+end
+admin.close
+connection.close
+
+exit server_count == meta_count ? 0 : 1
diff --git a/MSH-PIC/hbase/bin/regionservers.sh b/MSH-PIC/hbase/bin/regionservers.sh
new file mode 100644
index 0000000..b83c1f3
--- /dev/null
+++ b/MSH-PIC/hbase/bin/regionservers.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all regionserver hosts.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: regionservers [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+# If the regionservers file is specified in the command line,
+# then it takes precedence over the definition in
+# hbase-env.sh. Save it here.
+HOSTLIST=$HBASE_REGIONSERVERS
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HBASE_REGIONSERVERS" = "" ]; then
+ export HOSTLIST="${HBASE_CONF_DIR}/regionservers"
+ else
+ export HOSTLIST="${HBASE_REGIONSERVERS}"
+ fi
+fi
+
+regionservers=`cat "$HOSTLIST"`
+if [ "$regionservers" = "localhost" ]; then
+ HBASE_REGIONSERVER_ARGS="\
+ -Dhbase.regionserver.port=16020 \
+ -Dhbase.regionserver.info.port=16030"
+
+ $"${@// /\\ }" ${HBASE_REGIONSERVER_ARGS} \
+    2>&1 | sed "s/^/$regionservers: /" &
+else
+ for regionserver in `cat "$HOSTLIST"`; do
+ if ${HBASE_SLAVE_PARALLEL:-true}; then
+ ssh $HBASE_SSH_OPTS $regionserver $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$regionserver: /" &
+ else # run each command serially
+ ssh $HBASE_SSH_OPTS $regionserver $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$regionserver: /"
+ fi
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
diff --git a/MSH-PIC/hbase/bin/replication/copy_tables_desc.rb b/MSH-PIC/hbase/bin/replication/copy_tables_desc.rb
new file mode 100644
index 0000000..44a24f9
--- /dev/null
+++ b/MSH-PIC/hbase/bin/replication/copy_tables_desc.rb
@@ -0,0 +1,104 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Script to recreate all tables from one cluster to another
+# To see usage for this script, run:
+#
+# ${HBASE_HOME}/bin/hbase org.jruby.Main copy_tables_desc.rb
+#
+
+include Java
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.HTableDescriptor
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.slf4j.LoggerFactory
+
+# Name of this script
+NAME = 'copy_tables_desc'.freeze
+
+# Print usage for this script
+def usage
+ puts format('Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]', NAME)
+ exit!
+end
+
+def copy(src, dst, table)
+ # verify if table exists in source cluster
+ begin
+ t = src.getTableDescriptor(TableName.valueOf(table))
+ rescue org.apache.hadoop.hbase.TableNotFoundException
+ puts format("Source table \"%s\" doesn't exist, skipping.", table)
+ return
+ end
+
+  # verify if table *doesn't* exist in the target cluster
+ begin
+ dst.createTable(t)
+ rescue org.apache.hadoop.hbase.TableExistsException
+ puts format('Destination table "%s" exists in remote cluster, skipping.', table)
+ return
+ end
+
+  puts format('Schema for table "%s" was successfully copied to remote cluster.', table)
+end
+
+usage if ARGV.size < 2 || ARGV.size > 3
+
+LOG = LoggerFactory.getLogger(NAME)
+
+parts1 = ARGV[0].split(':')
+
+parts2 = ARGV[1].split(':')
+
+parts3 = ARGV[2].split(',') unless ARGV[2].nil?
+
+c1 = HBaseConfiguration.create
+c1.set(HConstants::ZOOKEEPER_QUORUM, parts1[0])
+c1.set('hbase.zookeeper.property.clientPort', parts1[1])
+c1.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts1[2])
+
+connection1 = ConnectionFactory.createConnection(c1)
+admin1 = connection1.getAdmin
+
+c2 = HBaseConfiguration.create
+c2.set(HConstants::ZOOKEEPER_QUORUM, parts2[0])
+c2.set('hbase.zookeeper.property.clientPort', parts2[1])
+c2.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts2[2])
+
+connection2 = ConnectionFactory.createConnection(c2)
+admin2 = connection2.getAdmin
+
+if parts3.nil?
+ admin1.listTableNames.each do |t|
+ copy(admin1, admin2, t.nameAsString)
+ end
+else
+ parts3.each do |t|
+ copy(admin1, admin2, t)
+ end
+end
+
+admin1.close
+admin2.close
+connection1.close
+connection2.close
diff --git a/MSH-PIC/hbase/bin/rolling-restart.sh b/MSH-PIC/hbase/bin/rolling-restart.sh
new file mode 100644
index 0000000..11c091d
--- /dev/null
+++ b/MSH-PIC/hbase/bin/rolling-restart.sh
@@ -0,0 +1,227 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all regionserver hosts.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage_str="Usage: `basename $0` [--config <hbase-confdir>] [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>] [--autostart] [--rs-only] [--master-only] \
+  [--graceful] [--maxthreads <n>] [--noack] [--movetimeout <timeout>]"
+
+function usage() {
+ echo "${usage_str}"
+}
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# start hbase daemons
+errCode=$?
+if [ $errCode -ne 0 ]
+then
+ exit $errCode
+fi
+
+RR_RS=1
+RR_MASTER=1
+RR_GRACEFUL=0
+RR_MAXTHREADS=1
+START_CMD_NON_DIST_MODE=restart
+START_CMD_DIST_MODE=start
+RESTART_CMD_REGIONSERVER=restart
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --rs-only|-r)
+ RR_RS=1
+ RR_MASTER=0
+ RR_GRACEFUL=0
+ shift
+ ;;
+ --autostart)
+ START_CMD_NON_DIST_MODE="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autorestart"
+ START_CMD_DIST_MODE="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autostart"
+ RESTART_CMD_REGIONSERVER="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autorestart"
+ shift
+ ;;
+ --master-only)
+ RR_RS=0
+ RR_MASTER=1
+ RR_GRACEFUL=0
+ shift
+ ;;
+ --graceful)
+ RR_RS=0
+ RR_MASTER=0
+ RR_GRACEFUL=1
+ shift
+ ;;
+ --maxthreads)
+ shift
+ RR_MAXTHREADS=$1
+ shift
+ ;;
+ --noack)
+ RR_NOACK="--noack"
+ shift
+ ;;
+ --movetimeout)
+ shift
+ RR_MOVE_TIMEOUT=$1
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo Bad argument: $1
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# quick function to get a value from the HBase config file
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`HBASE_CONF_DIR=${HBASE_CONF_DIR} $bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+if [ "$distMode" == 'false' ]; then
+ if [ $RR_RS -ne 1 ] || [ $RR_MASTER -ne 1 ]; then
+    echo "Cannot do selective rolling restart if not running distributed"
+ exit 1
+ fi
+ "$bin"/hbase-daemon.sh ${START_CMD_NON_DIST_MODE} master
+else
+ zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+ if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+ if [ $RR_MASTER -eq 1 ]; then
+ # stop all masters before re-start to avoid races for master znode
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" stop master-backup
+
+ # make sure the master znode has been deleted before continuing
+ zmaster=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master`
+ if [ "$zmaster" == "null" ]; then zmaster="master"; fi
+ zmaster=$zparent/$zmaster
+ echo -n "Waiting for Master ZNode ${zmaster} to expire"
+ echo
+ while ! "$bin"/hbase zkcli stat $zmaster 2>&1 | grep "Node does not exist"; do
+ echo -n "."
+ sleep 1
+ done
+ echo #force a newline
+
+ # all masters are down, now restart
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" ${START_CMD_DIST_MODE} master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" ${START_CMD_DIST_MODE} master-backup
+
+    echo "Wait a minute for the master to come up and join the cluster"
+ sleep 60
+
+    # A master joining the cluster will start by cleaning out regions in transition.
+ # Wait until the master has cleaned out regions in transition before
+ # giving it a bunch of work to do; master is vulnerable during startup
+ zunassigned=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.unassigned`
+ if [ "$zunassigned" == "null" ]; then zunassigned="region-in-transition"; fi
+ zunassigned="$zparent/$zunassigned"
+    # Checking if /hbase/region-in-transition exists
+ ritZnodeCheck=`$bin/hbase zkcli stat ${zunassigned} 2>&1 | tail -1 \
+ | grep "Node does not exist:" >/dev/null`
+ ret=$?
+ if test 0 -eq ${ret}
+ then
+ echo "Znode ${zunassigned} does not exist"
+ else
+ echo -n "Waiting for ${zunassigned} to empty"
+ while true ; do
+ unassigned=`$bin/hbase zkcli stat ${zunassigned} 2>&1 \
+ | grep -e 'numChildren = '|sed -e 's,numChildren = ,,'`
+ if test 0 -eq ${unassigned}
+ then
+ echo
+ break
+ else
+ echo -n " ${unassigned}"
+ fi
+ sleep 1
+ done
+ fi
+ fi
+
+ if [ $RR_RS -eq 1 ]; then
+ # unlike the masters, roll all regionservers one-at-a-time
+ export HBASE_SLAVE_PARALLEL=false
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_REGIONSERVERS}" ${RESTART_CMD_REGIONSERVER} regionserver
+ fi
+
+ if [ $RR_GRACEFUL -eq 1 ]; then
+ # gracefully restart all online regionservers
+ masterport=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.master.port`
+ if [ "$masterport" == "null" ]; then masterport="16000"; fi
+ zkrs=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+ if [ "$zkrs" == "null" ]; then zkrs="rs"; fi
+ zkrs="$zparent/$zkrs"
+ online_regionservers=`$bin/hbase zkcli ls $zkrs 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+ echo "Disabling load balancer"
+ HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | tail -1)
+ echo "Previous balancer state was $HBASE_BALANCER_STATE"
+
+ for rs in $online_regionservers
+ do
+ rs_parts=(${rs//,/ })
+ hostname=${rs_parts[0]}
+ port=${rs_parts[1]}
+ if [ "$port" -eq "$masterport" ]; then
+ echo "Skipping regionserver on master machine $hostname:$port"
+ continue
+ else
+ echo "Gracefully restarting: $hostname"
+ "$bin"/graceful_stop.sh --config ${HBASE_CONF_DIR} --restart --reload -nob --maxthreads \
+ ${RR_MAXTHREADS} ${RR_NOACK} --movetimeout ${RR_MOVE_TIMEOUT} $hostname
+ sleep 1
+ fi
+ done
+ if [ "$HBASE_BALANCER_STATE" != "false" ]; then
+ echo "Restoring balancer state to $HBASE_BALANCER_STATE"
+ echo "balance_switch $HBASE_BALANCER_STATE" | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell &> /dev/null
+ fi
+ fi
+fi
diff --git a/MSH-PIC/hbase/bin/rsgroup.sh b/MSH-PIC/hbase/bin/rsgroup.sh
new file mode 100644
index 0000000..2b52e7e
--- /dev/null
+++ b/MSH-PIC/hbase/bin/rsgroup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
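+# Pins the reporting tables to a dedicated RegionServer group: creates the
+# 'important' rsgroup, moves the two listed RegionServers into it, then
+# flushes and moves 'tsg:report_result' and 'tsg_galaxy:job_result' onto
+# that group. Assumes the RSGroup admin endpoint is enabled on the cluster.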
+
+source /etc/profile
+
+hbase shell <<EOF
+
+add_rsgroup 'important'
+
+move_servers_rsgroup 'important',['msh-dellr740-dt001:16020']
+
+move_servers_rsgroup 'important',['msh-dellr740-dt002:16020']
+
+flush 'tsg:report_result'
+
+move_tables_rsgroup 'important',['tsg:report_result']
+
+flush 'tsg_galaxy:job_result'
+
+move_tables_rsgroup 'important',['tsg_galaxy:job_result']
+
+
+EOF
+
diff --git a/MSH-PIC/hbase/bin/set_hbase_env.sh b/MSH-PIC/hbase/bin/set_hbase_env.sh
new file mode 100644
index 0000000..60612e8
--- /dev/null
+++ b/MSH-PIC/hbase/bin/set_hbase_env.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
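+# Node provisioning helper: exports HBASE_HOME/PATH via /etc/profile.d/hbase.sh,
+# registers the keephbasemaster and keephbaseregion init.d watchdog services
+# with chkconfig, and falls back to launching dae-hmaster.sh / dae-hregion.sh
+# directly when the watchdog daemons are not already running.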
+
+echo -e "\n#hbase\nexport HBASE_HOME=/home/tsg/olap/hbase-2.2.3\nexport PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile.d/hbase.sh
+chmod +x /etc/profile.d/hbase.sh
+
+source /etc/profile
+
+keeppath='/etc/init.d/keephbasemaster'
+if [ -x $keeppath ];then
+ chkconfig --add keephbasemaster
+ chkconfig keephbasemaster on
+ service keephbasemaster start && sleep 5
+ master_dae=`ps -ef | grep dae-hmaster.sh | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+
+keeppath='/etc/init.d/keephbaseregion'
+if [ -x $keeppath ];then
+ chkconfig --add keephbaseregion
+ chkconfig keephbaseregion on
+ service keephbaseregion start && sleep 5
+ region_dae=`ps -ef | grep dae-hregion.sh | grep -v grep | wc -l`
+ if [ $region_dae -lt 1 ];then
+ nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hregion.sh > /dev/null 2>&1 &
+ fi
+fi
+
diff --git a/MSH-PIC/hbase/bin/shutdown_regionserver.rb b/MSH-PIC/hbase/bin/shutdown_regionserver.rb
new file mode 100644
index 0000000..fd1af30
--- /dev/null
+++ b/MSH-PIC/hbase/bin/shutdown_regionserver.rb
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This script is used to issue a stop command to a regionserver via RPC.
+# Intended for use in environments where sshing around is inappropriate
+# Run it like this by passing it to a jruby interpreter:
+#
+# ./bin/hbase org.jruby.Main bin/shutdown_regionserver.rb c2021:16020
+
+include Java
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+
+def usage(msg = nil)
+ $stderr.puts 'Usage: shutdown_regionserver.rb <host:port>..'
+ $stderr.puts
+ $stderr.puts 'Stops the specified regionservers via RPC'
+ $stderr.puts format('Error: %s', msg) if msg
+ abort
+end
+
+usage if ARGV.empty?
+
+ARGV.each do |x|
+ usage format('Invalid host:port: %s', x) unless x.include? ':'
+end
+
+config = HBaseConfiguration.create
+connection = ConnectionFactory.createConnection(config)
+begin
+ admin = connection.getAdmin
+rescue
+ abort "Error: Couldn't instantiate HBaseAdmin"
+end
+
+ARGV.each do |hostport|
+ admin.stopRegionServer(hostport)
+end
+admin.close
+connection.close
diff --git a/MSH-PIC/hbase/bin/start-hbase.cmd b/MSH-PIC/hbase/bin/start-hbase.cmd
new file mode 100644
index 0000000..676a11e
--- /dev/null
+++ b/MSH-PIC/hbase/bin/start-hbase.cmd
@@ -0,0 +1,61 @@
+@rem/**
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem Modelled after $HADOOP_HOME/bin/start-hbase.sh.
+
+@rem Start hadoop hbase daemons.
+@rem Run this on master node.
+@echo off
+set usage="Usage: start-hbase.cmd"
+
+setlocal
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call %hbase-config-script%
+
+set distModeCommand=call %HBASE_BIN_PATH%\hbase.cmd org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed
+for /f %%i in ('%distModeCommand%') do set distMode=%%i
+
+if "%distMode%"=="false" (
+ start "HBase Distribution" %HBASE_BIN_PATH%\hbase.cmd master start
+) else (
+ if "%distMode%"=="true" (
+ @echo This is not implemented yet. Stay tuned.
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "${HBASE_CONF_DIR}" start zookeeper
+ @rem call %HBASE_BIN_PATH%\hbase-daemon.cmd --config "${HBASE_CONF_DIR}" start master
+
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "%HBASE_CONF_DIR%" --hosts "%HBASE_REGIONSERVERS%" start regionserver
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "%HBASE_CONF_DIR%" --hosts "%HBASE_BACKUP_MASTERS%" start master-backup
+ ) else (
+ echo ERROR: Could not determine the startup mode.
+ )
+)
+
+@rem -------------- End of main script --------------
+endlocal
+goto :eof
\ No newline at end of file
diff --git a/MSH-PIC/hbase/bin/start-hbase.sh b/MSH-PIC/hbase/bin/start-hbase.sh
new file mode 100644
index 0000000..f053526
--- /dev/null
+++ b/MSH-PIC/hbase/bin/start-hbase.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Modelled after $HADOOP_HOME/bin/start-hbase.sh.
+
+# Start hadoop hbase daemons.
+# Run this on master node.
+usage="Usage: start-hbase.sh [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>]\
+ [autostart|start]"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# start hbase daemons
+errCode=$?
+if [ $errCode -ne 0 ]
+then
+ exit $errCode
+fi
+
+if [ "$1" = "autostart" ]
+then
+ commandToRun="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autostart"
+else
+ commandToRun="start"
+fi
+
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+
+if [ "$distMode" == 'false' ]
+then
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master
+else
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" $commandToRun zookeeper
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_REGIONSERVERS}" $commandToRun regionserver
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" $commandToRun master-backup
+fi
diff --git a/MSH-PIC/hbase/bin/stop-hbase.cmd b/MSH-PIC/hbase/bin/stop-hbase.cmd
new file mode 100644
index 0000000..9718055
--- /dev/null
+++ b/MSH-PIC/hbase/bin/stop-hbase.cmd
@@ -0,0 +1,54 @@
+@echo off
+@rem/**
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem Modelled after $HADOOP_HOME/bin/stop-hbase.sh.
+
+@rem Stop hadoop hbase daemons. Run this on master node.
+
+setlocal
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call %hbase-config-script%
+
+set distModeCommand=call %HBASE_BIN_PATH%\hbase.cmd org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed
+for /f %%i in ('%distModeCommand%') do set distMode=%%i
+
+if "%distMode%"=="false" (
+ call %HBASE_BIN_PATH%\hbase.cmd master stop
+
+) else (
+ if "%distMode%"=="true" (
+ @echo This is not implemented yet. Stay tuned.
+ ) else (
+ echo ERROR: Could not determine the startup mode.
+ )
+)
+
+@rem -------------- End of main script --------------
+endlocal
+goto :eof
\ No newline at end of file
diff --git a/MSH-PIC/hbase/bin/stop-hbase.sh b/MSH-PIC/hbase/bin/stop-hbase.sh
new file mode 100644
index 0000000..4a19681
--- /dev/null
+++ b/MSH-PIC/hbase/bin/stop-hbase.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Modelled after $HADOOP_HOME/bin/stop-hbase.sh.
+
+# Stop hadoop hbase daemons. Run this on master node.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+# variables needed for stop command
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+mkdir -p "$HBASE_LOG_DIR"
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+ export HBASE_IDENT_STRING="$USER"
+fi
+
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-master-$HOSTNAME
+export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log
+logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out
+loglog="${HBASE_LOG_DIR}/${HBASE_LOGFILE}"
+pid=${HBASE_PID_DIR:-/tmp}/hbase-$HBASE_IDENT_STRING-master.pid
+
+if [[ -e $pid ]]; then
+ echo -n stopping hbase
+ echo "`date` Stopping hbase (via master)" >> $loglog
+
+ nohup nice -n ${HBASE_NICENESS:-0} "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ master stop "$@" > "$logout" 2>&1 < /dev/null &
+
+ waitForProcessEnd `cat $pid` 'stop-master-command'
+
+ rm -f $pid
+else
+ echo no hbase master found
+fi
+
+# distributed == false means that the HMaster will kill ZK when it exits
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+if [ "$distMode" == 'true' ]
+then
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" stop zookeeper
+fi
diff --git a/MSH-PIC/hbase/bin/test/process_based_cluster.sh b/MSH-PIC/hbase/bin/test/process_based_cluster.sh
new file mode 100644
index 0000000..eb8633f
--- /dev/null
+++ b/MSH-PIC/hbase/bin/test/process_based_cluster.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+#/**
+# * Copyright The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+set -e -u -o pipefail
+
+SCRIPT_NAME=${0##*/}
+SCRIPT_DIR=$(cd `dirname $0` && pwd )
+
+print_usage() {
+ cat >&2 <<EOT
+Usage: $SCRIPT_NAME <options>
+Options:
+ --kill
+ Kill local process-based HBase cluster using pid files.
+ --show
+ Show HBase processes running on this machine
+EOT
+ exit 1
+}
+
+show_processes() {
+ ps -ef | grep -P "(HRegionServer|HMaster|HQuorumPeer) start" | grep -v grep
+}
+
+cmd_specified() {
+ if [ "$CMD_SPECIFIED" ]; then
+ echo "Only one command can be specified" >&2
+ exit 1
+ fi
+ CMD_SPECIFIED=1
+}
+
+list_pid_files() {
+ LOCAL_CLUSTER_DIR=$SCRIPT_DIR/../../target/local_cluster
+ LOCAL_CLUSTER_DIR=$( cd $LOCAL_CLUSTER_DIR && pwd )
+ find $LOCAL_CLUSTER_DIR -name "*.pid"
+}
+
+if [ $# -eq 0 ]; then
+ print_usage
+fi
+
+IS_KILL=""
+IS_SHOW=""
+CMD_SPECIFIED=""
+
+while [ $# -ne 0 ]; do
+ case "$1" in
+ -h|--help)
+ print_usage ;;
+ --kill)
+ IS_KILL=1
+ cmd_specified ;;
+ --show)
+ IS_SHOW=1
+ cmd_specified ;;
+ *)
+ echo "Invalid option: $1" >&2
+ exit 1
+ esac
+ shift
+done
+
+if [ "$IS_KILL" ]; then
+ list_pid_files | \
+ while read F; do
+ PID=`cat $F`
+ echo "Killing pid $PID from file $F"
+ # Kill may fail but that's OK, so turn off error handling for a moment.
+ set +e
+ kill -9 $PID
+ set -e
+ done
+elif [ "$IS_SHOW" ]; then
+ PIDS=""
+ for F in `list_pid_files`; do
+ PID=`cat $F`
+ if [ -n "$PID" ]; then
+ if [ -n "$PIDS" ]; then
+ PIDS="$PIDS,"
+ fi
+ PIDS="$PIDS$PID"
+ fi
+ done
+ ps -p $PIDS
+else
+ echo "No command specified" >&2
+ exit 1
+fi
+
+
diff --git a/MSH-PIC/hbase/bin/zookeepers.sh b/MSH-PIC/hbase/bin/zookeepers.sh
new file mode 100644
index 0000000..97bf41b
--- /dev/null
+++ b/MSH-PIC/hbase/bin/zookeepers.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all zookeeper hosts.
+#
+# Environment Variables
+#
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: zookeepers [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+if [ "$HBASE_MANAGES_ZK" = "" ]; then
+ HBASE_MANAGES_ZK=true
+fi
+
+if [ "$HBASE_MANAGES_ZK" = "true" ]; then
+ hosts=`"$bin"/hbase org.apache.hadoop.hbase.zookeeper.ZKServerTool | grep '^ZK host:' | sed 's,^ZK host:,,'`
+ cmd=$"${@// /\\ }"
+ for zookeeper in $hosts; do
+ ssh $HBASE_SSH_OPTS $zookeeper $cmd 2>&1 | sed "s/^/$zookeeper: /" &
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
diff --git a/MSH-PIC/hbase/conf/backup-masters b/MSH-PIC/hbase/conf/backup-masters
new file mode 100644
index 0000000..bac46db
--- /dev/null
+++ b/MSH-PIC/hbase/conf/backup-masters
@@ -0,0 +1,2 @@
+192.168.20.194
+192.168.20.195
diff --git a/MSH-PIC/hbase/conf/core-site.xml b/MSH-PIC/hbase/conf/core-site.xml
new file mode 100644
index 0000000..f380e36
--- /dev/null
+++ b/MSH-PIC/hbase/conf/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
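+  <!-- "ns1" is an HA nameservice alias; the underlying NameNodes are expected
+       to be defined via dfs.nameservices / dfs.ha.namenodes.ns1 in hdfs-site.xml. -->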
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/home/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
diff --git a/MSH-PIC/hbase/conf/hadoop-metrics2-hbase.properties b/MSH-PIC/hbase/conf/hadoop-metrics2-hbase.properties
new file mode 100644
index 0000000..4c7dbbe
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hadoop-metrics2-hbase.properties
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file*.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period
+*.period=10
+
+# Below are some examples of sinks that could be used
+# to monitor different hbase daemons.
+
+# hbase.sink.file-all.class=org.apache.hadoop.metrics2.sink.FileSink
+# hbase.sink.file-all.filename=all.metrics
+
+# hbase.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink
+# hbase.sink.file0.context=hmaster
+# hbase.sink.file0.filename=master.metrics
+
+# hbase.sink.file1.class=org.apache.hadoop.metrics2.sink.FileSink
+# hbase.sink.file1.context=thrift-one
+# hbase.sink.file1.filename=thrift-one.metrics
+
+# hbase.sink.file2.class=org.apache.hadoop.metrics2.sink.FileSink
+# hbase.sink.file2.context=thrift-two
+# hbase.sink.file2.filename=thrift-one.metrics
+
+# hbase.sink.file3.class=org.apache.hadoop.metrics2.sink.FileSink
+# hbase.sink.file3.context=rest
+# hbase.sink.file3.filename=rest.metrics
diff --git a/MSH-PIC/hbase/conf/hbase-env.cmd b/MSH-PIC/hbase/conf/hbase-env.cmd
new file mode 100644
index 0000000..1f1c3e3
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hbase-env.cmd
@@ -0,0 +1,83 @@
+@rem/**
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem Set environment variables here.
+
+@rem The java implementation to use. Java 1.8+ required.
+@rem set JAVA_HOME=c:\apps\java
+
+@rem Extra Java CLASSPATH elements. Optional.
+@rem set HBASE_CLASSPATH=
+
+@rem The maximum amount of heap to use. Default is left to JVM default.
+@rem set HBASE_HEAPSIZE=1000
+
+@rem Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
+@rem offheap, set the value to "8G".
+@rem set HBASE_OFFHEAPSIZE=1000
+
+@rem For example, to allocate 8G of offheap, set the value to "8G":
+@rem set HBASE_OFFHEAPSIZE=8G
+
+@rem Extra Java runtime options.
+@rem Below are what we set by default. May only work with SUN JVM.
+@rem For more on why as well as other possible settings,
+@rem see http://hbase.apache.org/book.html#performance
+@rem JDK6 on Windows has a known bug for IPv6, use preferIPv4Stack unless JDK7.
+@rem @rem See TestIPv6NIOServerSocketChannel.
+set HBASE_OPTS=%HBASE_OPTS% "-XX:+UseConcMarkSweepGC" "-Djava.net.preferIPv4Stack=true"
+
+@rem Uncomment below to enable java garbage collection logging for the server-side processes
+@rem this enables basic gc logging for the server processes to the .out file
+@rem set SERVER_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" %HBASE_GC_OPTS%
+
+@rem this enables gc logging using automatic GC log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+. Either use this set of options or the one above
+@rem set SERVER_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" "-XX:+UseGCLogFileRotation" "-XX:NumberOfGCLogFiles=1" "-XX:GCLogFileSize=512M" %HBASE_GC_OPTS%
+
+@rem Uncomment below to enable java garbage collection logging for the client processes in the .out file.
+@rem set CLIENT_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" "-XX:+PrintGCDateStamps" %HBASE_GC_OPTS%
+
+@rem Uncomment below (along with above GC logging) to put GC information in its own logfile (will set HBASE_GC_OPTS)
+@rem set HBASE_USE_GC_LOGFILE=true
+
+@rem Uncomment and adjust to enable JMX exporting
+@rem See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+@rem More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+@rem
+@rem set HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false" "-Dcom.sun.management.jmxremote.authenticate=false"
+@rem set HBASE_MASTER_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10101"
+@rem set HBASE_REGIONSERVER_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10102"
+@rem set HBASE_THRIFT_OPTS=%HBASE_JMX_BASE% "-Dcom.sun.management.jmxremote.port=10103"
+@rem set HBASE_ZOOKEEPER_OPTS=%HBASE_JMX_BASE% -Dcom.sun.management.jmxremote.port=10104"
+
+@rem File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+@rem set HBASE_REGIONSERVERS=%HBASE_HOME%\conf\regionservers
+
+@rem Where log files are stored. $HBASE_HOME/logs by default.
+@rem set HBASE_LOG_DIR=%HBASE_HOME%\logs
+
+@rem A string representing this instance of hbase. $USER by default.
+@rem set HBASE_IDENT_STRING=%USERNAME%
+
+@rem Seconds to sleep between slave commands. Unset by default. This
+@rem can be useful in large clusters, where, e.g., slave rsyncs can
+@rem otherwise arrive faster than the master can service them.
+@rem set HBASE_SLAVE_SLEEP=0.1
+
+@rem Tell HBase whether it should manage its own instance of ZooKeeper or not.
+@rem set HBASE_MANAGES_ZK=true
diff --git a/MSH-PIC/hbase/conf/hbase-env.sh b/MSH-PIC/hbase/conf/hbase-env.sh
new file mode 100644
index 0000000..b3d225e
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hbase-env.sh
@@ -0,0 +1,143 @@
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# This script sets variables multiple times over the course of starting an hbase process,
+# so try to keep things idempotent unless you want to take an even deeper look
+# into the startup scripts (bin/hbase, etc.)
+
+# The java implementation to use. Java 1.7+ required.
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+
+# Extra Java CLASSPATH elements. Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use. Default is left to JVM default.
+#export HBASE_HEAPSIZE={heap}
+
+# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
+# offheap, set the value to "8G".
+#export HBASE_OFFHEAPSIZE=5G
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC "
+
+# RegionServer JVM options: 20 GB CMS heap, Metaspace sizing, GC log rotation and OOM heap dumps written to the HBase log directory.
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmx20480m -Xms20480m -Xmn128m -Xss256k -XX:MetaspaceSize=512m -XX:MaxMetaspaceSize=512m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hbase-2.2.3/logs/gc-regionserver-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hbase-2.2.3/logs/"
+
+export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Xmx2048m -Xms2048m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=128m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hbase-2.2.3/logs/gc-master-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hbase-2.2.3/logs/ -javaagent:/home/tsg/olap/hbase-2.2.3/monitor/jmx_prometheus_javaagent-0.12.0.jar=9907:/home/tsg/olap/hbase-2.2.3/monitor/hbase.yaml"
+
+export HBASE_REGIONSERVER_JMX_OPTS="$HBASE_JMX_BASE -javaagent:/home/tsg/olap/hbase-2.2.3/monitor/jmx_prometheus_javaagent-0.12.0.jar=9908:/home/tsg/olap/hbase-2.2.3/monitor/hbase.yaml"
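+# The jmx_prometheus_javaagent entries above expose JMX metrics over HTTP for
+# Prometheus: HMaster on port 9907 and RegionServer on port 9908, using the
+# mapping rules in monitor/hbase.yaml.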
+
+# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
+
+# This enables basic gc logging to the .out file.
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
+
+# This enables basic gc logging to the .out file.
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
+
+# This enables basic gc logging to its own file.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
+
+# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
+# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
+# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
+
+# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
+# needed setting up off-heap block caching.
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
+# section in HBase Reference Guide for instructions.
+
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
+#HBASE_REGIONSERVER_MLOCK=true
+#HBASE_REGIONSERVER_UID="hbase"
+
+# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
+# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=/home/tsg/olap/hbase-2.2.3/logs
+
+# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
+# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
+# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR=/home/tsg/olap/hbase-2.2.3/pids
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
+# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
+# In case one needs to do log rolling on a date change, one should set the environment property
+# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
+# For example:
+#HBASE_ROOT_LOGGER=INFO,DRFA
+HBASE_ROOT_LOGGER=ERROR,DRFA
+# The reason for changing the default to RFA is to avoid the boundary case of filling up disk space,
+# as DRFA doesn't put any cap on the log size. Please refer to HBASE-5655 for more context.
diff --git a/MSH-PIC/hbase/conf/hbase-policy.xml b/MSH-PIC/hbase/conf/hbase-policy.xml
new file mode 100644
index 0000000..bf47240
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientProtocol and AdminProtocol implementations (ie.
+ clients talking to HRegionServers)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterInterface protocol implementation (ie.
+ clients talking to HMaster for admin operations).
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.masterregion.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HMasterRegionInterface protocol implementations
+ (for HRegionServers communicating with HMaster)
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+</configuration>
diff --git a/MSH-PIC/hbase/conf/hbase-site.xml b/MSH-PIC/hbase/conf/hbase-site.xml
new file mode 100644
index 0000000..0e6cb16
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hbase-site.xml
@@ -0,0 +1,205 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://ns1/hbase</value>
+ </property>
+
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value>192.168.20.193,192.168.20.194,192.168.20.195</value>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>2181</value>
+ </property>
+
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ </property>
+
+<!--
+ <property>
+ <name>hbase.client.keyvalue.maxsize</name>
+ <value>1073741824</value>
+ </property>
+-->
+
+ <property>
+ <name>hbase.server.keyvalue.maxsize</name>
+ <value>5368709120</value>
+ </property>
+
+ <property>
+ <name>zookeeper.znode.parent</name>
+ <value>/hbase</value>
+ </property>
+
+ <property>
+ <name>hbase.rpc.timeout</name>
+ <value>300000</value>
+ </property>
+
+ <property>
+ <name>zookeeper.session.timeout</name>
+ <value>300000</value>
+ </property>
+
+ <!-- Files smaller than this threshold are merged during MOB compaction -->
+ <property>
+ <name>hbase.mob.compaction.mergeable.threshold</name>
+ <value>1342177280</value>
+ </property>
+
+ <property>
+ <name>hbase.mob.file.cache.size</name>
+ <value>1000</value>
+ </property>
+
+ <!-- MOB cache eviction period, in seconds -->
+ <property>
+ <name>hbase.mob.cache.evict.period</name>
+ <value>3600</value>
+ </property>
+
+ <!-- Ratio of cached files retained after a MOB cache eviction; eviction runs when the cache exceeds hbase.mob.file.cache.size -->
+ <property>
+ <name>hbase.mob.cache.evict.remain.ratio</name>
+ <value>0.5f</value>
+ </property>
+
+ <!-- Enable MOB (requires HFile format version 3) -->
+ <property>
+ <name>hfile.format.version</name>
+ <value>3</value>
+ </property>
+
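+ <!-- Illustrative sketch only (table and column-family names are hypothetical): with MOB enabled
+      via hfile.format.version=3, a MOB column family can be declared from the HBase shell, e.g.
+        create 'demo_table', {NAME => 'cf', IS_MOB => true, MOB_THRESHOLD => 102400}
+      Cells larger than MOB_THRESHOLD bytes are then written as MOB files and governed by the
+      hbase.mob.* settings above. -->
+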
+ <property>
+ <name>hbase.hregion.memstore.flush.size</name>
+ <value>534217728</value>
+ </property>
+
+ <!-- Number of memstore flush threads -->
+ <property>
+ <name>hbase.hstore.flusher.count</name>
+ <value>8</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size.lower.limit</name>
+ <value>0.4</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size</name>
+ <value>0.45</value>
+ </property>
+
+ <property>
+ <name>hfile.block.cache.size</name>
+ <value>0.3</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.memstore.block.multiplier</name>
+ <value>10</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.server.max.callqueue.length</name>
+ <value>1073741824</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>40</value>
+ <description>Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.</description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.maxClientCnxns</name>
+ <value>1000</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.max.request.size</name>
+ <value>1173741824</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.blockingWaitTime</name>
+ <value>30000</value>
+ </property>
+ <property>
+ <name>hbase.hstore.blockingStoreFiles</name>
+ <value>100</value>
+ </property>
+
+ <!-- Region split parameters -->
+ <property>
+  <name>hbase.hregion.max.filesize</name>
+  <value>107374182400</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.regionSplitLimit</name>
+ <value>1000</value>
+ </property>
+
+<!-- Phoenix -->
+ <property>
+  <name>phoenix.schema.isNamespaceMappingEnabled</name>
+  <value>true</value>
+ </property>
+ <property>
+  <name>phoenix.schema.mapSystemTablesToNamespace</name>
+  <value>true</value>
+ </property>
+
+<!-- RsGroup -->
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
+ </property>
+
+ <property>
+ <name>hbase.master.loadbalancer.class</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
+ </property>
+
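+ <!-- Illustrative sketch only (group and server names are hypothetical): with the RSGroup
+      coprocessor and balancer above enabled, region server groups are managed from the HBase
+      shell, e.g.
+        add_rsgroup 'group_olap'
+        move_servers_rsgroup 'group_olap', ['192.168.20.195:16020']
+        balance_rsgroup 'group_olap' -->
+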
+<!-- Enable per-table region balancing -->
+ <property>
+  <name>hbase.master.loadbalance.bytable</name>
+  <value>true</value>
+ </property>
+
+</configuration>
diff --git a/MSH-PIC/hbase/conf/hdfs-site.xml b/MSH-PIC/hbase/conf/hdfs-site.xml
new file mode 100644
index 0000000..6d93805
--- /dev/null
+++ b/MSH-PIC/hbase/conf/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the host where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.20.193:9000</value>
+ </property>
+ <!-- HTTP address of nn1 (external access address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.20.193:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the host where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.20.194:9000</value>
+ </property>
+ <!-- HTTP address of nn2 (external access address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.20.194:50070</value>
+ </property>
+ <!-- Where the NameNode shared edit log is stored on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.20.193:8485;192.168.20.194:8485;192.168.20.195:8485/ns1</value>
+ </property>
+ <!-- Local disk directory where each JournalNode stores its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/home/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Failover proxy class the HDFS client uses to reach the NameNode; it determines which NameNode is currently active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods used during automatic failover; several options exist (see the Hadoop HA docs). sshfence logs in remotely and kills the stale process, with shell(true) as a fallback -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence</value>
+ <value>shell(true)</value>
+ </property>
+ <!-- Passwordless-SSH private key, needed only when the sshfence mechanism is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence mechanism; like the key above, it can be omitted when a shell script handles the switchover -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enable automatic failover; may be left unset if automatic failover is not wanted -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
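+ <!-- Illustrative check only (nn1/nn2 are the service ids defined above): with automatic failover
+      enabled, the currently active NameNode can be confirmed with
+        hdfs haadmin -getServiceState nn1
+        hdfs haadmin -getServiceState nn2 -->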
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode RPC handler threads; raising this costs little in resources -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode RPC handler threads; raising this uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth the balancer may use, in bytes per second -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space, in bytes, that HDFS will not use -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
diff --git a/MSH-PIC/hbase/conf/log4j-hbtop.properties b/MSH-PIC/hbase/conf/log4j-hbtop.properties
new file mode 100644
index 0000000..1c93b0f
--- /dev/null
+++ b/MSH-PIC/hbase/conf/log4j-hbtop.properties
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=WARN,console
+log4j.threshold=WARN
+
+# console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p [%t] %c{2}: %m%n
+
+# Keep ZooKeeper logging quiet (raised to ERROR here)
+log4j.logger.org.apache.zookeeper=ERROR
diff --git a/MSH-PIC/hbase/conf/log4j.properties b/MSH-PIC/hbase/conf/log4j.properties
new file mode 100644
index 0000000..b9863ee
--- /dev/null
+++ b/MSH-PIC/hbase/conf/log4j.properties
@@ -0,0 +1,124 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=ERROR,console
+hbase.security.logger=ERROR,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+hbase.log.level=ERROR
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %-5p [%t] %c{2}: %.1000m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p [%t] %c{2}: %.1000m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %p %c: %.1000m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.visibility.VisibilityController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p [%t] %c{2}: %.1000m%n
+
+log4j.appender.asyncconsole=org.apache.hadoop.hbase.AsyncConsoleAppender
+log4j.appender.asyncconsole.target=System.err
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=${hbase.log.level}
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=${hbase.log.level}
+log4j.logger.org.apache.hadoop.hbase.META=${hbase.log.level}
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=${hbase.log.level}
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKWatcher=${hbase.log.level}
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of hbase:meta messages
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=INFO
+
+# EventCounter
+# Add "EventCounter" to rootlogger if you want to use this
+# Uncomment the line below to add EventCounter information
+# log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Prevent metrics subsystem start/stop messages (HBASE-17722)
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN
diff --git a/MSH-PIC/hbase/conf/regionservers b/MSH-PIC/hbase/conf/regionservers
new file mode 100644
index 0000000..3611b9d
--- /dev/null
+++ b/MSH-PIC/hbase/conf/regionservers
@@ -0,0 +1,3 @@
+192.168.20.193
+192.168.20.194
+192.168.20.195
diff --git a/MSH-PIC/hbase/conf/yarn-site.xml b/MSH-PIC/hbase/conf/yarn-site.xml
new file mode 100644
index 0000000..366878b
--- /dev/null
+++ b/MSH-PIC/hbase/conf/yarn-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Declare the addresses of the two ResourceManagers -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+ <!-- Configure rm1 -->
+ <!-- rm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>192.168.20.193</value>
+ </property>
+
+ <!-- rm1 web application address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>192.168.20.193:8080</value>
+ </property>
+
+ <!-- rm1 scheduler address (default port 8030) -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>192.168.20.193:8030</value>
+ </property>
+
+ <!-- rm1 resource-tracker address (default port 8031) -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>192.168.20.193:8031</value>
+ </property>
+
+ <!-- rm1 applications manager interface address (default port 8032) -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>192.168.20.193:8032</value>
+ </property>
+
+ <!-- rm1 admin address (default port 8033) -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>192.168.20.193:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>192.168.20.193:23142</value>
+ </property>
+
+ <!-- Configure rm2 -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>192.168.20.194</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>192.168.20.194:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>192.168.20.194:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>192.168.20.194:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>192.168.20.194:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>192.168.20.194:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>192.168.20.194:23142</value>
+ </property>
+
+ <!-- ZooKeeper quorum used by the ResourceManagers -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+
+ <!-- Enable ResourceManager recovery so in-flight applications survive an RM failure (default false) -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Enable NodeManager recovery (default false) -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Local filesystem path where the NodeManager saves its recovery state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+ <!-- NodeManager RPC address. The default, ${yarn.nodemanager.hostname}:0, picks an ephemeral port that changes across restarts; a fixed port is set here so NodeManager restart/recovery keeps working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/logs/app-logs/</value>
+ </property>
+
+ <!-- Physical memory, in MB, that the NodeManager may allocate to containers (default 8192) -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>61440</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in MB (default 1024) -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb (default 8192) -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>61440</value>
+ </property>
+
+ <!-- Number of vcores that may be allocated to containers; used by the RM scheduler, it does not limit the physical cores containers use (default 8, usually set to the host's total core count) -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>48</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in vcores (default 1) -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in vcores (default 32); usually slightly below yarn.nodemanager.resource.cpu-vcores, and a job's slots should not exceed it -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>48</value>
+ </property>
+
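+ <!-- Rough capacity math for the values above (illustrative only): with 61440 MB of NodeManager
+      memory and a 1024 MB minimum allocation, one NodeManager can host at most 60 containers by
+      memory, while its 48 vcores allow at most 48 single-vcore containers; whichever limit is
+      reached first applies. -->
+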
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- Maximum ApplicationMaster attempts; defaults to 2 with HA configured and can be raised in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://192.168.20.193:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
diff --git a/MSH-PIC/kafka/bin/connect-distributed.sh b/MSH-PIC/kafka/bin/connect-distributed.sh
new file mode 100644
index 0000000..99cd27b
--- /dev/null
+++ b/MSH-PIC/kafka/bin/connect-distributed.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] connect-distributed.properties"
+ exit 1
+fi
+
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"
diff --git a/MSH-PIC/kafka/bin/connect-standalone.sh b/MSH-PIC/kafka/bin/connect-standalone.sh
new file mode 100644
index 0000000..623562a
--- /dev/null
+++ b/MSH-PIC/kafka/bin/connect-standalone.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] connect-standalone.properties"
+ exit 1
+fi
+
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
diff --git a/MSH-PIC/kafka/bin/create_topic.sh b/MSH-PIC/kafka/bin/create_topic.sh
new file mode 100644
index 0000000..946d5bf
--- /dev/null
+++ b/MSH-PIC/kafka/bin/create_topic.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+source /etc/profile
+
+kafka-operation.sh list > /home/tsg/olap/kafka_2.11-1.0.0/topic_list
+
+# Decide RECORD_PARTITION for the high-volume record topics: start at 3 and
+# raise it to 24 when 24 is evenly divisible by 3.
+function compare(){
+RECORD_PARTITION=3
+res=$(( 24 % 3 ))
+if [ $res = 0 ];then
+  if [ $RECORD_PARTITION -le "24" ];then
+    RECORD_PARTITION=24
+  fi
+fi
+}
+
+# createTopic <replication-factor> <partitions> <topic-name>
+# Creates the topic only if it is not already present in the topic_list snapshot above.
+function createTopic(){
+REPLICATION=$1
+PARTION_NUMS=$2
+TOPIC_NAME=$3
+
+HAS_TOPIC=`cat /home/tsg/olap/kafka_2.11-1.0.0/topic_list | grep -wx "$TOPIC_NAME" | wc -l`
+
+if [ $HAS_TOPIC -eq '0' ];then
+ kafka-topics.sh '--create' '--zookeeper' '192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka' '--replication-factor' $REPLICATION '--partitions' $PARTION_NUMS '--topic' $TOPIC_NAME
+fi
+
+}
+
+compare
+
+# Bulk-collection record topics; replicas are not added because of their volume
+createTopic 1 3 TRANSACTION-RECORD
+createTopic 1 3 TRANSACTION-RECORD-COMPLETED
+createTopic 1 3 INTERIM-SESSION-RECORD
+createTopic 1 3 INTERIM-SESSION-RECORD-COMPLETED
+createTopic 1 3 SESSION-RECORD
+createTopic 1 3 SESSION-RECORD-COMPLETED
+
+# Business-related records: lower volume but important, so an extra replica is kept
+createTopic 2 3 ACTIVE-DEFENCE-EVENT
+createTopic 2 3 ACTIVE-DEFENCE-EVENT-COMPLETED
+createTopic 2 3 DOS-EVENT
+createTopic 2 3 DOS-SKETCH-RECORD
+createTopic 2 3 GTPC-RECORD
+createTopic 2 3 GTPC-RECORD-COMPLETED
+createTopic 2 24 PROXY-EVENT
+createTopic 2 24 PROXY-EVENT-COMPLETED
+createTopic 2 3 RADIUS-RECORD
+createTopic 2 3 RADIUS-RECORD-COMPLETED
+createTopic 2 24 SECURITY-EVENT
+createTopic 2 24 SECURITY-EVENT-COMPLETED
+createTopic 2 3 SYS-PACKET-CAPTURE-EVENT
+createTopic 2 3 SYS-PACKET-CAPTURE-EVENT-COMPLETED
+createTopic 2 3 VOIP-RECORD
+createTopic 2 3 VOIP-RECORD-COMPLETED
+createTopic 2 3 BGP-RECORD
+createTopic 2 3 BGP-RECORD-COMPLETED
+
+# Topics used by the feature/functional components
+createTopic 2 3 INTERNAL-RTP-RECORD
+createTopic 2 3 PXY-EXCH-INTERMEDIA-CERT
+createTopic 2 3 INTERNAL-PACKET-CAPTURE-EVENT
+createTopic 2 3 SECURITY-PACKET-CAPTURE-RECORD
+
+# Analytics records (pre-aggregated)
+createTopic 2 3 RADIUS-ONFF
+createTopic 2 3 SYS-STORAGE
+createTopic 2 3 VOIP-CONVERSATION-RECORD
+createTopic 2 3 TRAFFIC-TOP-DESTINATION-IP-METRICS
+createTopic 2 3 POLICY-RULE-METRICS
+createTopic 2 3 NETWORK-TRAFFIC-METRICS
+createTopic 2 3 TRAFFIC-TOP-METRICS
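+
+# Illustrative check only: a topic's partition and replica layout can be verified against the same
+# ZooKeeper chroot with the stock tooling, e.g.
+#   kafka-topics.sh --describe --zookeeper 192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka --topic SECURITY-EVENT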
diff --git a/MSH-PIC/kafka/bin/dae-kafka.sh b/MSH-PIC/kafka/bin/dae-kafka.sh
new file mode 100644
index 0000000..12d8f85
--- /dev/null
+++ b/MSH-PIC/kafka/bin/dae-kafka.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+VERSION=kafka_2.11-1.0.0
+
+function checkLogFiles(){
+if [ ! -d "$BASE_DIR/$VERSION/logs/reslogs/" ];then
+ mkdir -p $BASE_DIR/$VERSION/logs/reslogs
+fi
+
+if [ ! -f "$BASE_DIR/$VERSION/logs/restart_sum" ];then
+ echo 0 > $BASE_DIR/$VERSION/logs/restart_sum
+fi
+}
+
+checkLogFiles
+
+function set_log(){
+OLD_NUM=`cat $BASE_DIR/$VERSION/logs/restart_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASE_DIR/$VERSION/logs/restart_sum
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - kafka service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ RESLOG_NAME=restart_log_`date "+%Y%m%d_%H%M%S"`
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - kafka service restarted after an abnormal exit - restart count -> $RESTART_NUM ; collected error log file: $BASE_DIR/$VERSION/logs/reslogs/$RESLOG_NAME" >> $BASE_DIR/$VERSION/logs/restart.log
+ tail -n 50000 $BASE_DIR/$VERSION/logs/server.log | egrep "ERROR|WARN" >> $BASE_DIR/$VERSION/logs/reslogs/$RESLOG_NAME
+fi
+}
+
+
+# Watchdog loop: every 60 seconds, if the three listener ports (9092/9094/9095) are not all up
+# and no Kafka JVM is running, restart the broker and record the restart.
+while true ; do
+PROCESS=`jps | grep -w Kafka | grep -v grep |wc -l`
+PORT=`netstat -anlp | egrep "9092|9094|9095" | grep "LISTEN" | wc -l`
+
+if [ $PORT -ne "3" ];then
+ if [ $PROCESS -lt "1" ];then
+ JMX_PORT=9191 nohup $BASE_DIR/$VERSION/bin/kafka-server-start.sh $BASE_DIR/$VERSION/config/server.properties > /dev/null 2>&1 &
+ set_log
+ fi
+#else
+# echo "`date "+%Y-%m-%d %H:%M:%S"` - Kafka ports not listening but a Kafka process exists; treating it as a zombie process and killing the local Kafka process" >> $BASE_DIR/$VERSION/logs/restart.log
+# jps | grep Kafka | awk '{print $1}' | xargs kill -9
+fi
+
+sleep 60
+done
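+
+# Illustrative usage only (the install path is assumed): this watchdog is meant to run in the
+# background, e.g.
+#   nohup /home/tsg/olap/kafka_2.11-1.0.0/bin/dae-kafka.sh > /dev/null 2>&1 &
+# The accumulated restart count can be read from $BASE_DIR/$VERSION/logs/restart_sum.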
diff --git a/MSH-PIC/kafka/bin/kafka-acls.sh b/MSH-PIC/kafka/bin/kafka-acls.sh
new file mode 100644
index 0000000..8fa6554
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-acls.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-broker-api-versions.sh b/MSH-PIC/kafka/bin/kafka-broker-api-versions.sh
new file mode 100644
index 0000000..4f560a0
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-broker-api-versions.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-configs.sh b/MSH-PIC/kafka/bin/kafka-configs.sh
new file mode 100644
index 0000000..2f9eb8c
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-configs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-console-consumer.sh b/MSH-PIC/kafka/bin/kafka-console-consumer.sh
new file mode 100644
index 0000000..9a95354
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-console-consumer.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+export KAFKA_HEAP_OPTS="-Djava.security.auth.login.config=/home/tsg/olap/kafka_2.11-1.0.0/config/kafka_client_jaas.conf -Xmx512M"
+# export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-console-producer.sh b/MSH-PIC/kafka/bin/kafka-console-producer.sh
new file mode 100644
index 0000000..304df2e
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-console-producer.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+export KAFKA_HEAP_OPTS="-Djava.security.auth.login.config=/home/tsg/olap/kafka_2.11-1.0.0/config/kafka_client_jaas.conf -Xmx512M"
+# export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-consumer-groups.sh b/MSH-PIC/kafka/bin/kafka-consumer-groups.sh
new file mode 100644
index 0000000..feb063d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-consumer-groups.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-consumer-perf-test.sh b/MSH-PIC/kafka/bin/kafka-consumer-perf-test.sh
new file mode 100644
index 0000000..77cda72
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-consumer-perf-test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-delete-records.sh b/MSH-PIC/kafka/bin/kafka-delete-records.sh
new file mode 100644
index 0000000..8726f91
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-delete-records.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-log-dirs.sh b/MSH-PIC/kafka/bin/kafka-log-dirs.sh
new file mode 100644
index 0000000..dc16edc
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-log-dirs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-mirror-maker.sh b/MSH-PIC/kafka/bin/kafka-mirror-maker.sh
new file mode 100644
index 0000000..981f271
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-mirror-maker.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-operation.sh b/MSH-PIC/kafka/bin/kafka-operation.sh
new file mode 100644
index 0000000..201c2ae
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-operation.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+source /etc/profile
+BASE_DIR=$KAFKA_HOME
+IP_LIST=192.168.20.193:9094
+ZK_LISR=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka
+partitions=1
+case $1 in
+ producer)
+ exec $BASE_DIR/bin/kafka-console-producer.sh '--broker-list' $IP_LIST '--topic' $2 '--producer.config' /home/tsg/olap/kafka_2.11-1.0.0/config/sasl-config.properties
+ ;;
+ consumer)
+ exec $BASE_DIR/bin/kafka-console-consumer.sh '--bootstrap-server' $IP_LIST '--topic' $2 '--consumer.config' /home/tsg/olap/kafka_2.11-1.0.0/config/sasl-config.properties
+ ;;
+ consumer-begin)
+ exec $BASE_DIR/bin/kafka-console-consumer.sh '--bootstrap-server' $IP_LIST '--topic' $2 '--from-beginning' '--consumer.config' /home/tsg/olap/kafka_2.11-1.0.0/config/sasl-config.properties
+ ;;
+ create)
+ exec $BASE_DIR/bin/kafka-topics.sh '--create' '--zookeeper' $ZK_LISR '--replication-factor' 1 '--partitions' $partitions '--topic' $2
+ ;;
+ list)
+ exec $BASE_DIR/bin/kafka-topics.sh '--list' '--zookeeper' $ZK_LISR
+ ;;
+ start)
+ JMX_PORT=9191 nohup $BASE_DIR/bin/kafka-server-start.sh $BASE_DIR/config/server.properties > /dev/null 2>&1 &
+ ;;
+ stop)
+ exec $BASE_DIR/bin/kafka-server-stop.sh
+ ;;
+ group)
+ exec $BASE_DIR/bin/kafka-consumer-groups.sh '--bootstrap-server' $IP_LIST '--describe' '--group' $2
+ ;;
+ election-leader)
+ exec $BASE_DIR/bin/kafka-preferred-replica-election.sh '--zookeeper' $ZK_LISR
+ ;;
+ *)
+ echo 'Usage: kafka-operation.sh {producer|consumer|consumer-begin|create} {topic-name}'
+ echo 'Status: kafka-operation.sh {start|stop|list}'
+ echo 'Maintenance: kafka-operation.sh {group|election-leader} [group-name]'
+esac
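+
+# Illustrative examples (topic and group names are placeholders):
+#   kafka-operation.sh consumer SECURITY-EVENT
+#   kafka-operation.sh group some-consumer-group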
diff --git a/MSH-PIC/kafka/bin/kafka-preferred-replica-election.sh b/MSH-PIC/kafka/bin/kafka-preferred-replica-election.sh
new file mode 100644
index 0000000..638a92a
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-preferred-replica-election.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.PreferredReplicaLeaderElectionCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-producer-perf-test.sh b/MSH-PIC/kafka/bin/kafka-producer-perf-test.sh
new file mode 100644
index 0000000..73a6288
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-producer-perf-test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-reassign-partitions.sh b/MSH-PIC/kafka/bin/kafka-reassign-partitions.sh
new file mode 100644
index 0000000..4c7f1bc
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-reassign-partitions.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-replay-log-producer.sh b/MSH-PIC/kafka/bin/kafka-replay-log-producer.sh
new file mode 100644
index 0000000..bba3241
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-replay-log-producer.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplayLogProducer "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-replica-verification.sh b/MSH-PIC/kafka/bin/kafka-replica-verification.sh
new file mode 100644
index 0000000..4960836
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-replica-verification.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-run-class.sh b/MSH-PIC/kafka/bin/kafka-run-class.sh
new file mode 100644
index 0000000..bc765de
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-run-class.sh
@@ -0,0 +1,272 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
+ exit 1
+fi
+
+# CYGWIN == 1 if Cygwin is detected, else 0.
+if [[ $(uname -a) =~ "CYGWIN" ]]; then
+ CYGWIN=1
+else
+ CYGWIN=0
+fi
+
+if [ -z "$INCLUDE_TEST_JARS" ]; then
+ INCLUDE_TEST_JARS=false
+fi
+
+# Exclude jars not necessary for running commands.
+regex="(-(test|src|scaladoc|javadoc)\.jar|jar.asc)$"
+should_include_file() {
+ if [ "$INCLUDE_TEST_JARS" = true ]; then
+ return 0
+ fi
+ file=$1
+ if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+base_dir=$(dirname $0)/..
+
+if [ -z "$SCALA_VERSION" ]; then
+ SCALA_VERSION=2.11.11
+fi
+
+if [ -z "$SCALA_BINARY_VERSION" ]; then
+ SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
+fi
+
+# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
+shopt -s nullglob
+for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
+do
+ if [ -z "$CLASSPATH" ] ; then
+ CLASSPATH="$dir/*"
+ else
+ CLASSPATH="$CLASSPATH:$dir/*"
+ fi
+done
+
+for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for file in "$base_dir"/clients/build/libs/kafka-clients*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for file in "$base_dir"/streams/build/libs/kafka-streams*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for file in "$base_dir"/streams/build/dependant-libs-${SCALA_VERSION}/rocksdb*.jar;
+do
+ CLASSPATH="$CLASSPATH":"$file"
+done
+
+for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
+do
+ CLASSPATH="$CLASSPATH:$dir/*"
+done
+
+for cc_pkg in "api" "transforms" "runtime" "file" "json" "tools"
+do
+ for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
+ do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+ done
+ if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
+ CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
+ fi
+done
+
+# classpath addition for release
+for file in "$base_dir"/libs/*;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+
+for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
+do
+ if should_include_file "$file"; then
+ CLASSPATH="$CLASSPATH":"$file"
+ fi
+done
+shopt -u nullglob
+
+if [ -z "$CLASSPATH" ] ; then
+ echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -Pscala_version=$SCALA_VERSION'"
+ exit 1
+fi
+
+# JMX settings
+if [ -z "$KAFKA_JMX_OPTS" ]; then
+ KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
+fi
+
+# JMX port to use
+if [ $JMX_PORT ]; then
+ KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
+fi
+
+# Log directory to use
+if [ "x$LOG_DIR" = "x" ]; then
+ LOG_DIR="$base_dir/logs"
+fi
+
+# Log4j settings
+if [ -z "$KAFKA_LOG4J_OPTS" ]; then
+ # Log to console. This is a tool.
+ LOG4J_DIR="$base_dir/config/tools-log4j.properties"
+ # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
+ (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
+ KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
+else
+ # create logs directory
+ if [ ! -d "$LOG_DIR" ]; then
+ mkdir -p "$LOG_DIR"
+ fi
+fi
+
+# If Cygwin is detected, LOG_DIR is converted to Windows format.
+(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
+KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
+
+# Generic jvm settings you want to add
+if [ -z "$KAFKA_OPTS" ]; then
+ KAFKA_OPTS=""
+fi
+
+# Set Debug options if enabled
+if [ "x$KAFKA_DEBUG" != "x" ]; then
+
+ # Use default ports
+ DEFAULT_JAVA_DEBUG_PORT="5005"
+
+ if [ -z "$JAVA_DEBUG_PORT" ]; then
+ JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
+ fi
+
+ # Use the defaults if JAVA_DEBUG_OPTS was not set
+ DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
+ if [ -z "$JAVA_DEBUG_OPTS" ]; then
+ JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
+ fi
+
+ echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
+ KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
+fi
+
+# Which java to use
+if [ -z "$JAVA_HOME" ]; then
+ JAVA="java"
+else
+ JAVA="$JAVA_HOME/bin/java"
+fi
+
+# Memory options
+if [ -z "$KAFKA_HEAP_OPTS" ]; then
+ KAFKA_HEAP_OPTS="-Xmx256M"
+fi
+
+# JVM performance options
+if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
+# KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true"
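+  # Customized default: the stock G1 settings above are replaced below with explicit
+  # Metaspace sizing and free-ratio tuning plus a 16M G1 heap region size.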
+ KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true"
+fi
+
+
+while [ $# -gt 0 ]; do
+ COMMAND=$1
+ case $COMMAND in
+ -name)
+ DAEMON_NAME=$2
+ CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
+ shift 2
+ ;;
+ -loggc)
+ if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
+ GC_LOG_ENABLED="true"
+ fi
+ shift
+ ;;
+ -daemon)
+ DAEMON_MODE="true"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+# GC options
+GC_FILE_SUFFIX='-gc.log'
+GC_LOG_FILE_NAME=''
+if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
+ GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
+ # the first segment of the version number, which is '1' for releases before Java 9
+ # it then becomes '9', '10', ...
+ JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([^.-]*).*"/\1/p')
+ if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
+ KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=102400"
+ else
+ KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
+ fi
+fi
+
+# If Cygwin is detected, classpath is converted to Windows format.
+(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
+
+# Launch mode
+if [ "x$DAEMON_MODE" = "xtrue" ]; then
+ nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
+else
+ exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
+fi
diff --git a/MSH-PIC/kafka/bin/kafka-server-start.sh b/MSH-PIC/kafka/bin/kafka-server-start.sh
new file mode 100644
index 0000000..93b67a4
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-server-start.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
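+# Site-specific additions: load the broker SASL/JAAS configuration and attach the
+# Prometheus JMX exporter agent so broker metrics are exposed on port 9901.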
+export KAFKA_OPTS="-Djava.security.auth.login.config=/home/tsg/olap/kafka_2.11-1.0.0/config/kafka_server_jaas.conf -javaagent:/home/tsg/olap/kafka_2.11-1.0.0/monitor/jmx_prometheus_javaagent-0.12.0.jar=9901:/home/tsg/olap/kafka_2.11-1.0.0/monitor/kafka.yml"
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
+ exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx16384m -Xms4096m"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-server-start.sh.bak b/MSH-PIC/kafka/bin/kafka-server-start.sh.bak
new file mode 100644
index 0000000..5a53126
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-server-start.sh.bak
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
+ exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-server-stop.sh b/MSH-PIC/kafka/bin/kafka-server-stop.sh
new file mode 100644
index 0000000..d3c660c
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-server-stop.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+PIDS=$(ps ax | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $1}')
+
+if [ -z "$PIDS" ]; then
+ echo "No kafka server to stop"
+ exit 1
+else
+ kill -s TERM $PIDS
+fi
+
diff --git a/MSH-PIC/kafka/bin/kafka-simple-consumer-shell.sh b/MSH-PIC/kafka/bin/kafka-simple-consumer-shell.sh
new file mode 100644
index 0000000..27e386a
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-simple-consumer-shell.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.SimpleConsumerShell "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-streams-application-reset.sh b/MSH-PIC/kafka/bin/kafka-streams-application-reset.sh
new file mode 100644
index 0000000..3363732
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-streams-application-reset.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+
+exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-topics.sh b/MSH-PIC/kafka/bin/kafka-topics.sh
new file mode 100644
index 0000000..ad6a2d4
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-topics.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-verifiable-consumer.sh b/MSH-PIC/kafka/bin/kafka-verifiable-consumer.sh
new file mode 100644
index 0000000..852847d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-verifiable-consumer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@"
diff --git a/MSH-PIC/kafka/bin/kafka-verifiable-producer.sh b/MSH-PIC/kafka/bin/kafka-verifiable-producer.sh
new file mode 100644
index 0000000..b59bae7
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kafka-verifiable-producer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M"
+fi
+exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@"
diff --git a/MSH-PIC/kafka/bin/kflogdelete.sh b/MSH-PIC/kafka/bin/kflogdelete.sh
new file mode 100644
index 0000000..e14e22c
--- /dev/null
+++ b/MSH-PIC/kafka/bin/kflogdelete.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Keep only the most recent 30 days of logs.
+# Add this script to the system cron schedule (/etc/crontab).
+# The script reads environment variables, so they must be configured; hence the "source /etc/profile" below.
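+# Example /etc/crontab entry (illustrative; adjust the script path to your install):
+#   0 2 * * * root /home/tsg/olap/kafka_2.11-1.0.0/bin/kflogdelete.sh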
+source /etc/profile
+
+kafka_dir=/home/tsg/olap/kafka_2.11-1.0.0
+
+find ${kafka_dir}/logs/ -mtime +30 -name "*.log.*" -exec rm -rf {} \;
+
diff --git a/MSH-PIC/kafka/bin/set_kafka_env.sh b/MSH-PIC/kafka/bin/set_kafka_env.sh
new file mode 100644
index 0000000..e83c44e
--- /dev/null
+++ b/MSH-PIC/kafka/bin/set_kafka_env.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
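+# Persist KAFKA_HOME and PATH for all users via /etc/profile.d/kafka.sh.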
+echo -e "\n#kafka\nexport KAFKA_HOME=/home/tsg/olap/kafka_2.11-1.0.0\nexport PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile.d/kafka.sh
+chmod +x /etc/profile.d/kafka.sh
+source /etc/profile
+
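+# If the keepkafalive init script is installed, register it with chkconfig,
+# start the service, and make sure the dae-kafka.sh watchdog is running.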
+keeppath='/etc/init.d/keepkafalive'
+if [ -x $keeppath ];then
+ chkconfig --add keepkafalive
+ chkconfig keepkafalive on
+ service keepkafalive start && sleep 5
+ kafka_dae=`ps -ef | grep dae-kafka.sh | grep -v grep | wc -l`
+ if [ $kafka_dae -eq "0" ];then
+ nohup /home/tsg/olap/kafka_2.11-1.0.0/bin/dae-kafka.sh > /dev/null 2>&1 &
+ fi
+fi
+
diff --git a/MSH-PIC/kafka/bin/trogdor.sh b/MSH-PIC/kafka/bin/trogdor.sh
new file mode 100644
index 0000000..b211209
--- /dev/null
+++ b/MSH-PIC/kafka/bin/trogdor.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+ cat <<EOF
+The Trogdor fault injector.
+
+Usage:
+ $0 [action] [options]
+
+Actions:
+ agent: Run the trogdor agent.
+ coordinator: Run the trogdor coordinator.
+ client: Run the client which communicates with the trogdor coordinator.
+ agent-client: Run the client which communicates with the trogdor agent.
+ help: This help message.
+EOF
+}
+
+if [[ $# -lt 1 ]]; then
+ usage
+ exit 0
+fi
+action="${1}"
+shift
+CLASS=""
+case ${action} in
+ agent) CLASS="org.apache.kafka.trogdor.agent.Agent";;
+ coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";;
+ client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";;
+ agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";;
+ help) usage; exit 0;;
+ *) echo "Unknown action '${action}'. Type '$0 help' for help."; exit 1;;
+esac
+
+export INCLUDE_TEST_JARS=1
+exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@"
diff --git a/MSH-PIC/kafka/bin/windows/connect-distributed.bat b/MSH-PIC/kafka/bin/windows/connect-distributed.bat
new file mode 100644
index 0000000..aaa3c41
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/connect-distributed.bat
@@ -0,0 +1,34 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+ echo USAGE: %0 connect-distributed.properties
+ EXIT /B 1
+)
+
+SetLocal
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+ set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/connect-standalone.bat b/MSH-PIC/kafka/bin/windows/connect-standalone.bat
new file mode 100644
index 0000000..54cc11f
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/connect-standalone.bat
@@ -0,0 +1,34 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+ echo USAGE: %0 connect-standalone.properties
+ EXIT /B 1
+)
+
+SetLocal
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+ set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
+)
+
+"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-acls.bat b/MSH-PIC/kafka/bin/windows/kafka-acls.bat
new file mode 100644
index 0000000..8f0be85
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-acls.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-broker-api-versions.bat b/MSH-PIC/kafka/bin/windows/kafka-broker-api-versions.bat
new file mode 100644
index 0000000..f7ec72d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-broker-api-versions.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-configs.bat b/MSH-PIC/kafka/bin/windows/kafka-configs.bat
new file mode 100644
index 0000000..3792a5d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-configs.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-console-consumer.bat b/MSH-PIC/kafka/bin/windows/kafka-console-consumer.bat
new file mode 100644
index 0000000..bbbd336
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-console-consumer.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-console-producer.bat b/MSH-PIC/kafka/bin/windows/kafka-console-producer.bat
new file mode 100644
index 0000000..e1834bc
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-console-producer.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-consumer-groups.bat b/MSH-PIC/kafka/bin/windows/kafka-consumer-groups.bat
new file mode 100644
index 0000000..e027b9e
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-consumer-groups.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-consumer-offset-checker.bat b/MSH-PIC/kafka/bin/windows/kafka-consumer-offset-checker.bat
new file mode 100644
index 0000000..2baa1b8
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-consumer-offset-checker.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.ConsumerOffsetChecker %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-consumer-perf-test.bat b/MSH-PIC/kafka/bin/windows/kafka-consumer-perf-test.bat
new file mode 100644
index 0000000..606c784
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-consumer-perf-test.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+"%~dp0kafka-run-class.bat" kafka.tools.ConsumerPerformance %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-mirror-maker.bat b/MSH-PIC/kafka/bin/windows/kafka-mirror-maker.bat
new file mode 100644
index 0000000..a1fae45
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-mirror-maker.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-preferred-replica-election.bat b/MSH-PIC/kafka/bin/windows/kafka-preferred-replica-election.bat
new file mode 100644
index 0000000..f9f0014
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-preferred-replica-election.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.PreferredReplicaLeaderElectionCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-producer-perf-test.bat b/MSH-PIC/kafka/bin/windows/kafka-producer-perf-test.bat
new file mode 100644
index 0000000..917d211
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-producer-perf-test.bat
@@ -0,0 +1,20 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+SetLocal
+set KAFKA_HEAP_OPTS=-Xmx512M
+"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-reassign-partitions.bat b/MSH-PIC/kafka/bin/windows/kafka-reassign-partitions.bat
new file mode 100644
index 0000000..62b710d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-reassign-partitions.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-replay-log-producer.bat b/MSH-PIC/kafka/bin/windows/kafka-replay-log-producer.bat
new file mode 100644
index 0000000..7b51302
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-replay-log-producer.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.ReplayLogProducer %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-replica-verification.bat b/MSH-PIC/kafka/bin/windows/kafka-replica-verification.bat
new file mode 100644
index 0000000..bf4805d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-replica-verification.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.ReplicaVerificationTool %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-run-class.bat b/MSH-PIC/kafka/bin/windows/kafka-run-class.bat
new file mode 100644
index 0000000..c56f82c
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-run-class.bat
@@ -0,0 +1,191 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+IF [%1] EQU [] (
+ echo USAGE: %0 classname [opts]
+ EXIT /B 1
+)
+
+rem Using pushd popd to set BASE_DIR to the absolute path
+pushd %~dp0..\..
+set BASE_DIR=%CD%
+popd
+
+IF ["%SCALA_VERSION%"] EQU [""] (
+ set SCALA_VERSION=2.11.11
+)
+
+IF ["%SCALA_BINARY_VERSION%"] EQU [""] (
+ for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do (
+ set FIRST=%%a
+ set SECOND=%%b
+ if ["!SECOND!"] EQU [""] (
+ set SCALA_BINARY_VERSION=!FIRST!
+ ) else (
+ set SCALA_BINARY_VERSION=!FIRST!.!SECOND!
+ )
+ )
+)
+
+rem Classpath addition for kafka-core dependencies
+for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for kafka-examples
+for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for kafka-clients
+for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for kafka-streams
+for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for kafka-streams-examples
+for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do (
+ call :concat "%%i"
+)
+
+for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for kafka tools
+for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do (
+ call :concat "%%i"
+)
+
+for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
+ call :concat "%%i"
+)
+
+for %%p in (api runtime file json tools) do (
+ for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do (
+ call :concat "%%i"
+ )
+ if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" (
+ call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*"
+ )
+)
+
+rem Classpath addition for release
+for %%i in ("%BASE_DIR%\libs\*") do (
+ call :concat "%%i"
+)
+
+rem Classpath addition for core
+for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do (
+ call :concat "%%i"
+)
+
+rem JMX settings
+IF ["%KAFKA_JMX_OPTS%"] EQU [""] (
+ set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false
+)
+
+rem JMX port to use
+IF ["%JMX_PORT%"] NEQ [""] (
+ set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT%
+)
+
+rem Log directory to use
+IF ["%LOG_DIR%"] EQU [""] (
+ set LOG_DIR="%BASE_DIR~%/logs"
+)
+
+rem Log4j settings
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+ set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
+) ELSE (
+ rem create logs directory
+ IF not exist "%LOG_DIR%" (
+ mkdir "%LOG_DIR%"
+ )
+)
+
+set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%"
+
+rem Generic jvm settings you want to add
+IF ["%KAFKA_OPTS%"] EQU [""] (
+ set KAFKA_OPTS=
+)
+
+set DEFAULT_JAVA_DEBUG_PORT=5005
+set DEFAULT_DEBUG_SUSPEND_FLAG=n
+rem Set Debug options if enabled
+IF ["%KAFKA_DEBUG%"] NEQ [""] (
+
+
+ IF ["%JAVA_DEBUG_PORT%"] EQU [""] (
+ set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT%
+ )
+
+ IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] (
+ set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG%
+ )
+ set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT!
+
+ IF ["%JAVA_DEBUG_OPTS%"] EQU [""] (
+ set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS!
+ )
+
+ echo Enabling Java debug options: !JAVA_DEBUG_OPTS!
+ set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS!
+)
+
+rem Which java to use
+IF ["%JAVA_HOME%"] EQU [""] (
+ set JAVA=java
+) ELSE (
+ set JAVA="%JAVA_HOME%/bin/java"
+)
+
+rem Memory options
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+ set KAFKA_HEAP_OPTS=-Xmx256M
+)
+
+rem JVM performance options
+IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] (
+ set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true
+)
+
+IF not defined CLASSPATH (
+ echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll'
+ EXIT /B 2
+)
+
+set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp %CLASSPATH% %KAFKA_OPTS% %*
+rem echo.
+rem echo %COMMAND%
+rem echo.
+%COMMAND%
+
+goto :eof
+:concat
+IF not defined CLASSPATH (
+ set CLASSPATH="%~1"
+) ELSE (
+ set CLASSPATH=%CLASSPATH%;"%~1"
+)
diff --git a/MSH-PIC/kafka/bin/windows/kafka-server-start.bat b/MSH-PIC/kafka/bin/windows/kafka-server-start.bat
new file mode 100644
index 0000000..8624eda
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-server-start.bat
@@ -0,0 +1,38 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+ echo USAGE: %0 server.properties
+ EXIT /B 1
+)
+
+SetLocal
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+ set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
+)
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+ rem detect OS architecture
+ wmic os get osarchitecture | find /i "32-bit" >nul 2>&1
+ IF NOT ERRORLEVEL 1 (
+ rem 32-bit OS
+ set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+ ) ELSE (
+ rem 64-bit OS
+ set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
+ )
+)
+"%~dp0kafka-run-class.bat" kafka.Kafka %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/kafka-server-stop.bat b/MSH-PIC/kafka/bin/windows/kafka-server-stop.bat
new file mode 100644
index 0000000..676577c
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-server-stop.bat
@@ -0,0 +1,18 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete
+rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM
diff --git a/MSH-PIC/kafka/bin/windows/kafka-simple-consumer-shell.bat b/MSH-PIC/kafka/bin/windows/kafka-simple-consumer-shell.bat
new file mode 100644
index 0000000..8836128
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-simple-consumer-shell.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.tools.SimpleConsumerShell %*
diff --git a/MSH-PIC/kafka/bin/windows/kafka-topics.bat b/MSH-PIC/kafka/bin/windows/kafka-topics.bat
new file mode 100644
index 0000000..677b09d
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/kafka-topics.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %*
diff --git a/MSH-PIC/kafka/bin/windows/zookeeper-server-start.bat b/MSH-PIC/kafka/bin/windows/zookeeper-server-start.bat
new file mode 100644
index 0000000..f201a58
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/zookeeper-server-start.bat
@@ -0,0 +1,30 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+ echo USAGE: %0 zookeeper.properties
+ EXIT /B 1
+)
+
+SetLocal
+IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
+ set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
+)
+IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
+ set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
+)
+"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %*
+EndLocal
diff --git a/MSH-PIC/kafka/bin/windows/zookeeper-server-stop.bat b/MSH-PIC/kafka/bin/windows/zookeeper-server-stop.bat
new file mode 100644
index 0000000..8b57dd8
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/zookeeper-server-stop.bat
@@ -0,0 +1,17 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete
diff --git a/MSH-PIC/kafka/bin/windows/zookeeper-shell.bat b/MSH-PIC/kafka/bin/windows/zookeeper-shell.bat
new file mode 100644
index 0000000..ea3c398
--- /dev/null
+++ b/MSH-PIC/kafka/bin/windows/zookeeper-shell.bat
@@ -0,0 +1,22 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+IF [%1] EQU [] (
+ echo USAGE: %0 zookeeper_host:port[/path] [args...]
+ EXIT /B 1
+)
+
+"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMain -server %*
diff --git a/MSH-PIC/kafka/bin/zookeeper-security-migration.sh b/MSH-PIC/kafka/bin/zookeeper-security-migration.sh
new file mode 100644
index 0000000..722bde7
--- /dev/null
+++ b/MSH-PIC/kafka/bin/zookeeper-security-migration.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@"
diff --git a/MSH-PIC/kafka/bin/zookeeper-server-start.sh b/MSH-PIC/kafka/bin/zookeeper-server-start.sh
new file mode 100644
index 0000000..bd9c114
--- /dev/null
+++ b/MSH-PIC/kafka/bin/zookeeper-server-start.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 [-daemon] zookeeper.properties"
+ exit 1
+fi
+base_dir=$(dirname $0)
+
+if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
+ export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+fi
+
+if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
+ export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
+fi
+
+EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'}
+
+COMMAND=$1
+case $COMMAND in
+ -daemon)
+ EXTRA_ARGS="-daemon "$EXTRA_ARGS
+ shift
+ ;;
+ *)
+ ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"
diff --git a/MSH-PIC/kafka/bin/zookeeper-server-stop.sh b/MSH-PIC/kafka/bin/zookeeper-server-stop.sh
new file mode 100644
index 0000000..f771064
--- /dev/null
+++ b/MSH-PIC/kafka/bin/zookeeper-server-stop.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')
+
+if [ -z "$PIDS" ]; then
+ echo "No zookeeper server to stop"
+ exit 1
+else
+ kill -s TERM $PIDS
+fi
+
diff --git a/MSH-PIC/kafka/bin/zookeeper-shell.sh b/MSH-PIC/kafka/bin/zookeeper-shell.sh
new file mode 100644
index 0000000..95007fa
--- /dev/null
+++ b/MSH-PIC/kafka/bin/zookeeper-shell.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+ echo "USAGE: $0 zookeeper_host:port[/path] [args...]"
+ exit 1
+fi
+
+exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMain -server "$@"
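A usage sketch, assuming the ZooKeeper quorum and /kafka chroot configured in config/server.properties further below:

# interactive shell against one quorum member
bin/zookeeper-shell.sh 192.168.20.193:2181/kafka
# or run a single command, e.g. list the registered broker ids
bin/zookeeper-shell.sh 192.168.20.193:2181/kafka ls /brokers/ids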
diff --git a/MSH-PIC/kafka/config/client-ssl.properties b/MSH-PIC/kafka/config/client-ssl.properties
new file mode 100644
index 0000000..050096f
--- /dev/null
+++ b/MSH-PIC/kafka/config/client-ssl.properties
@@ -0,0 +1,6 @@
+security.protocol=SSL
+ssl.truststore.location=/usr/ca/trust/client.truststore.jks
+ssl.truststore.password=ceiec2019
+ssl.keystore.location=/usr/ca/client/client.keystore.jks
+ssl.keystore.password=ceiec2019
+ssl.key.password=ceiec2019
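A sketch of pointing a console client at the SSL listener (port 9095 as declared in config/server.properties below); the truststore/keystore paths listed above must exist on the client host:

bin/kafka-console-consumer.sh --bootstrap-server 192.168.20.193:9095 \
  --topic connect-test --consumer.config config/client-ssl.properties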
diff --git a/MSH-PIC/kafka/config/connect-console-sink.properties b/MSH-PIC/kafka/config/connect-console-sink.properties
new file mode 100644
index 0000000..e240a8f
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-console-sink.properties
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name=local-console-sink
+connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector
+tasks.max=1
+topics=connect-test
\ No newline at end of file
diff --git a/MSH-PIC/kafka/config/connect-console-source.properties b/MSH-PIC/kafka/config/connect-console-source.properties
new file mode 100644
index 0000000..d0e2069
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-console-source.properties
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name=local-console-source
+connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector
+tasks.max=1
+topic=connect-test
\ No newline at end of file
diff --git a/MSH-PIC/kafka/config/connect-distributed.properties b/MSH-PIC/kafka/config/connect-distributed.properties
new file mode 100644
index 0000000..5f3f358
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-distributed.properties
@@ -0,0 +1,93 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended
+# to be used with the examples, and some settings may differ from those used in a production system, especially
+# the `bootstrap.servers` and those specifying replication factors.
+
+# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
+bootstrap.servers=localhost:9092
+
+# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs
+group.id=connect-cluster
+
+# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
+# need to configure these based on the format they want their data in when loaded from or stored into Kafka
+key.converter=org.apache.kafka.connect.json.JsonConverter
+value.converter=org.apache.kafka.connect.json.JsonConverter
+# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
+# it to
+key.converter.schemas.enable=true
+value.converter.schemas.enable=true
+
+# The internal converter used for offsets, config, and status data is configurable and must be specified, but most users will
+# always want to use the built-in default. Offset, config, and status data is never visible outside of Kafka Connect in this format.
+internal.key.converter=org.apache.kafka.connect.json.JsonConverter
+internal.value.converter=org.apache.kafka.connect.json.JsonConverter
+internal.key.converter.schemas.enable=false
+internal.value.converter.schemas.enable=false
+
+# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
+# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
+# the topic before starting Kafka Connect if a specific topic configuration is needed.
+# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
+# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
+# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
+offset.storage.topic=connect-offsets
+offset.storage.replication.factor=1
+#offset.storage.partitions=25
+
+# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated,
+# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
+# the topic before starting Kafka Connect if a specific topic configuration is needed.
+# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
+# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
+# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
+config.storage.topic=connect-configs
+config.storage.replication.factor=1
+
+# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
+# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
+# the topic before starting Kafka Connect if a specific topic configuration is needed.
+# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
+# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
+# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
+status.storage.topic=connect-status
+status.storage.replication.factor=1
+#status.storage.partitions=5
+
+# Flush much faster than normal, which is useful for testing/debugging
+offset.flush.interval.ms=10000
+
+# These are provided to inform the user about the presence of the REST host and port configs
+# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests.
+#rest.host.name=
+#rest.port=8083
+
+# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers.
+#rest.advertised.host.name=
+#rest.advertised.port=
+
+# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
+# (connectors, converters, transformations). The list should consist of top level directories that include
+# any combination of:
+# a) directories immediately containing jars with plugins and their dependencies
+# b) uber-jars with plugins and their dependencies
+# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
+# Examples:
+# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
+#plugin.path=
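A sketch of bringing up a distributed worker with this file and then registering a connector over its REST API; port 8083 is the default hinted at by the commented rest.port above, and the connector body mirrors connect-file-source.properties further down:

bin/connect-distributed.sh config/connect-distributed.properties
curl -X POST -H "Content-Type: application/json" \
  --data '{"name":"local-file-source","config":{"connector.class":"FileStreamSource","tasks.max":"1","file":"test.txt","topic":"connect-test"}}' \
  http://localhost:8083/connectors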
diff --git a/MSH-PIC/kafka/config/connect-file-sink.properties b/MSH-PIC/kafka/config/connect-file-sink.properties
new file mode 100644
index 0000000..594ccc6
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-file-sink.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name=local-file-sink
+connector.class=FileStreamSink
+tasks.max=1
+file=test.sink.txt
+topics=connect-test
\ No newline at end of file
diff --git a/MSH-PIC/kafka/config/connect-file-source.properties b/MSH-PIC/kafka/config/connect-file-source.properties
new file mode 100644
index 0000000..599cf4c
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-file-source.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name=local-file-source
+connector.class=FileStreamSource
+tasks.max=1
+file=test.txt
+topic=connect-test
\ No newline at end of file
diff --git a/MSH-PIC/kafka/config/connect-log4j.properties b/MSH-PIC/kafka/config/connect-log4j.properties
new file mode 100644
index 0000000..808addb
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-log4j.properties
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=INFO, stdout
+
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
+
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.I0Itec.zkclient=ERROR
+log4j.logger.org.reflections=ERROR
diff --git a/MSH-PIC/kafka/config/connect-standalone.properties b/MSH-PIC/kafka/config/connect-standalone.properties
new file mode 100644
index 0000000..0039796
--- /dev/null
+++ b/MSH-PIC/kafka/config/connect-standalone.properties
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# These are defaults. This file just demonstrates how to override some settings.
+bootstrap.servers=localhost:9092
+
+# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
+# need to configure these based on the format they want their data in when loaded from or stored into Kafka
+key.converter=org.apache.kafka.connect.json.JsonConverter
+value.converter=org.apache.kafka.connect.json.JsonConverter
+# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
+# it to
+key.converter.schemas.enable=true
+value.converter.schemas.enable=true
+
+# The internal converter used for offsets and config data is configurable and must be specified, but most users will
+# always want to use the built-in default. Offset and config data is never visible outside of Kafka Connect in this format.
+internal.key.converter=org.apache.kafka.connect.json.JsonConverter
+internal.value.converter=org.apache.kafka.connect.json.JsonConverter
+internal.key.converter.schemas.enable=false
+internal.value.converter.schemas.enable=false
+
+offset.storage.file.filename=/tmp/connect.offsets
+# Flush much faster than normal, which is useful for testing/debugging
+offset.flush.interval.ms=10000
+
+# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
+# (connectors, converters, transformations). The list should consist of top level directories that include
+# any combination of:
+# a) directories immediately containing jars with plugins and their dependencies
+# b) uber-jars with plugins and their dependencies
+# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
+# Note: symlinks will be followed to discover dependencies or plugins.
+# Examples:
+# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
+#plugin.path=
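A minimal sketch of running a standalone worker with this file plus the file connectors defined earlier in the same config directory:

bin/connect-standalone.sh config/connect-standalone.properties \
  config/connect-file-source.properties config/connect-file-sink.properties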
diff --git a/MSH-PIC/kafka/config/consumer.properties b/MSH-PIC/kafka/config/consumer.properties
new file mode 100644
index 0000000..01bb12e
--- /dev/null
+++ b/MSH-PIC/kafka/config/consumer.properties
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see org.apache.kafka.clients.consumer.ConsumerConfig for more details
+
+# list of brokers used for bootstrapping knowledge about the rest of the cluster
+# format: host1:port1,host2:port2 ...
+bootstrap.servers=localhost:9092
+
+# consumer group id
+group.id=test-consumer-group
+
+# What to do when there is no initial offset in Kafka or if the current
+# offset does not exist any more on the server: latest, earliest, none
+#auto.offset.reset=
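For reference, a console-consumer sketch using this file; the localhost bootstrap address matches the placeholder default above and would normally be replaced by the broker listeners from server.properties:

bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
  --topic connect-test --from-beginning --consumer.config config/consumer.properties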
diff --git a/MSH-PIC/kafka/config/kafka_client_jaas.conf b/MSH-PIC/kafka/config/kafka_client_jaas.conf
new file mode 100644
index 0000000..5f8cde7
--- /dev/null
+++ b/MSH-PIC/kafka/config/kafka_client_jaas.conf
@@ -0,0 +1,5 @@
+KafkaClient {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ username="admin"
+ password="galaxy2019";
+};
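Client tools pick this file up through the JVM's java.security.auth.login.config property; a sketch, assuming the install path used by config/server.properties below:

export KAFKA_OPTS="-Djava.security.auth.login.config=/home/tsg/olap/kafka_2.11-1.0.0/config/kafka_client_jaas.conf"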
diff --git a/MSH-PIC/kafka/config/kafka_server_jaas.conf b/MSH-PIC/kafka/config/kafka_server_jaas.conf
new file mode 100644
index 0000000..e71cd1b
--- /dev/null
+++ b/MSH-PIC/kafka/config/kafka_server_jaas.conf
@@ -0,0 +1,7 @@
+KafkaServer {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ username="admin"
+ password="galaxy2019"
+ user_admin="galaxy2019"
+ user_gohangout="galaxy2019";
+};
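The broker needs the same JVM property pointing at this server-side file before start-up; a sketch under the same path assumption:

export KAFKA_OPTS="-Djava.security.auth.login.config=/home/tsg/olap/kafka_2.11-1.0.0/config/kafka_server_jaas.conf"
bin/kafka-server-start.sh -daemon config/server.properties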
diff --git a/MSH-PIC/kafka/config/keystore.jks b/MSH-PIC/kafka/config/keystore.jks
new file mode 100644
index 0000000..2e2328b
--- /dev/null
+++ b/MSH-PIC/kafka/config/keystore.jks
Binary files differ
diff --git a/MSH-PIC/kafka/config/log4j.properties b/MSH-PIC/kafka/config/log4j.properties
new file mode 100644
index 0000000..e635624
--- /dev/null
+++ b/MSH-PIC/kafka/config/log4j.properties
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unspecified loggers and loggers with additivity=true output to server.log and stdout
+# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
+log4j.rootLogger=WARN, stdout, kafkaAppender
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
+log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.authorizerAppender.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %m (%c)%n
+
+# Change the two lines below to adjust ZK client logging
+log4j.logger.org.I0Itec.zkclient.ZkClient=WARN
+log4j.logger.org.apache.zookeeper=WARN
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j.logger.kafka=WARN
+log4j.logger.org.apache.kafka=WARN
+
+# Change to DEBUG or TRACE to enable request logging
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
+# related to the handling of requests
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
+log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
+log4j.additivity.kafka.authorizer.logger=false
+
diff --git a/MSH-PIC/kafka/config/log4j.properties_bak b/MSH-PIC/kafka/config/log4j.properties_bak
new file mode 100644
index 0000000..d278a67
--- /dev/null
+++ b/MSH-PIC/kafka/config/log4j.properties_bak
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Unspecified loggers and loggers with additivity=true output to server.log and stdout
+# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
+log4j.rootLogger=WARN, stdout, kafkaAppender
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd
+log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
+log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Change the two lines below to adjust ZK client logging
+log4j.logger.org.I0Itec.zkclient.ZkClient=WARN
+log4j.logger.org.apache.zookeeper=WARN
+
+# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
+log4j.logger.kafka=WARN
+log4j.logger.org.apache.kafka=WARN
+
+# Change to DEBUG or TRACE to enable request logging
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
+# related to the handling of requests
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
+log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
+log4j.additivity.kafka.authorizer.logger=false
+
diff --git a/MSH-PIC/kafka/config/producer.properties b/MSH-PIC/kafka/config/producer.properties
new file mode 100644
index 0000000..750b95e
--- /dev/null
+++ b/MSH-PIC/kafka/config/producer.properties
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see org.apache.kafka.clients.producer.ProducerConfig for more details
+
+############################# Producer Basics #############################
+
+# list of brokers used for bootstrapping knowledge about the rest of the cluster
+# format: host1:port1,host2:port2 ...
+bootstrap.servers=localhost:9092
+
+# specify the compression codec for all data generated: none, gzip, snappy, lz4
+compression.type=none
+
+# name of the partitioner class for partitioning events; default partition spreads data randomly
+#partitioner.class=
+
+# the maximum amount of time the client will wait for the response of a request
+#request.timeout.ms=
+
+# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for
+#max.block.ms=
+
+# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together
+#linger.ms=
+
+# the maximum size of a request in bytes
+#max.request.size=
+
+# the default batch size in bytes when batching multiple records sent to a partition
+#batch.size=
+
+# the total bytes of memory the producer can use to buffer records waiting to be sent to the server
+#buffer.memory=
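A console-producer sketch using this file; as with the consumer config, the localhost address is the placeholder default from above:

bin/kafka-console-producer.sh --broker-list localhost:9092 \
  --topic connect-test --producer.config config/producer.properties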
diff --git a/MSH-PIC/kafka/config/sasl-config.properties b/MSH-PIC/kafka/config/sasl-config.properties
new file mode 100644
index 0000000..97af693
--- /dev/null
+++ b/MSH-PIC/kafka/config/sasl-config.properties
@@ -0,0 +1,7 @@
+security.protocol=SASL_PLAINTEXT
+sasl.mechanism=PLAIN
+#sasl.user: gohangout
+#sasl.password: ceiec2019
+#ssl.truststore.location=/usr/ca/trust/client.truststore.jks
+#ssl.truststore.password=ceiec2019
+
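A sketch of consuming over the SASL_PLAINTEXT listener (port 9094 per config/server.properties) with this client config; the JAAS export shown for kafka_client_jaas.conf above is assumed to be in the environment:

bin/kafka-console-consumer.sh --bootstrap-server 192.168.20.193:9094 \
  --topic connect-test --consumer.config config/sasl-config.properties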
diff --git a/MSH-PIC/kafka/config/server.properties b/MSH-PIC/kafka/config/server.properties
new file mode 100644
index 0000000..512db77
--- /dev/null
+++ b/MSH-PIC/kafka/config/server.properties
@@ -0,0 +1,171 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+listeners=SASL_PLAINTEXT://192.168.20.193:9094,PLAINTEXT://192.168.20.193:9092,SSL://192.168.20.193:9095
+advertised.listeners=SASL_PLAINTEXT://192.168.20.193:9094,PLAINTEXT://192.168.20.193:9092,SSL://192.168.20.193:9095
+ssl.keystore.location=/home/tsg/olap/kafka_2.11-1.0.0/config/keystore.jks
+ssl.keystore.password=galaxy2019
+ssl.key.password=galaxy2019
+ssl.truststore.location=/home/tsg/olap/kafka_2.11-1.0.0/config/truststore.jks
+ssl.truststore.password=galaxy2019
+#ssl.client.auth=required
+ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
+ssl.keystore.type=JKS
+ssl.truststore.type=JKS
+
+# Starting with Kafka 2.0.x, ssl.endpoint.identification.algorithm defaults to HTTPS, i.e. clients verify the broker hostname.
+# To disable hostname verification, leave the property empty: ssl.endpoint.identification.algorithm=
+ssl.endpoint.identification.algorithm=
+
+# Use a secured protocol for inter-broker traffic as well (the default is security.inter.broker.protocol=PLAINTEXT)
+security.inter.broker.protocol=SASL_PLAINTEXT
+
+# SASL configuration
+sasl.mechanism.inter.broker.protocol=PLAIN
+sasl.enabled.mechanisms=PLAIN
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=1
+
+############################# Socket Server Settings #############################
+# Allow topics to be deleted immediately rather than only being marked for deletion
+delete.topic.enable=true
+
+# Whether topics may be created automatically on first use
+auto.create.topics.enable=false
+
+# Log cleanup policy: periodically delete expired segments
+log.cleanup.policy=delete
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=10485760
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=10485760
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+#socket.request.max.bytes=2147483600
+socket.request.max.bytes=104857600
+
+# The maximum size of a message body, in bytes
+message.max.bytes=10485760
+
+# Maximum number of bytes fetched per replica fetch request
+replica.fetch.max.bytes=20485760
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/home/tsg/olap/kafka_2.11-1.0.0/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=3
+
+# Replication factor for the transaction state topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor.
+transaction.state.log.replication.factor=3
+
+# Overrides min.insync.replicas for the transaction state topic; the broker-wide default is 1, while this requires 2 in-sync replicas.
+transaction.state.log.min.isr=2
+
+# Whether replicas outside the ISR are allowed to be elected leader.
+unclean.leader.election.enable=true
+
+# If a partition's leader fails, the original broker may reclaim leadership once it recovers.
+auto.leader.rebalance.enable=true
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+log.retention.bytes=10737418240
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=60000
+
+# ZooKeeper session timeout, in ms
+zookeeper.session.timeout.ms=60000
+
+#Set zookeeper client to use secure ACLs
+zookeeper.set.acl=false
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
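A start-up sketch for a broker using this file (with the JAAS export shown for kafka_server_jaas.conf above already in the environment); the second command lists the topics registered under the /kafka chroot as a quick sanity check:

bin/kafka-server-start.sh -daemon config/server.properties
bin/kafka-topics.sh --zookeeper 192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka --list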
diff --git a/MSH-PIC/kafka/config/server.properties.bak b/MSH-PIC/kafka/config/server.properties.bak
new file mode 100644
index 0000000..250ce16
--- /dev/null
+++ b/MSH-PIC/kafka/config/server.properties.bak
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. It will get the value returned from
+# java.net.InetAddress.getCanonicalHostName() if not configured.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+#listeners=PLAINTEXT://:9092
+
+# Hostname and port the broker will advertise to producers and consumers. If not set,
+# it uses the value for "listeners" if configured. Otherwise, it will use the value
+# returned from java.net.InetAddress.getCanonicalHostName().
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+log.flush.interval.messages=20000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+log.flush.interval.ms=2000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/MSH-PIC/kafka/config/tools-log4j.properties b/MSH-PIC/kafka/config/tools-log4j.properties
new file mode 100644
index 0000000..b19e343
--- /dev/null
+++ b/MSH-PIC/kafka/config/tools-log4j.properties
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=WARN, stderr
+
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
+log4j.appender.stderr.Target=System.err
diff --git a/MSH-PIC/kafka/config/truststore.jks b/MSH-PIC/kafka/config/truststore.jks
new file mode 100644
index 0000000..b435e09
--- /dev/null
+++ b/MSH-PIC/kafka/config/truststore.jks
Binary files differ
diff --git a/MSH-PIC/kafka/config/zookeeper.properties b/MSH-PIC/kafka/config/zookeeper.properties
new file mode 100644
index 0000000..74cbf90
--- /dev/null
+++ b/MSH-PIC/kafka/config/zookeeper.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir=/tmp/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
diff --git a/MSH-PIC/packet_dump/docker-compose.yml b/MSH-PIC/packet_dump/docker-compose.yml
new file mode 100644
index 0000000..a0e7088
--- /dev/null
+++ b/MSH-PIC/packet_dump/docker-compose.yml
@@ -0,0 +1,54 @@
+version: "3"
+services:
+ packet_dump_rtp:
+ image: packet_dump:v3.0.7
+ container_name: rtp_packet_dump
+ working_dir: /data/tsg/olap/galaxy/packet_dump
+ ports:
+ - "6900:6900"
+ privileged: true
+ volumes:
+ - /home/tsg/olap/galaxy/volumes/packet_dump/rtp/config:/data/tsg/olap/galaxy/packet_dump/config
+ - /home/tsg/olap/galaxy/volumes/packet_dump/rtp/logs:/data/tsg/olap/galaxy/packet_dump/logs
+ - /home/tsg/olap/galaxy/volumes/packet_dump/rtp/status:/data/tsg/olap/galaxy/packet_dump/status
+ - /etc/localtime:/etc/localtime:ro
+ restart: always
+ command:
+ - /bin/sh
+ - -c
+ - |
+ cd /data/tsg/olap/galaxy/packet_dump/
+ touch status
+ mkdir -p logs
+ ./packet_dump
+ networks:
+ olap:
+ ipv4_address: 172.20.88.2
+
+ packet_dump_firewall:
+ image: packet_dump:v3.0.7
+ container_name: firewall_packet_dump
+ working_dir: /data/tsg/olap/galaxy/packet_dump
+ ports:
+ - "6910:6910"
+ privileged: true
+ volumes:
+ - /home/tsg/olap/galaxy/volumes/packet_dump/firewall/config:/data/tsg/olap/galaxy/packet_dump/config
+ - /home/tsg/olap/galaxy/volumes/packet_dump/firewall/logs:/data/tsg/olap/galaxy/packet_dump/logs
+ - /home/tsg/olap/galaxy/volumes/packet_dump/firewall/status:/data/tsg/olap/galaxy/packet_dump/status
+ - /etc/localtime:/etc/localtime:ro
+ restart: always
+ command:
+ - /bin/sh
+ - -c
+ - |
+ cd /data/tsg/olap/galaxy/packet_dump/
+ touch status
+ mkdir -p logs
+ ./packet_dump
+ networks:
+ olap:
+ ipv4_address: 172.20.88.3
+networks:
+ olap:
+ external: true
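The compose file expects an external bridge network named olap that already contains the static addresses 172.20.88.2/3; a sketch, where the subnet choice is an assumption and only needs to cover those addresses:

docker network create --driver bridge --subnet 172.20.0.0/16 olap
docker-compose up -d
docker-compose logs -f packet_dump_rtp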
diff --git a/MSH-PIC/packet_dump/firewall/config/packet_dump.yml b/MSH-PIC/packet_dump/firewall/config/packet_dump.yml
new file mode 100644
index 0000000..35379e3
--- /dev/null
+++ b/MSH-PIC/packet_dump/firewall/config/packet_dump.yml
@@ -0,0 +1,34 @@
+project:
+ - project_name: firewall
+ storage_file_type: specify # [pcap, pcapng, suffix]
+ consumer_topic: SECURITY-PACKET-CAPTURE-RECORD
+ cache_queue_size: 100000
+ storage_mode: remote #remote: aws or local
+ aws_bucket: firewall_hos_bucket
+ append_mode: append #append, appendV2
+ append_max_num: 100000
+ upload_goroutine_num: 15
+ specify_filepath_key: raw_packet_url
+ write_back_filepath: packet_url
+ origin_packet_time_ms_key: raw_packet_time_ms
+ origin_packet_key: raw_packet
+ filename_prefix: troubleshooting
+ storage_directory: /var/www/html/firewall/
+ file_timeout: 3600
+ specify_filepath_key: raw_packet_url
+kafka:
+ broker: [192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094]
+ sasl_user: admin
+ sasl_password: galaxy2019
+ offset: -1 # Newest: -1 or Oldest: -2
+aws:
+ endpoint: http://192.168.20.251:9098/hos/
+ token: f5c5186ba4874182b33b9b2b2b6e3f77
+ access_key: default
+ access_key_id: default
+log:
+ path: /data/tsg/olap/galaxy/packet_dump/logs/log
+  level: info # panic, fatal, warning, info, debug, trace
+ RotationSize: 1000000000
+ RotationCount: 14
+ RotationTime: 1
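The consumer_topic above must already exist on the brokers; the log excerpts below show the service panicking until it does. A creation sketch against the quorum from config/server.properties, where the replication factor of 3 is an assumption based on the three-broker setup (the logs show the topic ending up with 3 partitions):

bin/kafka-topics.sh --zookeeper 192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181/kafka \
  --create --topic SECURITY-PACKET-CAPTURE-RECORD --partitions 3 --replication-factor 3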
diff --git a/MSH-PIC/packet_dump/firewall/logs/log b/MSH-PIC/packet_dump/firewall/logs/log
new file mode 100644
index 0000000..bde3f9b
--- /dev/null
+++ b/MSH-PIC/packet_dump/firewall/logs/log
@@ -0,0 +1,8 @@
+time="2023-07-08 15:32:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-08 15:32:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-08 15:32:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg=partitionList Topic: =SECURITY-PACKET-CAPTURE-RECORD partitionLists ="[0 1 2]"
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =0
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =1
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/firewall/logs/log.202307070000 b/MSH-PIC/packet_dump/firewall/logs/log.202307070000
new file mode 100644
index 0000000..6ad2479
--- /dev/null
+++ b/MSH-PIC/packet_dump/firewall/logs/log.202307070000
@@ -0,0 +1,63 @@
+time="2023-07-07 18:12:56" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:12:56" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:12:56" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:56" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:56" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:12:58" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:12:58" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:12:58" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:58" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:58" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:00" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:00" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:00" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:00" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:00" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:02" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:04" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:04" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:04" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:04" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:05" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:07" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:07" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:07" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:07" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:08" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:12" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:12" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:12" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:12" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:13" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:20" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:20" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:20" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:20" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:21" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:13:34" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:34" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:34" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:34" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:35" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:14:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:14:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:14:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:02" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:14:54" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:14:54" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:14:54" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:54" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:55" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =SECURITY-PACKET-CAPTURE-RECORD
+time="2023-07-07 18:15:56" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:15:56" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:15:56" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:15:56" level=info msg=partitionList Topic: =SECURITY-PACKET-CAPTURE-RECORD partitionLists ="[0 1 2]"
+time="2023-07-07 18:15:56" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =0
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =1
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/firewall/logs/log.202307080000 b/MSH-PIC/packet_dump/firewall/logs/log.202307080000
new file mode 100644
index 0000000..bde3f9b
--- /dev/null
+++ b/MSH-PIC/packet_dump/firewall/logs/log.202307080000
@@ -0,0 +1,8 @@
+time="2023-07-08 15:32:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-08 15:32:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> firewall specify SECURITY-PACKET-CAPTURE-RECORD 100000 remote firewall_hos_bucket append 100000 15 [] [] [] raw_packet_url troubleshooting /var/www/html/firewall/ [] raw_packet_time_ms raw_packet 3600}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-08 15:32:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg=partitionList Topic: =SECURITY-PACKET-CAPTURE-RECORD partitionLists ="[0 1 2]"
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =0
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =1
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =SECURITY-PACKET-CAPTURE-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/firewall/status b/MSH-PIC/packet_dump/firewall/status
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MSH-PIC/packet_dump/firewall/status
diff --git a/MSH-PIC/packet_dump/rtp/config/packet_dump.yml b/MSH-PIC/packet_dump/rtp/config/packet_dump.yml
new file mode 100644
index 0000000..258f574
--- /dev/null
+++ b/MSH-PIC/packet_dump/rtp/config/packet_dump.yml
@@ -0,0 +1,37 @@
+project:
+ - project_name: rtp
+ storage_file_type: pcap # [pcap, pcapng]
+ consumer_topic: INTERNAL-RTP-RECORD
+ producer_topic: VOIP-RECORD
+ cache_queue_size: 100000
+ storage_mode: remote # remote (aws) or local
+ aws_bucket: rtp_hos_bucket
+ append_mode: append #append, appendV2
+ append_max_num: 100000
+ upload_goroutine_num: 15
+ check_file_string_key: [common_server_ip, common_client_ip]
+ check_file_intval_key: [common_server_port, common_client_port, common_start_time]
+ write_back_status_key: [raw_log_status]
+ delete_json_key: [raw_packet, raw_packet_len]
+ write_back_filepath_key: rtp_pcap_path
+ origin_packet_time_ms_key: raw_packet_time_ms
+ origin_packet_key: raw_packet
+ filename_prefix: rtp
+ storage_directory: /var/www/html/rtp/
+ file_timeout: 120
+kafka:
+ broker: [192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094]
+ sasl_user: admin
+ sasl_password: galaxy2019
+ offset: -1 # Newest: -1 or Oldest: -2
+aws:
+ endpoint: http://192.168.20.251:9098/hos/
+ token: f5c5186ba4874182b33b9b2b2b6e3f77
+ access_key: default
+ access_key_id: default
+log:
+ path: /data/tsg/olap/galaxy/packet_dump/logs/log
+ level: info # panic, fatal, warning, info, debug, trace
+ RotationSize: 1000000000
+ RotationCount: 14
+ RotationTime: 1
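
For reference, the per-project keys introduced by the rtp configuration above can be modelled roughly as below. This is an illustrative schema sketch only: field names and defaults are copied from packet_dump.yml, but the real process is written in Go, and its actual structures are the ones dumped in the "Para =" log lines, not this Python class.

from dataclasses import dataclass, field
from typing import List

# Illustrative model of one entry under `project:` in packet_dump.yml (rtp values).
@dataclass
class ProjectConfig:
    project_name: str = "rtp"
    storage_file_type: str = "pcap"              # pcap or pcapng
    consumer_topic: str = "INTERNAL-RTP-RECORD"
    producer_topic: str = "VOIP-RECORD"
    cache_queue_size: int = 100000
    storage_mode: str = "remote"                 # remote (aws) or local
    aws_bucket: str = "rtp_hos_bucket"
    append_mode: str = "append"                  # append or appendV2
    append_max_num: int = 100000
    upload_goroutine_num: int = 15
    check_file_string_key: List[str] = field(
        default_factory=lambda: ["common_server_ip", "common_client_ip"])
    check_file_intval_key: List[str] = field(
        default_factory=lambda: ["common_server_port", "common_client_port",
                                 "common_start_time"])
    write_back_status_key: List[str] = field(
        default_factory=lambda: ["raw_log_status"])
    delete_json_key: List[str] = field(
        default_factory=lambda: ["raw_packet", "raw_packet_len"])
    write_back_filepath_key: str = "rtp_pcap_path"
    origin_packet_time_ms_key: str = "raw_packet_time_ms"
    origin_packet_key: str = "raw_packet"
    filename_prefix: str = "rtp"
    storage_directory: str = "/var/www/html/rtp/"
    file_timeout: int = 120                      # rtp uses 120; firewall and troubleshooting use 3600
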
diff --git a/MSH-PIC/packet_dump/rtp/logs/log b/MSH-PIC/packet_dump/rtp/logs/log
new file mode 100644
index 0000000..e9661bd
--- /dev/null
+++ b/MSH-PIC/packet_dump/rtp/logs/log
@@ -0,0 +1,8 @@
+time="2023-07-08 15:32:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-08 15:32:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-08 15:32:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg=partitionList Topic: =INTERNAL-RTP-RECORD partitionLists ="[0 1 2]"
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =0
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =1
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/rtp/logs/log.202307070000 b/MSH-PIC/packet_dump/rtp/logs/log.202307070000
new file mode 100644
index 0000000..c526fa1
--- /dev/null
+++ b/MSH-PIC/packet_dump/rtp/logs/log.202307070000
@@ -0,0 +1,63 @@
+time="2023-07-07 18:12:56" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:12:56" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:12:56" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:56" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:56" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:12:58" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:12:58" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:12:58" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:58" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:12:58" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:00" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:00" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:00" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:00" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:00" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:02" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:04" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:04" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:04" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:04" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:05" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:07" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:07" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:07" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:07" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:08" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:12" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:12" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:12" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:12" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:13" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:20" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:20" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:20" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:20" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:21" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:13:34" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:13:34" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:13:34" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:34" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:13:35" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:14:01" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:14:01" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:14:01" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:01" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:02" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:14:54" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:14:54" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:14:54" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:54" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:14:55" level=panic msg="kafka server: Request was for a topic or partition that does not exist on this broker." Topic: =INTERNAL-RTP-RECORD
+time="2023-07-07 18:15:56" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-07 18:15:56" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-07 18:15:56" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:15:56" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-07 18:15:56" level=info msg=partitionList Topic: =INTERNAL-RTP-RECORD partitionLists ="[0 1 2]"
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =0
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =1
+time="2023-07-07 18:15:56" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/rtp/logs/log.202307080000 b/MSH-PIC/packet_dump/rtp/logs/log.202307080000
new file mode 100644
index 0000000..e9661bd
--- /dev/null
+++ b/MSH-PIC/packet_dump/rtp/logs/log.202307080000
@@ -0,0 +1,8 @@
+time="2023-07-08 15:32:02" level=info msg="BuildAwsS3Session sucess" endpoint: ="http://192.168.20.251:9098/hos/"
+time="2023-07-08 15:32:02" level=info PacketDump ="start time main" Para ="{[{<nil> <nil> map[] <nil> <nil> <nil> rtp pcap INTERNAL-RTP-RECORD VOIP-RECORD 100000 remote rtp_hos_bucket append 100000 15 [common_server_ip common_client_ip] [common_server_port common_client_port common_start_time] [raw_log_status] rtp_pcap_path rtp /var/www/html/rtp/ [raw_packet raw_packet_len] raw_packet_time_ms raw_packet 120}] {[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094] admin galaxy2019 -1} {http://192.168.20.251:9098/hos/ f5c5186ba4874182b33b9b2b2b6e3f77 default default} {/data/tsg/olap/galaxy/packet_dump/logs/log info 1000000000 14 1}}"
+time="2023-07-08 15:32:02" level=info msg="NewProducer Successes" Init kafka producer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg="NewConsumer Successes" Init kafka consumer handle success, Broker: ="[192.168.20.193:9094 192.168.20.194:9094 192.168.20.195:9094]"
+time="2023-07-08 15:32:02" level=info msg=partitionList Topic: =INTERNAL-RTP-RECORD partitionLists ="[0 1 2]"
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =0
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =1
+time="2023-07-08 15:32:02" level=info Offset: =-1 Topic: =INTERNAL-RTP-RECORD partition: =2
diff --git a/MSH-PIC/packet_dump/rtp/status b/MSH-PIC/packet_dump/rtp/status
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MSH-PIC/packet_dump/rtp/status
diff --git a/MSH-PIC/packet_dump/troubleshooting/config/packet_dump.yml b/MSH-PIC/packet_dump/troubleshooting/config/packet_dump.yml
new file mode 100644
index 0000000..d72cf92
--- /dev/null
+++ b/MSH-PIC/packet_dump/troubleshooting/config/packet_dump.yml
@@ -0,0 +1,35 @@
+project:
+ - project_name: troubleshooting
+ storage_file_type: pcapng # [pcap, pcapng]
+ consumer_topic: INTERNAL-PACKET-CAPTURE-EVENT
+ producer_topic: SYS-PACKET-CAPTURE-EVENT
+ cache_queue_size: 100000
+ storage_mode: remote # remote (aws) or local
+ aws_bucket: troubleshooting_hos_bucket
+ append_mode: append #append, appendV2
+ append_max_num: 100000
+ upload_goroutine_num: 15
+ check_file_intval_key: [common_policy_id, pcap_storage_task_id]
+ delete_json_key: [raw_packet, raw_packet_len]
+ write_back_filepath_key: packet_url
+ origin_packet_time_ms_key: raw_packet_time_ms
+ origin_packet_key: raw_packet
+ filename_prefix: troubleshooting
+ storage_directory: /var/www/html/troubleshooting/
+ file_timeout: 3600
+kafka:
+ broker: [192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094]
+ sasl_user: admin
+ sasl_password: galaxy2019
+ offset: -1 # Newest: -1 or Oldest: -2
+aws:
+ endpoint: http://192.168.20.251:9098/hos/
+ token: f5c5186ba4874182b33b9b2b2b6e3f77
+ access_key: default
+ access_key_id: default
+log:
+ path: /data/tsg/olap/galaxy/packet_dump/logs/log
+ level: info # panic, fatal, warning, info, debug, trace
+ RotationSize: 1000000000
+ RotationCount: 14
+ RotationTime: 1
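
The repeated "topic or partition that does not exist" panics in the logs suggest the topics referenced by these three packet_dump configurations were created only after the workers first started. Below is a hedged sketch, using the third-party kafka-python package (not part of this tree), of checking that the expected topics exist on the brokers listed above; the listener/SASL settings are assumptions inferred from the sasl_user/sasl_password fields and may need adjusting to the actual cluster.

from kafka import KafkaConsumer  # third-party kafka-python, assumed installed

BROKERS = ["192.168.20.193:9094", "192.168.20.194:9094", "192.168.20.195:9094"]
# Topics referenced by the firewall, rtp and troubleshooting packet_dump configs above.
EXPECTED = {
    "SECURITY-PACKET-CAPTURE-RECORD",
    "INTERNAL-RTP-RECORD", "VOIP-RECORD",
    "INTERNAL-PACKET-CAPTURE-EVENT", "SYS-PACKET-CAPTURE-EVENT",
}

def missing_topics():
    """Return the expected topics that the cluster does not know about (sketch only)."""
    consumer = KafkaConsumer(
        bootstrap_servers=BROKERS,
        security_protocol="SASL_PLAINTEXT",  # assumption; the listener may differ
        sasl_mechanism="PLAIN",              # assumption based on the user/password pair
        sasl_plain_username="admin",
        sasl_plain_password="galaxy2019",
    )
    try:
        return EXPECTED - consumer.topics()
    finally:
        consumer.close()

if __name__ == "__main__":
    print("missing topics:", missing_topics() or "none")
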
diff --git a/MSH-PIC/packet_dump/troubleshooting/status b/MSH-PIC/packet_dump/troubleshooting/status
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MSH-PIC/packet_dump/troubleshooting/status
diff --git a/MSH-PIC/phoenix-hbase/bin/argparse-1.4.0/argparse.py b/MSH-PIC/phoenix-hbase/bin/argparse-1.4.0/argparse.py
new file mode 100644
index 0000000..70a77cc
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/argparse-1.4.0/argparse.py
@@ -0,0 +1,2392 @@
+# Author: Steven J. Bethard <[email protected]>.
+# Maintainer: Thomas Waldmann <[email protected]>
+
+"""Command-line parsing library
+
+This module is an optparse-inspired command-line parsing library that:
+
+ - handles both optional and positional arguments
+ - produces highly informative usage messages
+ - supports parsers that dispatch to sub-parsers
+
+The following is a simple usage example that sums integers from the
+command-line and writes the result to a file::
+
+ parser = argparse.ArgumentParser(
+ description='sum the integers at the command line')
+ parser.add_argument(
+ 'integers', metavar='int', nargs='+', type=int,
+ help='an integer to be summed')
+ parser.add_argument(
+ '--log', default=sys.stdout, type=argparse.FileType('w'),
+ help='the file where the sum should be written')
+ args = parser.parse_args()
+ args.log.write('%s' % sum(args.integers))
+ args.log.close()
+
+The module contains the following public classes:
+
+ - ArgumentParser -- The main entry point for command-line parsing. As the
+ example above shows, the add_argument() method is used to populate
+ the parser with actions for optional and positional arguments. Then
+ the parse_args() method is invoked to convert the args at the
+ command-line into an object with attributes.
+
+ - ArgumentError -- The exception raised by ArgumentParser objects when
+ there are errors with the parser's actions. Errors raised while
+ parsing the command-line are caught by ArgumentParser and emitted
+ as command-line messages.
+
+ - FileType -- A factory for defining types of files to be created. As the
+ example above shows, instances of FileType are typically passed as
+ the type= argument of add_argument() calls.
+
+ - Action -- The base class for parser actions. Typically actions are
+ selected by passing strings like 'store_true' or 'append_const' to
+ the action= argument of add_argument(). However, for greater
+ customization of ArgumentParser actions, subclasses of Action may
+ be defined and passed as the action= argument.
+
+ - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
+ ArgumentDefaultsHelpFormatter -- Formatter classes which
+ may be passed as the formatter_class= argument to the
+ ArgumentParser constructor. HelpFormatter is the default,
+ RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
+ not to change the formatting for help text, and
+ ArgumentDefaultsHelpFormatter adds information about argument defaults
+ to the help.
+
+All other classes in this module are considered implementation details.
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
+considered public as object names -- the API of the formatter objects is
+still considered an implementation detail.)
+"""
+
+__version__ = '1.4.0'    # we use our own version number independent of the
+ # one in stdlib and we release this on pypi.
+
+__external_lib__ = True # to make sure the tests really test THIS lib,
+ # not the builtin one in Python stdlib
+
+__all__ = [
+ 'ArgumentParser',
+ 'ArgumentError',
+ 'ArgumentTypeError',
+ 'FileType',
+ 'HelpFormatter',
+ 'ArgumentDefaultsHelpFormatter',
+ 'RawDescriptionHelpFormatter',
+ 'RawTextHelpFormatter',
+ 'Namespace',
+ 'Action',
+ 'ONE_OR_MORE',
+ 'OPTIONAL',
+ 'PARSER',
+ 'REMAINDER',
+ 'SUPPRESS',
+ 'ZERO_OR_MORE',
+]
+
+
+import copy as _copy
+import os as _os
+import re as _re
+import sys as _sys
+import textwrap as _textwrap
+
+from gettext import gettext as _
+
+try:
+ set
+except NameError:
+ # for python < 2.4 compatibility (sets module is there since 2.3):
+ from sets import Set as set
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
+try:
+ sorted
+except NameError:
+ # for python < 2.4 compatibility:
+ def sorted(iterable, reverse=False):
+ result = list(iterable)
+ result.sort()
+ if reverse:
+ result.reverse()
+ return result
+
+
+def _callable(obj):
+ return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
+
+
+SUPPRESS = '==SUPPRESS=='
+
+OPTIONAL = '?'
+ZERO_OR_MORE = '*'
+ONE_OR_MORE = '+'
+PARSER = 'A...'
+REMAINDER = '...'
+_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
+
+# =============================
+# Utility functions and classes
+# =============================
+
+class _AttributeHolder(object):
+ """Abstract base class that provides __repr__.
+
+ The __repr__ method returns a string in the format::
+ ClassName(attr=name, attr=name, ...)
+ The attributes are determined either by a class-level attribute,
+ '_kwarg_names', or by inspecting the instance __dict__.
+ """
+
+ def __repr__(self):
+ type_name = type(self).__name__
+ arg_strings = []
+ for arg in self._get_args():
+ arg_strings.append(repr(arg))
+ for name, value in self._get_kwargs():
+ arg_strings.append('%s=%r' % (name, value))
+ return '%s(%s)' % (type_name, ', '.join(arg_strings))
+
+ def _get_kwargs(self):
+ return sorted(self.__dict__.items())
+
+ def _get_args(self):
+ return []
+
+
+def _ensure_value(namespace, name, value):
+ if getattr(namespace, name, None) is None:
+ setattr(namespace, name, value)
+ return getattr(namespace, name)
+
+
+# ===============
+# Formatting Help
+# ===============
+
+class HelpFormatter(object):
+ """Formatter for generating usage messages and argument help strings.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def __init__(self,
+ prog,
+ indent_increment=2,
+ max_help_position=24,
+ width=None):
+
+ # default setting for width
+ if width is None:
+ try:
+ width = int(_os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ width = 80
+ width -= 2
+
+ self._prog = prog
+ self._indent_increment = indent_increment
+ self._max_help_position = max_help_position
+ self._width = width
+
+ self._current_indent = 0
+ self._level = 0
+ self._action_max_length = 0
+
+ self._root_section = self._Section(self, None)
+ self._current_section = self._root_section
+
+ self._whitespace_matcher = _re.compile(r'\s+')
+ self._long_break_matcher = _re.compile(r'\n\n\n+')
+
+ # ===============================
+ # Section and indentation methods
+ # ===============================
+ def _indent(self):
+ self._current_indent += self._indent_increment
+ self._level += 1
+
+ def _dedent(self):
+ self._current_indent -= self._indent_increment
+ assert self._current_indent >= 0, 'Indent decreased below 0.'
+ self._level -= 1
+
+ class _Section(object):
+
+ def __init__(self, formatter, parent, heading=None):
+ self.formatter = formatter
+ self.parent = parent
+ self.heading = heading
+ self.items = []
+
+ def format_help(self):
+ # format the indented section
+ if self.parent is not None:
+ self.formatter._indent()
+ join = self.formatter._join_parts
+ for func, args in self.items:
+ func(*args)
+ item_help = join([func(*args) for func, args in self.items])
+ if self.parent is not None:
+ self.formatter._dedent()
+
+ # return nothing if the section was empty
+ if not item_help:
+ return ''
+
+ # add the heading if the section was non-empty
+ if self.heading is not SUPPRESS and self.heading is not None:
+ current_indent = self.formatter._current_indent
+ heading = '%*s%s:\n' % (current_indent, '', self.heading)
+ else:
+ heading = ''
+
+ # join the section-initial newline, the heading and the help
+ return join(['\n', heading, item_help, '\n'])
+
+ def _add_item(self, func, args):
+ self._current_section.items.append((func, args))
+
+ # ========================
+ # Message building methods
+ # ========================
+ def start_section(self, heading):
+ self._indent()
+ section = self._Section(self, self._current_section, heading)
+ self._add_item(section.format_help, [])
+ self._current_section = section
+
+ def end_section(self):
+ self._current_section = self._current_section.parent
+ self._dedent()
+
+ def add_text(self, text):
+ if text is not SUPPRESS and text is not None:
+ self._add_item(self._format_text, [text])
+
+ def add_usage(self, usage, actions, groups, prefix=None):
+ if usage is not SUPPRESS:
+ args = usage, actions, groups, prefix
+ self._add_item(self._format_usage, args)
+
+ def add_argument(self, action):
+ if action.help is not SUPPRESS:
+
+ # find all invocations
+ get_invocation = self._format_action_invocation
+ invocations = [get_invocation(action)]
+ for subaction in self._iter_indented_subactions(action):
+ invocations.append(get_invocation(subaction))
+
+ # update the maximum item length
+ invocation_length = max([len(s) for s in invocations])
+ action_length = invocation_length + self._current_indent
+ self._action_max_length = max(self._action_max_length,
+ action_length)
+
+ # add the item to the list
+ self._add_item(self._format_action, [action])
+
+ def add_arguments(self, actions):
+ for action in actions:
+ self.add_argument(action)
+
+ # =======================
+ # Help-formatting methods
+ # =======================
+ def format_help(self):
+ help = self._root_section.format_help()
+ if help:
+ help = self._long_break_matcher.sub('\n\n', help)
+ help = help.strip('\n') + '\n'
+ return help
+
+ def _join_parts(self, part_strings):
+ return ''.join([part
+ for part in part_strings
+ if part and part is not SUPPRESS])
+
+ def _format_usage(self, usage, actions, groups, prefix):
+ if prefix is None:
+ prefix = _('usage: ')
+
+ # if usage is specified, use that
+ if usage is not None:
+ usage = usage % dict(prog=self._prog)
+
+ # if no optionals or positionals are available, usage is just prog
+ elif usage is None and not actions:
+ usage = '%(prog)s' % dict(prog=self._prog)
+
+ # if optionals and positionals are available, calculate usage
+ elif usage is None:
+ prog = '%(prog)s' % dict(prog=self._prog)
+
+ # split optionals from positionals
+ optionals = []
+ positionals = []
+ for action in actions:
+ if action.option_strings:
+ optionals.append(action)
+ else:
+ positionals.append(action)
+
+ # build full usage string
+ format = self._format_actions_usage
+ action_usage = format(optionals + positionals, groups)
+ usage = ' '.join([s for s in [prog, action_usage] if s])
+
+ # wrap the usage parts if it's too long
+ text_width = self._width - self._current_indent
+ if len(prefix) + len(usage) > text_width:
+
+ # break usage into wrappable parts
+ part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ opt_usage = format(optionals, groups)
+ pos_usage = format(positionals, groups)
+ opt_parts = _re.findall(part_regexp, opt_usage)
+ pos_parts = _re.findall(part_regexp, pos_usage)
+ assert ' '.join(opt_parts) == opt_usage
+ assert ' '.join(pos_parts) == pos_usage
+
+ # helper for wrapping lines
+ def get_lines(parts, indent, prefix=None):
+ lines = []
+ line = []
+ if prefix is not None:
+ line_len = len(prefix) - 1
+ else:
+ line_len = len(indent) - 1
+ for part in parts:
+ if line_len + 1 + len(part) > text_width:
+ lines.append(indent + ' '.join(line))
+ line = []
+ line_len = len(indent) - 1
+ line.append(part)
+ line_len += len(part) + 1
+ if line:
+ lines.append(indent + ' '.join(line))
+ if prefix is not None:
+ lines[0] = lines[0][len(indent):]
+ return lines
+
+ # if prog is short, follow it with optionals or positionals
+ if len(prefix) + len(prog) <= 0.75 * text_width:
+ indent = ' ' * (len(prefix) + len(prog) + 1)
+ if opt_parts:
+ lines = get_lines([prog] + opt_parts, indent, prefix)
+ lines.extend(get_lines(pos_parts, indent))
+ elif pos_parts:
+ lines = get_lines([prog] + pos_parts, indent, prefix)
+ else:
+ lines = [prog]
+
+ # if prog is long, put it on its own line
+ else:
+ indent = ' ' * len(prefix)
+ parts = opt_parts + pos_parts
+ lines = get_lines(parts, indent)
+ if len(lines) > 1:
+ lines = []
+ lines.extend(get_lines(opt_parts, indent))
+ lines.extend(get_lines(pos_parts, indent))
+ lines = [prog] + lines
+
+ # join lines into usage
+ usage = '\n'.join(lines)
+
+ # prefix with 'usage:'
+ return '%s%s\n\n' % (prefix, usage)
+
+ def _format_actions_usage(self, actions, groups):
+ # find group indices and identify actions in groups
+ group_actions = set()
+ inserts = {}
+ for group in groups:
+ try:
+ start = actions.index(group._group_actions[0])
+ except ValueError:
+ continue
+ else:
+ end = start + len(group._group_actions)
+ if actions[start:end] == group._group_actions:
+ for action in group._group_actions:
+ group_actions.add(action)
+ if not group.required:
+ if start in inserts:
+ inserts[start] += ' ['
+ else:
+ inserts[start] = '['
+ inserts[end] = ']'
+ else:
+ if start in inserts:
+ inserts[start] += ' ('
+ else:
+ inserts[start] = '('
+ inserts[end] = ')'
+ for i in range(start + 1, end):
+ inserts[i] = '|'
+
+ # collect all actions format strings
+ parts = []
+ for i, action in enumerate(actions):
+
+ # suppressed arguments are marked with None
+ # remove | separators for suppressed arguments
+ if action.help is SUPPRESS:
+ parts.append(None)
+ if inserts.get(i) == '|':
+ inserts.pop(i)
+ elif inserts.get(i + 1) == '|':
+ inserts.pop(i + 1)
+
+ # produce all arg strings
+ elif not action.option_strings:
+ part = self._format_args(action, action.dest)
+
+ # if it's in a group, strip the outer []
+ if action in group_actions:
+ if part[0] == '[' and part[-1] == ']':
+ part = part[1:-1]
+
+ # add the action string to the list
+ parts.append(part)
+
+ # produce the first way to invoke the option in brackets
+ else:
+ option_string = action.option_strings[0]
+
+ # if the Optional doesn't take a value, format is:
+ # -s or --long
+ if action.nargs == 0:
+ part = '%s' % option_string
+
+ # if the Optional takes a value, format is:
+ # -s ARGS or --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ part = '%s %s' % (option_string, args_string)
+
+ # make it look optional if it's not required or in a group
+ if not action.required and action not in group_actions:
+ part = '[%s]' % part
+
+ # add the action string to the list
+ parts.append(part)
+
+ # insert things at the necessary indices
+ for i in sorted(inserts, reverse=True):
+ parts[i:i] = [inserts[i]]
+
+ # join all the action items with spaces
+ text = ' '.join([item for item in parts if item is not None])
+
+ # clean up separators for mutually exclusive groups
+ open = r'[\[(]'
+ close = r'[\])]'
+ text = _re.sub(r'(%s) ' % open, r'\1', text)
+ text = _re.sub(r' (%s)' % close, r'\1', text)
+ text = _re.sub(r'%s *%s' % (open, close), r'', text)
+ text = _re.sub(r'\(([^|]*)\)', r'\1', text)
+ text = text.strip()
+
+ # return the text
+ return text
+
+ def _format_text(self, text):
+ if '%(prog)' in text:
+ text = text % dict(prog=self._prog)
+ text_width = self._width - self._current_indent
+ indent = ' ' * self._current_indent
+ return self._fill_text(text, text_width, indent) + '\n\n'
+
+ def _format_action(self, action):
+ # determine the required width and the entry label
+ help_position = min(self._action_max_length + 2,
+ self._max_help_position)
+ help_width = self._width - help_position
+ action_width = help_position - self._current_indent - 2
+ action_header = self._format_action_invocation(action)
+
+ # no help; start on same line and add a final newline
+ if not action.help:
+ tup = self._current_indent, '', action_header
+ action_header = '%*s%s\n' % tup
+
+ # short action name; start on the same line and pad two spaces
+ elif len(action_header) <= action_width:
+ tup = self._current_indent, '', action_width, action_header
+ action_header = '%*s%-*s ' % tup
+ indent_first = 0
+
+ # long action name; start on the next line
+ else:
+ tup = self._current_indent, '', action_header
+ action_header = '%*s%s\n' % tup
+ indent_first = help_position
+
+ # collect the pieces of the action help
+ parts = [action_header]
+
+ # if there was help for the action, add lines of help text
+ if action.help:
+ help_text = self._expand_help(action)
+ help_lines = self._split_lines(help_text, help_width)
+ parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+ for line in help_lines[1:]:
+ parts.append('%*s%s\n' % (help_position, '', line))
+
+ # or add a newline if the description doesn't end with one
+ elif not action_header.endswith('\n'):
+ parts.append('\n')
+
+ # if there are any sub-actions, add their help as well
+ for subaction in self._iter_indented_subactions(action):
+ parts.append(self._format_action(subaction))
+
+ # return a single string
+ return self._join_parts(parts)
+
+ def _format_action_invocation(self, action):
+ if not action.option_strings:
+ metavar, = self._metavar_formatter(action, action.dest)(1)
+ return metavar
+
+ else:
+ parts = []
+
+ # if the Optional doesn't take a value, format is:
+ # -s, --long
+ if action.nargs == 0:
+ parts.extend(action.option_strings)
+
+ # if the Optional takes a value, format is:
+ # -s ARGS, --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ for option_string in action.option_strings:
+ parts.append('%s %s' % (option_string, args_string))
+
+ return ', '.join(parts)
+
+ def _metavar_formatter(self, action, default_metavar):
+ if action.metavar is not None:
+ result = action.metavar
+ elif action.choices is not None:
+ choice_strs = [str(choice) for choice in action.choices]
+ result = '{%s}' % ','.join(choice_strs)
+ else:
+ result = default_metavar
+
+ def format(tuple_size):
+ if isinstance(result, tuple):
+ return result
+ else:
+ return (result, ) * tuple_size
+ return format
+
+ def _format_args(self, action, default_metavar):
+ get_metavar = self._metavar_formatter(action, default_metavar)
+ if action.nargs is None:
+ result = '%s' % get_metavar(1)
+ elif action.nargs == OPTIONAL:
+ result = '[%s]' % get_metavar(1)
+ elif action.nargs == ZERO_OR_MORE:
+ result = '[%s [%s ...]]' % get_metavar(2)
+ elif action.nargs == ONE_OR_MORE:
+ result = '%s [%s ...]' % get_metavar(2)
+ elif action.nargs == REMAINDER:
+ result = '...'
+ elif action.nargs == PARSER:
+ result = '%s ...' % get_metavar(1)
+ else:
+ formats = ['%s' for _ in range(action.nargs)]
+ result = ' '.join(formats) % get_metavar(action.nargs)
+ return result
+
+ def _expand_help(self, action):
+ params = dict(vars(action), prog=self._prog)
+ for name in list(params):
+ if params[name] is SUPPRESS:
+ del params[name]
+ for name in list(params):
+ if hasattr(params[name], '__name__'):
+ params[name] = params[name].__name__
+ if params.get('choices') is not None:
+ choices_str = ', '.join([str(c) for c in params['choices']])
+ params['choices'] = choices_str
+ return self._get_help_string(action) % params
+
+ def _iter_indented_subactions(self, action):
+ try:
+ get_subactions = action._get_subactions
+ except AttributeError:
+ pass
+ else:
+ self._indent()
+ for subaction in get_subactions():
+ yield subaction
+ self._dedent()
+
+ def _split_lines(self, text, width):
+ text = self._whitespace_matcher.sub(' ', text).strip()
+ return _textwrap.wrap(text, width)
+
+ def _fill_text(self, text, width, indent):
+ text = self._whitespace_matcher.sub(' ', text).strip()
+ return _textwrap.fill(text, width, initial_indent=indent,
+ subsequent_indent=indent)
+
+ def _get_help_string(self, action):
+ return action.help
+
+
+class RawDescriptionHelpFormatter(HelpFormatter):
+ """Help message formatter which retains any formatting in descriptions.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _fill_text(self, text, width, indent):
+ return ''.join([indent + line for line in text.splitlines(True)])
+
+
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
+ """Help message formatter which retains formatting of all help text.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _split_lines(self, text, width):
+ return text.splitlines()
+
+
+class ArgumentDefaultsHelpFormatter(HelpFormatter):
+ """Help message formatter which adds default values to argument help.
+
+ Only the name of this class is considered a public API. All the methods
+ provided by the class are considered an implementation detail.
+ """
+
+ def _get_help_string(self, action):
+ help = action.help
+ if '%(default)' not in action.help:
+ if action.default is not SUPPRESS:
+ defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
+ if action.option_strings or action.nargs in defaulting_nargs:
+ help += ' (default: %(default)s)'
+ return help
+
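+ # Editorial usage sketch (not part of the vendored module): it shows how
+ # the formatter classes above are selected through the public
+ # formatter_class parameter. The stock argparse module is used so the
+ # snippet is self-contained; the prog and option names are made up.
+ import argparse
+
+ parser = argparse.ArgumentParser(
+     prog='example',
+     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--retries', type=int, default=3,
+                     help='number of connection attempts')
+ # The formatter appends "(default: 3)" because the help string does not
+ # already reference %(default).
+ assert '(default: 3)' in parser.format_help()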
+
+# =====================
+# Options and Arguments
+# =====================
+
+def _get_action_name(argument):
+ if argument is None:
+ return None
+ elif argument.option_strings:
+ return '/'.join(argument.option_strings)
+ elif argument.metavar not in (None, SUPPRESS):
+ return argument.metavar
+ elif argument.dest not in (None, SUPPRESS):
+ return argument.dest
+ else:
+ return None
+
+
+class ArgumentError(Exception):
+ """An error from creating or using an argument (optional or positional).
+
+ The string value of this exception is the message, augmented with
+ information about the argument that caused it.
+ """
+
+ def __init__(self, argument, message):
+ self.argument_name = _get_action_name(argument)
+ self.message = message
+
+ def __str__(self):
+ if self.argument_name is None:
+ format = '%(message)s'
+ else:
+ format = 'argument %(argument_name)s: %(message)s'
+ return format % dict(message=self.message,
+ argument_name=self.argument_name)
+
+
+class ArgumentTypeError(Exception):
+ """An error from trying to convert a command line string to a type."""
+ pass
+
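+ # Editorial usage sketch (not part of the vendored module): a custom
+ # type= callable reports conversion failures by raising the
+ # ArgumentTypeError defined above. Written against the stock argparse
+ # module; port_number() and the option names are invented.
+ import argparse
+
+ def port_number(text):
+     # Raising ArgumentTypeError produces a clean usage error instead of
+     # a bare ValueError traceback.
+     value = int(text)
+     if not 0 < value < 65536:
+         raise argparse.ArgumentTypeError('%r is not a valid TCP port' % text)
+     return value
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--port', type=port_number, default=8080)
+ assert parser.parse_args(['--port', '9090']).port == 9090
+ # parser.parse_args(['--port', '99999']) would report the message above
+ # and exit with status 2 via ArgumentParser.error().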
+
+# ==============
+# Action classes
+# ==============
+
+class Action(_AttributeHolder):
+ """Information about how to convert command line strings to Python objects.
+
+ Action objects are used by an ArgumentParser to represent the information
+ needed to parse a single argument from one or more strings from the
+ command line. The keyword arguments to the Action constructor are also
+ all attributes of Action instances.
+
+ Keyword Arguments:
+
+ - option_strings -- A list of command-line option strings which
+ should be associated with this action.
+
+ - dest -- The name of the attribute to hold the created object(s)
+
+ - nargs -- The number of command-line arguments that should be
+ consumed. By default, one argument will be consumed and a single
+ value will be produced. Other values include:
+ - N (an integer) consumes N arguments (and produces a list)
+ - '?' consumes zero or one arguments
+ - '*' consumes zero or more arguments (and produces a list)
+ - '+' consumes one or more arguments (and produces a list)
+ Note that the difference between the default and nargs=1 is that
+ with the default, a single value will be produced, while with
+ nargs=1, a list containing a single value will be produced.
+
+ - const -- The value to be produced if the option is specified and the
+ option uses an action that takes no values.
+
+ - default -- The value to be produced if the option is not specified.
+
+ - type -- The type which the command-line arguments should be converted
+ to, should be one of 'string', 'int', 'float', 'complex' or a
+ callable object that accepts a single string argument. If None,
+ 'string' is assumed.
+
+ - choices -- A container of values that should be allowed. If not None,
+ after a command-line argument has been converted to the appropriate
+ type, an exception will be raised if it is not a member of this
+ collection.
+
+ - required -- True if the action must always be specified at the
+ command line. This is only meaningful for optional command-line
+ arguments.
+
+ - help -- The help string describing the argument.
+
+ - metavar -- The name to be used for the option's argument with the
+ help string. If None, the 'dest' value will be used as the name.
+ """
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ self.option_strings = option_strings
+ self.dest = dest
+ self.nargs = nargs
+ self.const = const
+ self.default = default
+ self.type = type
+ self.choices = choices
+ self.required = required
+ self.help = help
+ self.metavar = metavar
+
+ def _get_kwargs(self):
+ names = [
+ 'option_strings',
+ 'dest',
+ 'nargs',
+ 'const',
+ 'default',
+ 'type',
+ 'choices',
+ 'help',
+ 'metavar',
+ ]
+ return [(name, getattr(self, name)) for name in names]
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ raise NotImplementedError(_('.__call__() not defined'))
+
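+ # Editorial usage sketch (not part of the vendored module): subclassing
+ # Action and passing the class via action=, as the docstring above
+ # allows. Stock argparse module; class and option names are invented.
+ import argparse
+
+ class CommaSplitAction(argparse.Action):
+     def __call__(self, parser, namespace, values, option_string=None):
+         # __call__ is responsible for storing its own result.
+         setattr(namespace, self.dest, values.split(','))
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--tags', action=CommaSplitAction, default=[])
+ assert parser.parse_args(['--tags', 'a,b,c']).tags == ['a', 'b', 'c']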
+
+class _StoreAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for store actions must be > 0; if you '
+ 'have nothing to store, actions such as store '
+ 'true or store const may be more appropriate')
+ if const is not None and nargs != OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ super(_StoreAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, values)
+
+
+class _StoreConstAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ const,
+ default=None,
+ required=False,
+ help=None,
+ metavar=None):
+ super(_StoreConstAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ const=const,
+ default=default,
+ required=required,
+ help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, self.const)
+
+
+class _StoreTrueAction(_StoreConstAction):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ default=False,
+ required=False,
+ help=None):
+ super(_StoreTrueAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ const=True,
+ default=default,
+ required=required,
+ help=help)
+
+
+class _StoreFalseAction(_StoreConstAction):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ default=True,
+ required=False,
+ help=None):
+ super(_StoreFalseAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ const=False,
+ default=default,
+ required=required,
+ help=help)
+
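+ # Editorial usage sketch (not part of the vendored module): the
+ # store_true/store_false actions defined above, exercised through the
+ # stock argparse module; the flag names are made up.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--verbose', action='store_true')                  # default False
+ parser.add_argument('--no-color', dest='color', action='store_false')  # default True
+ args = parser.parse_args(['--no-color'])
+ assert args.verbose is False and args.color is False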
+
+class _AppendAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for append actions must be > 0; if arg '
+ 'strings are not supplying the value to append, '
+ 'the append const action may be more appropriate')
+ if const is not None and nargs != OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ super(_AppendAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
+ items.append(values)
+ setattr(namespace, self.dest, items)
+
+
+class _AppendConstAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ const,
+ default=None,
+ required=False,
+ help=None,
+ metavar=None):
+ super(_AppendConstAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ const=const,
+ default=default,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = _copy.copy(_ensure_value(namespace, self.dest, []))
+ items.append(self.const)
+ setattr(namespace, self.dest, items)
+
+
+class _CountAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest,
+ default=None,
+ required=False,
+ help=None):
+ super(_CountAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ default=default,
+ required=required,
+ help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ new_count = _ensure_value(namespace, self.dest, 0) + 1
+ setattr(namespace, self.dest, new_count)
+
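+ # Editorial usage sketch (not part of the vendored module): the append,
+ # append_const and count actions defined above, via the stock argparse
+ # module with invented option names.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('-I', dest='include_dirs', action='append', default=[])
+ parser.add_argument('--strict', dest='checks', action='append_const', const='strict')
+ parser.add_argument('-v', dest='verbosity', action='count', default=0)
+ args = parser.parse_args(['-I', '/usr/include', '-I', '/opt/include',
+                           '--strict', '-v', '-v'])
+ assert args.include_dirs == ['/usr/include', '/opt/include']
+ assert args.checks == ['strict']
+ assert args.verbosity == 2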
+
+class _HelpAction(Action):
+
+ def __init__(self,
+ option_strings,
+ dest=SUPPRESS,
+ default=SUPPRESS,
+ help=None):
+ super(_HelpAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ default=default,
+ nargs=0,
+ help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser.print_help()
+ parser.exit()
+
+
+class _VersionAction(Action):
+
+ def __init__(self,
+ option_strings,
+ version=None,
+ dest=SUPPRESS,
+ default=SUPPRESS,
+ help="show program's version number and exit"):
+ super(_VersionAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ default=default,
+ nargs=0,
+ help=help)
+ self.version = version
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ version = self.version
+ if version is None:
+ version = parser.version
+ formatter = parser._get_formatter()
+ formatter.add_text(version)
+ parser.exit(message=formatter.format_help())
+
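+ # Editorial usage sketch (not part of the vendored module): the
+ # action='version' form, which is the replacement for the deprecated
+ # ArgumentParser(version=...) constructor argument warned about further
+ # below. Stock argparse module; the version string is made up.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--version', action='version', version='example 1.2.3')
+ # "example --version" prints "example 1.2.3" and exits via parser.exit().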
+
+class _SubParsersAction(Action):
+
+ class _ChoicesPseudoAction(Action):
+
+ def __init__(self, name, aliases, help):
+ metavar = dest = name
+ if aliases:
+ metavar += ' (%s)' % ', '.join(aliases)
+ sup = super(_SubParsersAction._ChoicesPseudoAction, self)
+ sup.__init__(option_strings=[], dest=dest, help=help,
+ metavar=metavar)
+
+ def __init__(self,
+ option_strings,
+ prog,
+ parser_class,
+ dest=SUPPRESS,
+ help=None,
+ metavar=None):
+
+ self._prog_prefix = prog
+ self._parser_class = parser_class
+ self._name_parser_map = {}
+ self._choices_actions = []
+
+ super(_SubParsersAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=PARSER,
+ choices=self._name_parser_map,
+ help=help,
+ metavar=metavar)
+
+ def add_parser(self, name, **kwargs):
+ # set prog from the existing prefix
+ if kwargs.get('prog') is None:
+ kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
+
+ aliases = kwargs.pop('aliases', ())
+
+ # create a pseudo-action to hold the choice help
+ if 'help' in kwargs:
+ help = kwargs.pop('help')
+ choice_action = self._ChoicesPseudoAction(name, aliases, help)
+ self._choices_actions.append(choice_action)
+
+ # create the parser and add it to the map
+ parser = self._parser_class(**kwargs)
+ self._name_parser_map[name] = parser
+
+ # make parser available under aliases also
+ for alias in aliases:
+ self._name_parser_map[alias] = parser
+
+ return parser
+
+ def _get_subactions(self):
+ return self._choices_actions
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser_name = values[0]
+ arg_strings = values[1:]
+
+ # set the parser name if requested
+ if self.dest is not SUPPRESS:
+ setattr(namespace, self.dest, parser_name)
+
+ # select the parser
+ try:
+ parser = self._name_parser_map[parser_name]
+ except KeyError:
+ tup = parser_name, ', '.join(self._name_parser_map)
+ msg = _('unknown parser %r (choices: %s)') % tup
+ raise ArgumentError(self, msg)
+
+ # parse all the remaining options into the namespace
+ # store any unrecognized options on the object, so that the top
+ # level parser can decide what to do with them
+ namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
+ if arg_strings:
+ vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
+ getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
+
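+ # Editorial usage sketch (not part of the vendored module): subcommands
+ # through add_subparsers()/add_parser(), including the aliases= support
+ # implemented above. Stock argparse module; command names are invented.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ subparsers = parser.add_subparsers(dest='command')
+ clone = subparsers.add_parser('clone', help='copy a repository')
+ clone.add_argument('url')
+ fetch = subparsers.add_parser('fetch', aliases=['fe'], help='download objects')
+ fetch.add_argument('--all', action='store_true')
+ args = parser.parse_args(['fetch', '--all'])
+ assert args.command == 'fetch' and args.all is True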
+
+# ==============
+# Type classes
+# ==============
+
+class FileType(object):
+ """Factory for creating file object types
+
+ Instances of FileType are typically passed as type= arguments to the
+ ArgumentParser add_argument() method.
+
+ Keyword Arguments:
+ - mode -- A string indicating how the file is to be opened. Accepts the
+ same values as the builtin open() function.
+ - bufsize -- The file's desired buffer size. Accepts the same values as
+ the builtin open() function.
+ """
+
+ def __init__(self, mode='r', bufsize=None):
+ self._mode = mode
+ self._bufsize = bufsize
+
+ def __call__(self, string):
+ # the special argument "-" means sys.std{in,out}
+ if string == '-':
+ if 'r' in self._mode:
+ return _sys.stdin
+ elif 'w' in self._mode:
+ return _sys.stdout
+ else:
+ msg = _('argument "-" with mode %r') % self._mode
+ raise ValueError(msg)
+
+ try:
+ # all other arguments are used as file names
+ if self._bufsize:
+ return open(string, self._mode, self._bufsize)
+ else:
+ return open(string, self._mode)
+ except IOError:
+ err = _sys.exc_info()[1]
+ message = _("can't open '%s': %s")
+ raise ArgumentTypeError(message % (string, err))
+
+ def __repr__(self):
+ args = [self._mode, self._bufsize]
+ args_str = ', '.join([repr(arg) for arg in args if arg is not None])
+ return '%s(%s)' % (type(self).__name__, args_str)
+
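+ # Editorial usage sketch (not part of the vendored module): FileType as
+ # a type= factory, via the stock argparse module. File names are
+ # placeholders and the parse call is left commented out so the sketch
+ # does not touch the filesystem.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--output', type=argparse.FileType('w'), default='-')
+ parser.add_argument('infile', type=argparse.FileType('r'))
+ # parser.parse_args(['settings.conf', '--output', 'report.txt']) would
+ # leave open file objects in args.infile and args.output; '-' maps to
+ # sys.stdin/sys.stdout depending on mode, and unreadable paths become
+ # ArgumentTypeError, which parse_args() reports as a usage error.
+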
+# ===========================
+# Optional and Positional Parsing
+# ===========================
+
+class Namespace(_AttributeHolder):
+ """Simple object for storing attributes.
+
+ Implements equality by attribute names and values, and provides a simple
+ string representation.
+ """
+
+ def __init__(self, **kwargs):
+ for name in kwargs:
+ setattr(self, name, kwargs[name])
+
+ __hash__ = None
+
+ def __eq__(self, other):
+ return vars(self) == vars(other)
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __contains__(self, key):
+ return key in self.__dict__
+
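+ # Editorial usage sketch (not part of the vendored module): the
+ # Namespace object returned by parse_args(), using the stock argparse
+ # module with an invented option.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--level', type=int, default=0)
+ args = parser.parse_args(['--level', '2'])
+ assert args.level == 2                      # attribute access
+ assert 'level' in args                      # __contains__
+ assert vars(args) == {'level': 2}
+ assert args == argparse.Namespace(level=2)  # equality by attributes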
+
+class _ActionsContainer(object):
+
+ def __init__(self,
+ description,
+ prefix_chars,
+ argument_default,
+ conflict_handler):
+ super(_ActionsContainer, self).__init__()
+
+ self.description = description
+ self.argument_default = argument_default
+ self.prefix_chars = prefix_chars
+ self.conflict_handler = conflict_handler
+
+ # set up registries
+ self._registries = {}
+
+ # register actions
+ self.register('action', None, _StoreAction)
+ self.register('action', 'store', _StoreAction)
+ self.register('action', 'store_const', _StoreConstAction)
+ self.register('action', 'store_true', _StoreTrueAction)
+ self.register('action', 'store_false', _StoreFalseAction)
+ self.register('action', 'append', _AppendAction)
+ self.register('action', 'append_const', _AppendConstAction)
+ self.register('action', 'count', _CountAction)
+ self.register('action', 'help', _HelpAction)
+ self.register('action', 'version', _VersionAction)
+ self.register('action', 'parsers', _SubParsersAction)
+
+ # raise an exception if the conflict handler is invalid
+ self._get_handler()
+
+ # action storage
+ self._actions = []
+ self._option_string_actions = {}
+
+ # groups
+ self._action_groups = []
+ self._mutually_exclusive_groups = []
+
+ # defaults storage
+ self._defaults = {}
+
+ # determines whether an "option" looks like a negative number
+ self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
+
+ # whether or not there are any optionals that look like negative
+ # numbers -- uses a list so it can be shared and edited
+ self._has_negative_number_optionals = []
+
+ # ====================
+ # Registration methods
+ # ====================
+ def register(self, registry_name, value, object):
+ registry = self._registries.setdefault(registry_name, {})
+ registry[value] = object
+
+ def _registry_get(self, registry_name, value, default=None):
+ return self._registries[registry_name].get(value, default)
+
+ # ==================================
+ # Namespace default accessor methods
+ # ==================================
+ def set_defaults(self, **kwargs):
+ self._defaults.update(kwargs)
+
+ # if these defaults match any existing arguments, replace
+ # the previous default on the object with the new one
+ for action in self._actions:
+ if action.dest in kwargs:
+ action.default = kwargs[action.dest]
+
+ def get_default(self, dest):
+ for action in self._actions:
+ if action.dest == dest and action.default is not None:
+ return action.default
+ return self._defaults.get(dest, None)
+
+
+ # =======================
+ # Adding argument actions
+ # =======================
+ def add_argument(self, *args, **kwargs):
+ """
+ add_argument(dest, ..., name=value, ...)
+ add_argument(option_string, option_string, ..., name=value, ...)
+ """
+
+ # if no positional args are supplied or only one is supplied and
+ # it doesn't look like an option string, parse a positional
+ # argument
+ chars = self.prefix_chars
+ if not args or len(args) == 1 and args[0][0] not in chars:
+ if args and 'dest' in kwargs:
+ raise ValueError('dest supplied twice for positional argument')
+ kwargs = self._get_positional_kwargs(*args, **kwargs)
+
+ # otherwise, we're adding an optional argument
+ else:
+ kwargs = self._get_optional_kwargs(*args, **kwargs)
+
+ # if no default was supplied, use the parser-level default
+ if 'default' not in kwargs:
+ dest = kwargs['dest']
+ if dest in self._defaults:
+ kwargs['default'] = self._defaults[dest]
+ elif self.argument_default is not None:
+ kwargs['default'] = self.argument_default
+
+ # create the action object, and add it to the parser
+ action_class = self._pop_action_class(kwargs)
+ if not _callable(action_class):
+ raise ValueError('unknown action "%s"' % action_class)
+ action = action_class(**kwargs)
+
+ # raise an error if the action type is not callable
+ type_func = self._registry_get('type', action.type, action.type)
+ if not _callable(type_func):
+ raise ValueError('%r is not callable' % type_func)
+
+ return self._add_action(action)
+
+ def add_argument_group(self, *args, **kwargs):
+ group = _ArgumentGroup(self, *args, **kwargs)
+ self._action_groups.append(group)
+ return group
+
+ def add_mutually_exclusive_group(self, **kwargs):
+ group = _MutuallyExclusiveGroup(self, **kwargs)
+ self._mutually_exclusive_groups.append(group)
+ return group
+
+ def _add_action(self, action):
+ # resolve any conflicts
+ self._check_conflict(action)
+
+ # add to actions list
+ self._actions.append(action)
+ action.container = self
+
+ # index the action by any option strings it has
+ for option_string in action.option_strings:
+ self._option_string_actions[option_string] = action
+
+ # set the flag if any option strings look like negative numbers
+ for option_string in action.option_strings:
+ if self._negative_number_matcher.match(option_string):
+ if not self._has_negative_number_optionals:
+ self._has_negative_number_optionals.append(True)
+
+ # return the created action
+ return action
+
+ def _remove_action(self, action):
+ self._actions.remove(action)
+
+ def _add_container_actions(self, container):
+ # collect groups by titles
+ title_group_map = {}
+ for group in self._action_groups:
+ if group.title in title_group_map:
+ msg = _('cannot merge actions - two groups are named %r')
+ raise ValueError(msg % (group.title))
+ title_group_map[group.title] = group
+
+ # map each action to its group
+ group_map = {}
+ for group in container._action_groups:
+
+ # if a group with the title exists, use that, otherwise
+ # create a new group matching the container's group
+ if group.title not in title_group_map:
+ title_group_map[group.title] = self.add_argument_group(
+ title=group.title,
+ description=group.description,
+ conflict_handler=group.conflict_handler)
+
+ # map the actions to their new group
+ for action in group._group_actions:
+ group_map[action] = title_group_map[group.title]
+
+ # add container's mutually exclusive groups
+ # NOTE: if add_mutually_exclusive_group ever gains title= and
+ # description= then this code will need to be expanded as above
+ for group in container._mutually_exclusive_groups:
+ mutex_group = self.add_mutually_exclusive_group(
+ required=group.required)
+
+ # map the actions to their new mutex group
+ for action in group._group_actions:
+ group_map[action] = mutex_group
+
+ # add all actions to this container or their group
+ for action in container._actions:
+ group_map.get(action, self)._add_action(action)
+
+ def _get_positional_kwargs(self, dest, **kwargs):
+ # make sure required is not specified
+ if 'required' in kwargs:
+ msg = _("'required' is an invalid argument for positionals")
+ raise TypeError(msg)
+
+ # mark positional arguments as required if at least one is
+ # always required
+ if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
+ kwargs['required'] = True
+ if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
+ kwargs['required'] = True
+
+ # return the keyword arguments with no option strings
+ return dict(kwargs, dest=dest, option_strings=[])
+
+ def _get_optional_kwargs(self, *args, **kwargs):
+ # determine short and long option strings
+ option_strings = []
+ long_option_strings = []
+ for option_string in args:
+ # error on strings that don't start with an appropriate prefix
+ if not option_string[0] in self.prefix_chars:
+ msg = _('invalid option string %r: '
+ 'must start with a character %r')
+ tup = option_string, self.prefix_chars
+ raise ValueError(msg % tup)
+
+ # strings starting with two prefix characters are long options
+ option_strings.append(option_string)
+ if option_string[0] in self.prefix_chars:
+ if len(option_string) > 1:
+ if option_string[1] in self.prefix_chars:
+ long_option_strings.append(option_string)
+
+ # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
+ dest = kwargs.pop('dest', None)
+ if dest is None:
+ if long_option_strings:
+ dest_option_string = long_option_strings[0]
+ else:
+ dest_option_string = option_strings[0]
+ dest = dest_option_string.lstrip(self.prefix_chars)
+ if not dest:
+ msg = _('dest= is required for options like %r')
+ raise ValueError(msg % option_string)
+ dest = dest.replace('-', '_')
+
+ # return the updated keyword arguments
+ return dict(kwargs, dest=dest, option_strings=option_strings)
+
+ def _pop_action_class(self, kwargs, default=None):
+ action = kwargs.pop('action', default)
+ return self._registry_get('action', action, action)
+
+ def _get_handler(self):
+ # determine function from conflict handler string
+ handler_func_name = '_handle_conflict_%s' % self.conflict_handler
+ try:
+ return getattr(self, handler_func_name)
+ except AttributeError:
+ msg = _('invalid conflict_resolution value: %r')
+ raise ValueError(msg % self.conflict_handler)
+
+ def _check_conflict(self, action):
+
+ # find all options that conflict with this option
+ confl_optionals = []
+ for option_string in action.option_strings:
+ if option_string in self._option_string_actions:
+ confl_optional = self._option_string_actions[option_string]
+ confl_optionals.append((option_string, confl_optional))
+
+ # resolve any conflicts
+ if confl_optionals:
+ conflict_handler = self._get_handler()
+ conflict_handler(action, confl_optionals)
+
+ def _handle_conflict_error(self, action, conflicting_actions):
+ message = _('conflicting option string(s): %s')
+ conflict_string = ', '.join([option_string
+ for option_string, action
+ in conflicting_actions])
+ raise ArgumentError(action, message % conflict_string)
+
+ def _handle_conflict_resolve(self, action, conflicting_actions):
+
+ # remove all conflicting options
+ for option_string, action in conflicting_actions:
+
+ # remove the conflicting option
+ action.option_strings.remove(option_string)
+ self._option_string_actions.pop(option_string, None)
+
+ # if the option now has no option string, remove it from the
+ # container holding it
+ if not action.option_strings:
+ action.container._remove_action(action)
+
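+ # Editorial usage sketch (not part of the vendored module): the
+ # set_defaults()/get_default() accessors defined in the container class
+ # above, via the stock argparse module with invented names.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ parser.add_argument('--mode', choices=['fast', 'safe'])
+ parser.set_defaults(mode='safe', dry_run=False)   # dry_run has no option
+ assert parser.get_default('mode') == 'safe'
+ args = parser.parse_args([])
+ assert args.mode == 'safe' and args.dry_run is False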
+
+class _ArgumentGroup(_ActionsContainer):
+
+ def __init__(self, container, title=None, description=None, **kwargs):
+ # add any missing keyword arguments by checking the container
+ update = kwargs.setdefault
+ update('conflict_handler', container.conflict_handler)
+ update('prefix_chars', container.prefix_chars)
+ update('argument_default', container.argument_default)
+ super_init = super(_ArgumentGroup, self).__init__
+ super_init(description=description, **kwargs)
+
+ # group attributes
+ self.title = title
+ self._group_actions = []
+
+ # share most attributes with the container
+ self._registries = container._registries
+ self._actions = container._actions
+ self._option_string_actions = container._option_string_actions
+ self._defaults = container._defaults
+ self._has_negative_number_optionals = \
+ container._has_negative_number_optionals
+
+ def _add_action(self, action):
+ action = super(_ArgumentGroup, self)._add_action(action)
+ self._group_actions.append(action)
+ return action
+
+ def _remove_action(self, action):
+ super(_ArgumentGroup, self)._remove_action(action)
+ self._group_actions.remove(action)
+
+
+class _MutuallyExclusiveGroup(_ArgumentGroup):
+
+ def __init__(self, container, required=False):
+ super(_MutuallyExclusiveGroup, self).__init__(container)
+ self.required = required
+ self._container = container
+
+ def _add_action(self, action):
+ if action.required:
+ msg = _('mutually exclusive arguments must be optional')
+ raise ValueError(msg)
+ action = self._container._add_action(action)
+ self._group_actions.append(action)
+ return action
+
+ def _remove_action(self, action):
+ self._container._remove_action(action)
+ self._group_actions.remove(action)
+
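+ # Editorial usage sketch (not part of the vendored module): a named
+ # argument group plus a mutually exclusive group, as implemented by the
+ # two classes above. Stock argparse module; titles and options are made up.
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='example')
+ output = parser.add_argument_group('output options', 'control what gets written')
+ output.add_argument('--out-dir', default='.')
+ verbosity = parser.add_mutually_exclusive_group()
+ verbosity.add_argument('--quiet', action='store_true')
+ verbosity.add_argument('--verbose', action='store_true')
+ args = parser.parse_args(['--verbose'])
+ assert args.verbose and not args.quiet
+ # Passing both --verbose and --quiet is rejected with
+ # "not allowed with argument ..." by the conflict check during parsing.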
+
+class ArgumentParser(_AttributeHolder, _ActionsContainer):
+ """Object for parsing command line strings into Python objects.
+
+ Keyword Arguments:
+ - prog -- The name of the program (default: sys.argv[0])
+ - usage -- A usage message (default: auto-generated from arguments)
+ - description -- A description of what the program does
+ - epilog -- Text following the argument descriptions
+ - parents -- Parsers whose arguments should be copied into this one
+ - formatter_class -- HelpFormatter class for printing help messages
+ - prefix_chars -- Characters that prefix optional arguments
+ - fromfile_prefix_chars -- Characters that prefix files containing
+ additional arguments
+ - argument_default -- The default value for all arguments
+ - conflict_handler -- String indicating how to handle conflicts
+ - add_help -- Add a -h/--help option
+ """
+
+ def __init__(self,
+ prog=None,
+ usage=None,
+ description=None,
+ epilog=None,
+ version=None,
+ parents=[],
+ formatter_class=HelpFormatter,
+ prefix_chars='-',
+ fromfile_prefix_chars=None,
+ argument_default=None,
+ conflict_handler='error',
+ add_help=True):
+
+ if version is not None:
+ import warnings
+ warnings.warn(
+ """The "version" argument to ArgumentParser is deprecated. """
+ """Please use """
+ """"add_argument(..., action='version', version="N", ...)" """
+ """instead""", DeprecationWarning)
+
+ superinit = super(ArgumentParser, self).__init__
+ superinit(description=description,
+ prefix_chars=prefix_chars,
+ argument_default=argument_default,
+ conflict_handler=conflict_handler)
+
+ # default setting for prog
+ if prog is None:
+ prog = _os.path.basename(_sys.argv[0])
+
+ self.prog = prog
+ self.usage = usage
+ self.epilog = epilog
+ self.version = version
+ self.formatter_class = formatter_class
+ self.fromfile_prefix_chars = fromfile_prefix_chars
+ self.add_help = add_help
+
+ add_group = self.add_argument_group
+ self._positionals = add_group(_('positional arguments'))
+ self._optionals = add_group(_('optional arguments'))
+ self._subparsers = None
+
+ # register types
+ def identity(string):
+ return string
+ self.register('type', None, identity)
+
+ # add help and version arguments if necessary
+ # (using explicit default to override global argument_default)
+ if '-' in prefix_chars:
+ default_prefix = '-'
+ else:
+ default_prefix = prefix_chars[0]
+ if self.add_help:
+ self.add_argument(
+ default_prefix+'h', default_prefix*2+'help',
+ action='help', default=SUPPRESS,
+ help=_('show this help message and exit'))
+ if self.version:
+ self.add_argument(
+ default_prefix+'v', default_prefix*2+'version',
+ action='version', default=SUPPRESS,
+ version=self.version,
+ help=_("show program's version number and exit"))
+
+ # add parent arguments and defaults
+ for parent in parents:
+ self._add_container_actions(parent)
+ try:
+ defaults = parent._defaults
+ except AttributeError:
+ pass
+ else:
+ self._defaults.update(defaults)
+
+ # =======================
+ # Pretty __repr__ methods
+ # =======================
+ def _get_kwargs(self):
+ names = [
+ 'prog',
+ 'usage',
+ 'description',
+ 'version',
+ 'formatter_class',
+ 'conflict_handler',
+ 'add_help',
+ ]
+ return [(name, getattr(self, name)) for name in names]
+
+ # ==================================
+ # Optional/Positional adding methods
+ # ==================================
+ def add_subparsers(self, **kwargs):
+ if self._subparsers is not None:
+ self.error(_('cannot have multiple subparser arguments'))
+
+ # add the parser class to the arguments if it's not present
+ kwargs.setdefault('parser_class', type(self))
+
+ if 'title' in kwargs or 'description' in kwargs:
+ title = _(kwargs.pop('title', 'subcommands'))
+ description = _(kwargs.pop('description', None))
+ self._subparsers = self.add_argument_group(title, description)
+ else:
+ self._subparsers = self._positionals
+
+ # prog defaults to the usage message of this parser, skipping
+ # optional arguments and with no "usage:" prefix
+ if kwargs.get('prog') is None:
+ formatter = self._get_formatter()
+ positionals = self._get_positional_actions()
+ groups = self._mutually_exclusive_groups
+ formatter.add_usage(self.usage, positionals, groups, '')
+ kwargs['prog'] = formatter.format_help().strip()
+
+ # create the parsers action and add it to the positionals list
+ parsers_class = self._pop_action_class(kwargs, 'parsers')
+ action = parsers_class(option_strings=[], **kwargs)
+ self._subparsers._add_action(action)
+
+ # return the created parsers action
+ return action
+
+ def _add_action(self, action):
+ if action.option_strings:
+ self._optionals._add_action(action)
+ else:
+ self._positionals._add_action(action)
+ return action
+
+ def _get_optional_actions(self):
+ return [action
+ for action in self._actions
+ if action.option_strings]
+
+ def _get_positional_actions(self):
+ return [action
+ for action in self._actions
+ if not action.option_strings]
+
+ # =====================================
+ # Command line argument parsing methods
+ # =====================================
+ def parse_args(self, args=None, namespace=None):
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ msg = _('unrecognized arguments: %s')
+ self.error(msg % ' '.join(argv))
+ return args
+
+ def parse_known_args(self, args=None, namespace=None):
+ # args default to the system args
+ if args is None:
+ args = _sys.argv[1:]
+
+ # default Namespace built from parser defaults
+ if namespace is None:
+ namespace = Namespace()
+
+ # add any action defaults that aren't present
+ for action in self._actions:
+ if action.dest is not SUPPRESS:
+ if not hasattr(namespace, action.dest):
+ if action.default is not SUPPRESS:
+ setattr(namespace, action.dest, action.default)
+
+ # add any parser defaults that aren't present
+ for dest in self._defaults:
+ if not hasattr(namespace, dest):
+ setattr(namespace, dest, self._defaults[dest])
+
+ # parse the arguments and exit if there are any errors
+ try:
+ namespace, args = self._parse_known_args(args, namespace)
+ if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
+ args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
+ delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
+ return namespace, args
+ except ArgumentError:
+ err = _sys.exc_info()[1]
+ self.error(str(err))
+
+ def _parse_known_args(self, arg_strings, namespace):
+ # replace arg strings that are file references
+ if self.fromfile_prefix_chars is not None:
+ arg_strings = self._read_args_from_files(arg_strings)
+
+ # map all mutually exclusive arguments to the other arguments
+ # they can't occur with
+ action_conflicts = {}
+ for mutex_group in self._mutually_exclusive_groups:
+ group_actions = mutex_group._group_actions
+ for i, mutex_action in enumerate(mutex_group._group_actions):
+ conflicts = action_conflicts.setdefault(mutex_action, [])
+ conflicts.extend(group_actions[:i])
+ conflicts.extend(group_actions[i + 1:])
+
+ # find all option indices, and determine the arg_string_pattern
+ # which has an 'O' if there is an option at an index,
+ # an 'A' if there is an argument, or a '-' if there is a '--'
+ option_string_indices = {}
+ arg_string_pattern_parts = []
+ arg_strings_iter = iter(arg_strings)
+ for i, arg_string in enumerate(arg_strings_iter):
+
+ # all args after -- are non-options
+ if arg_string == '--':
+ arg_string_pattern_parts.append('-')
+ for arg_string in arg_strings_iter:
+ arg_string_pattern_parts.append('A')
+
+ # otherwise, add the arg to the arg strings
+ # and note the index if it was an option
+ else:
+ option_tuple = self._parse_optional(arg_string)
+ if option_tuple is None:
+ pattern = 'A'
+ else:
+ option_string_indices[i] = option_tuple
+ pattern = 'O'
+ arg_string_pattern_parts.append(pattern)
+
+ # join the pieces together to form the pattern
+ arg_strings_pattern = ''.join(arg_string_pattern_parts)
+
+ # converts arg strings to the appropriate type and then takes the action
+ seen_actions = set()
+ seen_non_default_actions = set()
+
+ def take_action(action, argument_strings, option_string=None):
+ seen_actions.add(action)
+ argument_values = self._get_values(action, argument_strings)
+
+ # error if this argument is not allowed with other previously
+ # seen arguments, assuming that actions that use the default
+ # value don't really count as "present"
+ if argument_values is not action.default:
+ seen_non_default_actions.add(action)
+ for conflict_action in action_conflicts.get(action, []):
+ if conflict_action in seen_non_default_actions:
+ msg = _('not allowed with argument %s')
+ action_name = _get_action_name(conflict_action)
+ raise ArgumentError(action, msg % action_name)
+
+ # take the action if we didn't receive a SUPPRESS value
+ # (e.g. from a default)
+ if argument_values is not SUPPRESS:
+ action(self, namespace, argument_values, option_string)
+
+ # function to convert arg_strings into an optional action
+ def consume_optional(start_index):
+
+ # get the optional identified at this index
+ option_tuple = option_string_indices[start_index]
+ action, option_string, explicit_arg = option_tuple
+
+ # identify additional optionals in the same arg string
+ # (e.g. -xyz is the same as -x -y -z if no args are required)
+ match_argument = self._match_argument
+ action_tuples = []
+ while True:
+
+ # if we found no optional action, skip it
+ if action is None:
+ extras.append(arg_strings[start_index])
+ return start_index + 1
+
+ # if there is an explicit argument, try to match the
+ # optional's string arguments to only this
+ if explicit_arg is not None:
+ arg_count = match_argument(action, 'A')
+
+ # if the action is a single-dash option and takes no
+ # arguments, try to parse more single-dash options out
+ # of the tail of the option string
+ chars = self.prefix_chars
+ if arg_count == 0 and option_string[1] not in chars:
+ action_tuples.append((action, [], option_string))
+ char = option_string[0]
+ option_string = char + explicit_arg[0]
+ new_explicit_arg = explicit_arg[1:] or None
+ optionals_map = self._option_string_actions
+ if option_string in optionals_map:
+ action = optionals_map[option_string]
+ explicit_arg = new_explicit_arg
+ else:
+ msg = _('ignored explicit argument %r')
+ raise ArgumentError(action, msg % explicit_arg)
+
+ # if the action expects exactly one argument, we've
+ # successfully matched the option; exit the loop
+ elif arg_count == 1:
+ stop = start_index + 1
+ args = [explicit_arg]
+ action_tuples.append((action, args, option_string))
+ break
+
+ # error if a double-dash option did not use the
+ # explicit argument
+ else:
+ msg = _('ignored explicit argument %r')
+ raise ArgumentError(action, msg % explicit_arg)
+
+ # if there is no explicit argument, try to match the
+ # optional's string arguments with the following strings
+ # if successful, exit the loop
+ else:
+ start = start_index + 1
+ selected_patterns = arg_strings_pattern[start:]
+ arg_count = match_argument(action, selected_patterns)
+ stop = start + arg_count
+ args = arg_strings[start:stop]
+ action_tuples.append((action, args, option_string))
+ break
+
+ # add the Optional to the list and return the index at which
+ # the Optional's string args stopped
+ assert action_tuples
+ for action, args, option_string in action_tuples:
+ take_action(action, args, option_string)
+ return stop
+
+ # the list of Positionals left to be parsed; this is modified
+ # by consume_positionals()
+ positionals = self._get_positional_actions()
+
+ # function to convert arg_strings into positional actions
+ def consume_positionals(start_index):
+ # match as many Positionals as possible
+ match_partial = self._match_arguments_partial
+ selected_pattern = arg_strings_pattern[start_index:]
+ arg_counts = match_partial(positionals, selected_pattern)
+
+ # slice off the appropriate arg strings for each Positional
+ # and add the Positional and its args to the list
+ for action, arg_count in zip(positionals, arg_counts):
+ args = arg_strings[start_index: start_index + arg_count]
+ start_index += arg_count
+ take_action(action, args)
+
+ # slice off the Positionals that we just parsed and return the
+ # index at which the Positionals' string args stopped
+ positionals[:] = positionals[len(arg_counts):]
+ return start_index
+
+ # consume Positionals and Optionals alternately, until we have
+ # passed the last option string
+ extras = []
+ start_index = 0
+ if option_string_indices:
+ max_option_string_index = max(option_string_indices)
+ else:
+ max_option_string_index = -1
+ while start_index <= max_option_string_index:
+
+ # consume any Positionals preceding the next option
+ next_option_string_index = min([
+ index
+ for index in option_string_indices
+ if index >= start_index])
+ if start_index != next_option_string_index:
+ positionals_end_index = consume_positionals(start_index)
+
+ # only try to parse the next optional if we didn't consume
+ # the option string during the positionals parsing
+ if positionals_end_index > start_index:
+ start_index = positionals_end_index
+ continue
+ else:
+ start_index = positionals_end_index
+
+ # if we consumed all the positionals we could and we're not
+ # at the index of an option string, there were extra arguments
+ if start_index not in option_string_indices:
+ strings = arg_strings[start_index:next_option_string_index]
+ extras.extend(strings)
+ start_index = next_option_string_index
+
+ # consume the next optional and any arguments for it
+ start_index = consume_optional(start_index)
+
+ # consume any positionals following the last Optional
+ stop_index = consume_positionals(start_index)
+
+ # if we didn't consume all the argument strings, there were extras
+ extras.extend(arg_strings[stop_index:])
+
+ # if we didn't use all the Positional objects, there were too few
+ # arg strings supplied.
+ if positionals:
+ self.error(_('too few arguments'))
+
+ # make sure all required actions were present, and convert defaults.
+ for action in self._actions:
+ if action not in seen_actions:
+ if action.required:
+ name = _get_action_name(action)
+ self.error(_('argument %s is required') % name)
+ else:
+ # Convert action default now instead of doing it before
+ # parsing arguments to avoid calling convert functions
+ # twice (which may fail) if the argument was given, but
+ # only if it was defined already in the namespace
+ if (action.default is not None and
+ isinstance(action.default, basestring) and
+ hasattr(namespace, action.dest) and
+ action.default is getattr(namespace, action.dest)):
+ setattr(namespace, action.dest,
+ self._get_value(action, action.default))
+
+ # make sure all required groups had one option present
+ for group in self._mutually_exclusive_groups:
+ if group.required:
+ for action in group._group_actions:
+ if action in seen_non_default_actions:
+ break
+
+ # if no actions were used, report the error
+ else:
+ names = [_get_action_name(action)
+ for action in group._group_actions
+ if action.help is not SUPPRESS]
+ msg = _('one of the arguments %s is required')
+ self.error(msg % ' '.join(names))
+
+ # return the updated namespace and the extra arguments
+ return namespace, extras
+
+ def _read_args_from_files(self, arg_strings):
+ # expand arguments referencing files
+ new_arg_strings = []
+ for arg_string in arg_strings:
+
+ # for regular arguments, just add them back into the list
+ if arg_string[0] not in self.fromfile_prefix_chars:
+ new_arg_strings.append(arg_string)
+
+ # replace arguments referencing files with the file content
+ else:
+ try:
+ args_file = open(arg_string[1:])
+ try:
+ arg_strings = []
+ for arg_line in args_file.read().splitlines():
+ for arg in self.convert_arg_line_to_args(arg_line):
+ arg_strings.append(arg)
+ arg_strings = self._read_args_from_files(arg_strings)
+ new_arg_strings.extend(arg_strings)
+ finally:
+ args_file.close()
+ except IOError:
+ err = _sys.exc_info()[1]
+ self.error(str(err))
+
+ # return the modified argument list
+ return new_arg_strings
+
+ def convert_arg_line_to_args(self, arg_line):
+ return [arg_line]
+
+ def _match_argument(self, action, arg_strings_pattern):
+ # match the pattern for this action to the arg strings
+ nargs_pattern = self._get_nargs_pattern(action)
+ match = _re.match(nargs_pattern, arg_strings_pattern)
+
+ # raise an exception if we weren't able to find a match
+ if match is None:
+ nargs_errors = {
+ None: _('expected one argument'),
+ OPTIONAL: _('expected at most one argument'),
+ ONE_OR_MORE: _('expected at least one argument'),
+ }
+ default = _('expected %s argument(s)') % action.nargs
+ msg = nargs_errors.get(action.nargs, default)
+ raise ArgumentError(action, msg)
+
+ # return the number of arguments matched
+ return len(match.group(1))
+
+ def _match_arguments_partial(self, actions, arg_strings_pattern):
+ # progressively shorten the actions list by slicing off the
+ # final actions until we find a match
+ result = []
+ for i in range(len(actions), 0, -1):
+ actions_slice = actions[:i]
+ pattern = ''.join([self._get_nargs_pattern(action)
+ for action in actions_slice])
+ match = _re.match(pattern, arg_strings_pattern)
+ if match is not None:
+ result.extend([len(string) for string in match.groups()])
+ break
+
+ # return the list of arg string counts
+ return result
+
+ def _parse_optional(self, arg_string):
+ # if it's an empty string, it was meant to be a positional
+ if not arg_string:
+ return None
+
+ # if it doesn't start with a prefix, it was meant to be positional
+ if not arg_string[0] in self.prefix_chars:
+ return None
+
+ # if the option string is present in the parser, return the action
+ if arg_string in self._option_string_actions:
+ action = self._option_string_actions[arg_string]
+ return action, arg_string, None
+
+ # if it's just a single character, it was meant to be positional
+ if len(arg_string) == 1:
+ return None
+
+ # if the option string before the "=" is present, return the action
+ if '=' in arg_string:
+ option_string, explicit_arg = arg_string.split('=', 1)
+ if option_string in self._option_string_actions:
+ action = self._option_string_actions[option_string]
+ return action, option_string, explicit_arg
+
+ # search through all possible prefixes of the option string
+ # and all actions in the parser for possible interpretations
+ option_tuples = self._get_option_tuples(arg_string)
+
+ # if multiple actions match, the option string was ambiguous
+ if len(option_tuples) > 1:
+ options = ', '.join([option_string
+ for action, option_string, explicit_arg in option_tuples])
+ tup = arg_string, options
+ self.error(_('ambiguous option: %s could match %s') % tup)
+
+ # if exactly one action matched, this segmentation is good,
+ # so return the parsed action
+ elif len(option_tuples) == 1:
+ option_tuple, = option_tuples
+ return option_tuple
+
+ # if it was not found as an option, but it looks like a negative
+ # number, it was meant to be positional
+ # unless there are negative-number-like options
+ if self._negative_number_matcher.match(arg_string):
+ if not self._has_negative_number_optionals:
+ return None
+
+ # if it contains a space, it was meant to be a positional
+ if ' ' in arg_string:
+ return None
+
+ # it was meant to be an optional but there is no such option
+ # in this parser (though it might be a valid option in a subparser)
+ return None, arg_string, None
+
+ def _get_option_tuples(self, option_string):
+ result = []
+
+ # option strings starting with two prefix characters are only
+ # split at the '='
+ chars = self.prefix_chars
+ if option_string[0] in chars and option_string[1] in chars:
+ if '=' in option_string:
+ option_prefix, explicit_arg = option_string.split('=', 1)
+ else:
+ option_prefix = option_string
+ explicit_arg = None
+ for option_string in self._option_string_actions:
+ if option_string.startswith(option_prefix):
+ action = self._option_string_actions[option_string]
+ tup = action, option_string, explicit_arg
+ result.append(tup)
+
+ # single character options can be concatenated with their arguments
+ # but multiple character options always have to have their argument
+ # separate
+ elif option_string[0] in chars and option_string[1] not in chars:
+ option_prefix = option_string
+ explicit_arg = None
+ short_option_prefix = option_string[:2]
+ short_explicit_arg = option_string[2:]
+
+ for option_string in self._option_string_actions:
+ if option_string == short_option_prefix:
+ action = self._option_string_actions[option_string]
+ tup = action, option_string, short_explicit_arg
+ result.append(tup)
+ elif option_string.startswith(option_prefix):
+ action = self._option_string_actions[option_string]
+ tup = action, option_string, explicit_arg
+ result.append(tup)
+
+ # shouldn't ever get here
+ else:
+ self.error(_('unexpected option string: %s') % option_string)
+
+ # return the collected option tuples
+ return result
+
+ def _get_nargs_pattern(self, action):
+ # in all examples below, we have to allow for '--' args
+ # which are represented as '-' in the pattern
+ nargs = action.nargs
+
+ # the default (None) is assumed to be a single argument
+ if nargs is None:
+ nargs_pattern = '(-*A-*)'
+
+ # allow zero or one arguments
+ elif nargs == OPTIONAL:
+ nargs_pattern = '(-*A?-*)'
+
+ # allow zero or more arguments
+ elif nargs == ZERO_OR_MORE:
+ nargs_pattern = '(-*[A-]*)'
+
+ # allow one or more arguments
+ elif nargs == ONE_OR_MORE:
+ nargs_pattern = '(-*A[A-]*)'
+
+ # allow any number of options or arguments
+ elif nargs == REMAINDER:
+ nargs_pattern = '([-AO]*)'
+
+ # allow one argument followed by any number of options or arguments
+ elif nargs == PARSER:
+ nargs_pattern = '(-*A[-AO]*)'
+
+ # all others should be integers
+ else:
+ nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+
+ # if this is an optional action, -- is not allowed
+ if action.option_strings:
+ nargs_pattern = nargs_pattern.replace('-*', '')
+ nargs_pattern = nargs_pattern.replace('-', '')
+
+ # return the pattern
+ return nargs_pattern
+
+ # ========================
+ # Value conversion methods
+ # ========================
+ def _get_values(self, action, arg_strings):
+ # for everything but PARSER args, strip out '--'
+ if action.nargs not in [PARSER, REMAINDER]:
+ arg_strings = [s for s in arg_strings if s != '--']
+
+ # optional argument produces a default when not present
+ if not arg_strings and action.nargs == OPTIONAL:
+ if action.option_strings:
+ value = action.const
+ else:
+ value = action.default
+ if isinstance(value, basestring):
+ value = self._get_value(action, value)
+ self._check_value(action, value)
+
+ # when nargs='*' on a positional, if there were no command-line
+ # args, use the default if it is anything other than None
+ elif (not arg_strings and action.nargs == ZERO_OR_MORE and
+ not action.option_strings):
+ if action.default is not None:
+ value = action.default
+ else:
+ value = arg_strings
+ self._check_value(action, value)
+
+ # single argument or optional argument produces a single value
+ elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
+ arg_string, = arg_strings
+ value = self._get_value(action, arg_string)
+ self._check_value(action, value)
+
+ # REMAINDER arguments convert all values, checking none
+ elif action.nargs == REMAINDER:
+ value = [self._get_value(action, v) for v in arg_strings]
+
+ # PARSER arguments convert all values, but check only the first
+ elif action.nargs == PARSER:
+ value = [self._get_value(action, v) for v in arg_strings]
+ self._check_value(action, value[0])
+
+ # all other types of nargs produce a list
+ else:
+ value = [self._get_value(action, v) for v in arg_strings]
+ for v in value:
+ self._check_value(action, v)
+
+ # return the converted value
+ return value
+
+ def _get_value(self, action, arg_string):
+ type_func = self._registry_get('type', action.type, action.type)
+ if not _callable(type_func):
+ msg = _('%r is not callable')
+ raise ArgumentError(action, msg % type_func)
+
+ # convert the value to the appropriate type
+ try:
+ result = type_func(arg_string)
+
+ # ArgumentTypeErrors indicate errors
+ except ArgumentTypeError:
+ name = getattr(action.type, '__name__', repr(action.type))
+ msg = str(_sys.exc_info()[1])
+ raise ArgumentError(action, msg)
+
+ # TypeErrors or ValueErrors also indicate errors
+ except (TypeError, ValueError):
+ name = getattr(action.type, '__name__', repr(action.type))
+ msg = _('invalid %s value: %r')
+ raise ArgumentError(action, msg % (name, arg_string))
+
+ # return the converted value
+ return result
+
+ def _check_value(self, action, value):
+ # converted value must be one of the choices (if specified)
+ if action.choices is not None and value not in action.choices:
+ tup = value, ', '.join(map(repr, action.choices))
+ msg = _('invalid choice: %r (choose from %s)') % tup
+ raise ArgumentError(action, msg)
+
+ # =======================
+ # Help-formatting methods
+ # =======================
+ def format_usage(self):
+ formatter = self._get_formatter()
+ formatter.add_usage(self.usage, self._actions,
+ self._mutually_exclusive_groups)
+ return formatter.format_help()
+
+ def format_help(self):
+ formatter = self._get_formatter()
+
+ # usage
+ formatter.add_usage(self.usage, self._actions,
+ self._mutually_exclusive_groups)
+
+ # description
+ formatter.add_text(self.description)
+
+ # positionals, optionals and user-defined groups
+ for action_group in self._action_groups:
+ formatter.start_section(action_group.title)
+ formatter.add_text(action_group.description)
+ formatter.add_arguments(action_group._group_actions)
+ formatter.end_section()
+
+ # epilog
+ formatter.add_text(self.epilog)
+
+ # determine help from format above
+ return formatter.format_help()
+
+ def format_version(self):
+ import warnings
+ warnings.warn(
+ 'The format_version method is deprecated -- the "version" '
+ 'argument to ArgumentParser is no longer supported.',
+ DeprecationWarning)
+ formatter = self._get_formatter()
+ formatter.add_text(self.version)
+ return formatter.format_help()
+
+ def _get_formatter(self):
+ return self.formatter_class(prog=self.prog)
+
+ # =====================
+ # Help-printing methods
+ # =====================
+ def print_usage(self, file=None):
+ if file is None:
+ file = _sys.stdout
+ self._print_message(self.format_usage(), file)
+
+ def print_help(self, file=None):
+ if file is None:
+ file = _sys.stdout
+ self._print_message(self.format_help(), file)
+
+ def print_version(self, file=None):
+ import warnings
+ warnings.warn(
+ 'The print_version method is deprecated -- the "version" '
+ 'argument to ArgumentParser is no longer supported.',
+ DeprecationWarning)
+ self._print_message(self.format_version(), file)
+
+ def _print_message(self, message, file=None):
+ if message:
+ if file is None:
+ file = _sys.stderr
+ file.write(message)
+
+ # ===============
+ # Exiting methods
+ # ===============
+ def exit(self, status=0, message=None):
+ if message:
+ self._print_message(message, _sys.stderr)
+ _sys.exit(status)
+
+ def error(self, message):
+ """error(message: string)
+
+ Prints a usage message incorporating the message to stderr and
+ exits.
+
+ If you override this in a subclass, it should not return -- it
+ should either exit or raise an exception.
+ """
+ self.print_usage(_sys.stderr)
+ self.exit(2, _('%s: error: %s\n') % (self.prog, message))
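+
+
+ # Editorial usage sketch (not part of the vendored module): overriding
+ # error() as its docstring above describes, so parsing failures raise an
+ # exception instead of exiting. Stock argparse module; the class name
+ # and option are invented.
+ import argparse
+
+ class NonExitingParser(argparse.ArgumentParser):
+     def error(self, message):
+         # Must not return normally: raise so callers can recover.
+         raise ValueError('argument parsing failed: %s' % message)
+
+ parser = NonExitingParser(prog='example')
+ parser.add_argument('--count', type=int)
+ try:
+     parser.parse_args(['--count', 'oops'])
+ except ValueError as exc:
+     assert 'invalid int value' in str(exc)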
diff --git a/MSH-PIC/phoenix-hbase/bin/core-site.xml b/MSH-PIC/phoenix-hbase/bin/core-site.xml
new file mode 100644
index 0000000..f380e36
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/home/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
diff --git a/MSH-PIC/phoenix-hbase/bin/daemon.py b/MSH-PIC/phoenix-hbase/bin/daemon.py
new file mode 100644
index 0000000..bb64148
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/daemon.py
@@ -0,0 +1,999 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+# daemon/daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <[email protected]>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2004–2005 Chad J. Schroeder
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+#
+# Apache Phoenix note: this file is `daemon.py` from the package
+# `python-daemon 2.0.5`, https://pypi.python.org/pypi/python-daemon/
+#
+# The class `PidFile` was added for adapting the `lockfile` package's interface
+# without depending on yet another 3rd party package. Based on example from
+# http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
+#
+
+""" Daemon process behaviour.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import resource
+import errno
+import signal
+import socket
+import atexit
+import fcntl
+import time
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+
+class DaemonError(Exception):
+ """ Base exception class for errors from this module. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonOSEnvironmentError(DaemonError, OSError):
+ """ Exception raised when daemon OS environment setup receives error. """
+
+
+class DaemonProcessDetachError(DaemonError, OSError):
+ """ Exception raised when process detach fails. """
+
+
+class DaemonContext:
+ """ Context for turning the current program into a daemon process.
+
+ A `DaemonContext` instance represents the behaviour settings and
+ process context for the program when it becomes a daemon. The
+ behaviour and environment is customised by setting options on the
+ instance, before calling the `open` method.
+
+ Each option can be passed as a keyword argument to the `DaemonContext`
+ constructor, or subsequently altered by assigning to an attribute on
+ the instance at any time prior to calling `open`. That is, for
+ options named `wibble` and `wubble`, the following invocation::
+
+ foo = daemon.DaemonContext(wibble=bar, wubble=baz)
+ foo.open()
+
+ is equivalent to::
+
+ foo = daemon.DaemonContext()
+ foo.wibble = bar
+ foo.wubble = baz
+ foo.open()
+
+ The following options are defined.
+
+ `files_preserve`
+ :Default: ``None``
+
+ List of files that should *not* be closed when starting the
+ daemon. If ``None``, all open file descriptors will be closed.
+
+ Elements of the list are file descriptors (as returned by a file
+ object's `fileno()` method) or Python `file` objects. Each
+ specifies a file that is not to be closed during daemon start.
+
+ `chroot_directory`
+ :Default: ``None``
+
+ Full path to a directory to set as the effective root directory of
+ the process. If ``None``, specifies that the root directory is not
+ to be changed.
+
+ `working_directory`
+ :Default: ``'/'``
+
+ Full path of the working directory to which the process should
+ change on daemon start.
+
+ Since a filesystem cannot be unmounted if a process has its
+ current working directory on that filesystem, this should either
+ be left at default or set to a directory that is a sensible “home
+ directory” for the daemon while it is running.
+
+ `umask`
+ :Default: ``0``
+
+ File access creation mask (“umask”) to set for the process on
+ daemon start.
+
+ A daemon should not rely on the parent process's umask value,
+ which is beyond its control and may prevent creating a file with
+ the required access mode. So when the daemon context opens, the
+ umask is set to an explicit known value.
+
+ If the conventional value of 0 is too open, consider setting a
+ value such as 0o022, 0o027, 0o077, or another specific value.
+ Otherwise, ensure the daemon creates every file with an
+ explicit access mode for the purpose.
+
+ `pidfile`
+ :Default: ``None``
+
+ Context manager for a PID lock file. When the daemon context opens
+ and closes, it enters and exits the `pidfile` context manager.
+
+ `detach_process`
+ :Default: ``None``
+
+ If ``True``, detach the process context when opening the daemon
+ context; if ``False``, do not detach.
+
+ If unspecified (``None``) during initialisation of the instance,
+ this will be set to ``True`` by default, and ``False`` only if
+ detaching the process is determined to be redundant; for example,
+ in the case when the process was started by `init`, by `initd`, or
+ by `inetd`.
+
+ `signal_map`
+ :Default: system-dependent
+
+ Mapping from operating system signals to callback actions.
+
+ The mapping is used when the daemon context opens, and determines
+ the action for each signal's signal handler:
+
+ * A value of ``None`` will ignore the signal (by setting the
+ signal action to ``signal.SIG_IGN``).
+
+ * A string value will be used as the name of an attribute on the
+ ``DaemonContext`` instance. The attribute's value will be used
+ as the action for the signal handler.
+
+ * Any other value will be used as the action for the
+ signal handler. See the ``signal.signal`` documentation
+ for details of the signal handler interface.
+
+ The default value depends on which signals are defined on the
+ running system. Each item from the list below whose signal is
+ actually defined in the ``signal`` module will appear in the
+ default map:
+
+ * ``signal.SIGTTIN``: ``None``
+
+ * ``signal.SIGTTOU``: ``None``
+
+ * ``signal.SIGTSTP``: ``None``
+
+ * ``signal.SIGTERM``: ``'terminate'``
+
+ Depending on how the program will interact with its child
+ processes, it may need to specify a signal map that
+ includes the ``signal.SIGCHLD`` signal (received when a
+ child process exits). See the specific operating system's
+ documentation for more detail on how to determine what
+ circumstances dictate the need for signal handlers.
+
+ `uid`
+ :Default: ``os.getuid()``
+
+ `gid`
+ :Default: ``os.getgid()``
+
+ The user ID (“UID”) value and group ID (“GID”) value to switch
+ the process to on daemon start.
+
+ The default values, the real UID and GID of the process, will
+ relinquish any effective privilege elevation inherited by the
+ process.
+
+ `prevent_core`
+ :Default: ``True``
+
+ If true, prevents the generation of core files, in order to avoid
+ leaking sensitive information from daemons run as `root`.
+
+ `stdin`
+ :Default: ``None``
+
+ `stdout`
+ :Default: ``None``
+
+ `stderr`
+ :Default: ``None``
+
+ Each of `stdin`, `stdout`, and `stderr` is a file-like object
+ which will be used as the new file for the standard I/O stream
+ `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
+ should therefore be open, with a minimum of mode 'r' in the case
+ of `stdin`, and minimum of mode 'w+' in the case of `stdout` and
+ `stderr`.
+
+ If the object has a `fileno()` method that returns a file
+ descriptor, the corresponding file will be excluded from being
+ closed during daemon start (that is, it will be treated as though
+ it were listed in `files_preserve`).
+
+ If ``None``, the corresponding system stream is re-bound to the
+ file named by `os.devnull`.
+
+ """
+
+ __metaclass__ = type
+
+ def __init__(
+ self,
+ chroot_directory=None,
+ working_directory="/",
+ umask=0,
+ uid=None,
+ gid=None,
+ prevent_core=True,
+ detach_process=None,
+ files_preserve=None,
+ pidfile=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ signal_map=None,
+ ):
+ """ Set up a new instance. """
+ self.chroot_directory = chroot_directory
+ self.working_directory = working_directory
+ self.umask = umask
+ self.prevent_core = prevent_core
+ self.files_preserve = files_preserve
+ self.pidfile = pidfile
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+
+ if uid is None:
+ uid = os.getuid()
+ self.uid = uid
+ if gid is None:
+ gid = os.getgid()
+ self.gid = gid
+
+ if detach_process is None:
+ detach_process = is_detach_process_context_required()
+ self.detach_process = detach_process
+
+ if signal_map is None:
+ signal_map = make_default_signal_map()
+ self.signal_map = signal_map
+
+ self._is_open = False
+
+ @property
+ def is_open(self):
+ """ ``True`` if the instance is currently open. """
+ return self._is_open
+
+ def open(self):
+ """ Become a daemon process.
+
+ :return: ``None``.
+
+ Open the daemon context, turning the current program into a daemon
+ process. This performs the following steps:
+
+ * If this instance's `is_open` property is true, return
+ immediately. This makes it safe to call `open` multiple times on
+ an instance.
+
+ * If the `prevent_core` attribute is true, set the resource limits
+ for the process to prevent any core dump from the process.
+
+ * If the `chroot_directory` attribute is not ``None``, set the
+ effective root directory of the process to that directory (via
+ `os.chroot`).
+
+ This allows running the daemon process inside a “chroot gaol”
+ as a means of limiting the system's exposure to rogue behaviour
+ by the process. Note that the specified directory needs to
+ already be set up for this purpose.
+
+ * Set the process UID and GID to the `uid` and `gid` attribute
+ values.
+
+ * Close all open file descriptors. This excludes those listed in
+ the `files_preserve` attribute, and those that correspond to the
+ `stdin`, `stdout`, or `stderr` attributes.
+
+ * Change current working directory to the path specified by the
+ `working_directory` attribute.
+
+ * Reset the file access creation mask to the value specified by
+ the `umask` attribute.
+
+ * If the `detach_process` option is true, detach the current
+ process into its own process group, and disassociate from any
+ controlling terminal.
+
+ * Set signal handlers as specified by the `signal_map` attribute.
+
+ * If any of the attributes `stdin`, `stdout`, `stderr` are not
+ ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
+ and/or `sys.stderr` to the files represented by the
+ corresponding attributes. Where the attribute has a file
+ descriptor, the descriptor is duplicated (instead of re-binding
+ the name).
+
+ * If the `pidfile` attribute is not ``None``, enter its context
+ manager.
+
+ * Mark this instance as open (for the purpose of future `open` and
+ `close` calls).
+
+ * Register the `close` method to be called during Python's exit
+ processing.
+
+ When the function returns, the running program is a daemon
+ process.
+
+ """
+ if self.is_open:
+ return
+
+ if self.chroot_directory is not None:
+ change_root_directory(self.chroot_directory)
+
+ if self.prevent_core:
+ prevent_core_dump()
+
+ change_file_creation_mask(self.umask)
+ change_working_directory(self.working_directory)
+ change_process_owner(self.uid, self.gid)
+
+ if self.detach_process:
+ detach_process_context(self.pidfile)
+
+ signal_handler_map = self._make_signal_handler_map()
+ set_signal_handlers(signal_handler_map)
+
+ exclude_fds = self._get_exclude_file_descriptors()
+ close_all_open_files(exclude=exclude_fds)
+
+ redirect_stream(sys.stdin, self.stdin)
+ redirect_stream(sys.stdout, self.stdout)
+ redirect_stream(sys.stderr, self.stderr)
+
+ if self.pidfile is not None:
+ self.pidfile.__enter__()
+
+ self._is_open = True
+
+ register_atexit_function(self.close)
+
+ def __enter__(self):
+ """ Context manager entry point. """
+ self.open()
+ return self
+
+ def close(self):
+ """ Exit the daemon process context.
+
+ :return: ``None``.
+
+ Close the daemon context. This performs the following steps:
+
+ * If this instance's `is_open` property is false, return
+ immediately. This makes it safe to call `close` multiple times
+ on an instance.
+
+ * If the `pidfile` attribute is not ``None``, exit its context
+ manager.
+
+ * Mark this instance as closed (for the purpose of future `open`
+ and `close` calls).
+
+ """
+ if not self.is_open:
+ return
+
+ if self.pidfile is not None:
+ # Follow the interface for telling a context manager to exit,
+ # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
+ self.pidfile.__exit__(None, None, None)
+
+ self._is_open = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ Context manager exit point. """
+ self.close()
+
+ def terminate(self, signal_number, stack_frame):
+ """ Signal handler for end-process signals.
+
+ :param signal_number: The OS signal number received.
+ :param stack_frame: The frame object at the point the
+ signal was received.
+ :return: ``None``.
+
+ Signal handler for the ``signal.SIGTERM`` signal. Performs the
+ following step:
+
+ * Raise a ``SystemExit`` exception explaining the signal.
+
+ """
+ exception = SystemExit(
+ "Terminating on signal {signal_number!r}".format(
+ signal_number=signal_number))
+ raise exception
+
+ def _get_exclude_file_descriptors(self):
+ """ Get the set of file descriptors to exclude closing.
+
+ :return: A set containing the file descriptors for the
+ files to be preserved.
+
+ The file descriptors to be preserved are those from the
+ items in `files_preserve`, and also each of `stdin`,
+ `stdout`, and `stderr`. For each item:
+
+ * If the item is ``None``, it is omitted from the return
+ set.
+
+ * If the item's ``fileno()`` method returns a value, that
+ value is in the return set.
+
+ * Otherwise, the item is in the return set verbatim.
+
+ """
+ files_preserve = self.files_preserve
+ if files_preserve is None:
+ files_preserve = []
+ files_preserve.extend(
+ item for item in [self.stdin, self.stdout, self.stderr]
+ if hasattr(item, 'fileno'))
+
+ exclude_descriptors = set()
+ for item in files_preserve:
+ if item is None:
+ continue
+ file_descriptor = _get_file_descriptor(item)
+ if file_descriptor is not None:
+ exclude_descriptors.add(file_descriptor)
+ else:
+ exclude_descriptors.add(item)
+
+ return exclude_descriptors
+
+ def _make_signal_handler(self, target):
+ """ Make the signal handler for a specified target object.
+
+ :param target: A specification of the target for the
+ handler; see below.
+ :return: The value for use by `signal.signal()`.
+
+ If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
+ is a text string, return the attribute of this instance named
+ by that string. Otherwise, return `target` itself.
+
+ """
+ if target is None:
+ result = signal.SIG_IGN
+ elif isinstance(target, unicode):
+ name = target
+ result = getattr(self, name)
+ else:
+ result = target
+
+ return result
+
+ def _make_signal_handler_map(self):
+ """ Make the map from signals to handlers for this instance.
+
+ :return: The constructed signal map for this instance.
+
+ Construct a map from signal numbers to handlers for this
+ context instance, suitable for passing to
+ `set_signal_handlers`.
+
+ """
+ signal_handler_map = dict(
+ (signal_number, self._make_signal_handler(target))
+ for (signal_number, target) in self.signal_map.items())
+ return signal_handler_map
+
+
+def _get_file_descriptor(obj):
+ """ Get the file descriptor, if the object has one.
+
+ :param obj: The object expected to be a file-like object.
+ :return: The file descriptor iff the file supports it; otherwise
+ ``None``.
+
+ The object may be a non-file object. It may also be a
+ file-like object with no support for a file descriptor. In
+ either case, return ``None``.
+
+ """
+ file_descriptor = None
+ if hasattr(obj, 'fileno'):
+ try:
+ file_descriptor = obj.fileno()
+ except ValueError:
+ # The item doesn't support a file descriptor.
+ pass
+
+ return file_descriptor
+
+
+def change_working_directory(directory):
+ """ Change the working directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ """
+ try:
+ os.chdir(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change working directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_root_directory(directory):
+ """ Change the root directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ Set the current working directory, then the process root directory,
+ to the specified `directory`. Requires appropriate OS privileges
+ for this process.
+
+ """
+ try:
+ os.chdir(directory)
+ os.chroot(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change root directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_file_creation_mask(mask):
+ """ Change the file creation mask for this process.
+
+ :param mask: The numeric file creation mask to set.
+ :return: ``None``.
+
+ """
+ try:
+ os.umask(mask)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask ({exc})".format(exc=exc))
+ raise error
+
+
+def change_process_owner(uid, gid):
+ """ Change the owning UID and GID of this process.
+
+ :param uid: The target UID for the daemon process.
+ :param gid: The target GID for the daemon process.
+ :return: ``None``.
+
+ Set the GID then the UID of the process (in that order, to avoid
+ permission errors) to the specified `gid` and `uid` values.
+ Requires appropriate OS privileges for this process.
+
+ """
+ try:
+ os.setgid(gid)
+ os.setuid(uid)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change process owner ({exc})".format(exc=exc))
+ raise error
+
+
+def prevent_core_dump():
+ """ Prevent this process from generating a core dump.
+
+ :return: ``None``.
+
+ Set the soft and hard limits for core dump size to zero. On Unix,
+ this entirely prevents the process from creating a core dump.
+
+ """
+ core_resource = resource.RLIMIT_CORE
+
+ try:
+ # Ensure the resource limit exists on this platform, by requesting
+ # its current value.
+ core_limit_prev = resource.getrlimit(core_resource)
+ except ValueError as exc:
+ error = DaemonOSEnvironmentError(
+ "System does not support RLIMIT_CORE resource limit"
+ " ({exc})".format(exc=exc))
+ raise error
+
+ # Set hard and soft limits to zero, i.e. no core dump at all.
+ core_limit = (0, 0)
+ resource.setrlimit(core_resource, core_limit)
+
+
+def detach_process_context(pidfile):
+ """ Detach the process context from parent and session.
+
+ :return: ``None``.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+
+ """
+
+ def fork_then_exit_parent(error_message):
+ """ Fork a child process, then exit the parent process.
+
+ :param error_message: Message for the exception in case of a
+ detach failure.
+ :return: ``None``.
+ :raise DaemonProcessDetachError: If the fork fails.
+
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
+ while not os.path.exists(pidfile.path):
+ time.sleep(0.1)
+ os._exit(0)
+ except OSError as exc:
+ error = DaemonProcessDetachError(
+ "{message}: [{exc.errno:d}] {exc.strerror}".format(
+ message=error_message, exc=exc))
+ raise error
+
+ fork_then_exit_parent(error_message="Failed first fork")
+ os.setsid()
+ fork_then_exit_parent(error_message="Failed second fork")
+
+
+def is_process_started_by_init():
+ """ Determine whether the current process is started by `init`.
+
+ :return: ``True`` iff the parent process is `init`; otherwise
+ ``False``.
+
+ The `init` process is the one with process ID of 1.
+
+ """
+ result = False
+
+ init_pid = 1
+ if os.getppid() == init_pid:
+ result = True
+
+ return result
+
+
+def is_socket(fd):
+ """ Determine whether the file descriptor is a socket.
+
+ :param fd: The file descriptor to interrogate.
+ :return: ``True`` iff the file descriptor is a socket; otherwise
+ ``False``.
+
+ Query the socket type of `fd`. If there is no error, the file is a
+ socket.
+
+ """
+ result = False
+
+ file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+
+ try:
+ socket_type = file_socket.getsockopt(
+ socket.SOL_SOCKET, socket.SO_TYPE)
+ except socket.error as exc:
+ exc_errno = exc.args[0]
+ if exc_errno == errno.ENOTSOCK:
+ # Socket operation on non-socket.
+ pass
+ else:
+ # Some other socket error.
+ result = True
+ else:
+ # No error getting socket type.
+ result = True
+
+ return result
+
+
+def is_process_started_by_superserver():
+ """ Determine whether the current process is started by the superserver.
+
+ :return: ``True`` if this process was started by the internet
+ superserver; otherwise ``False``.
+
+ The internet superserver creates a network socket, and
+ attaches it to the standard streams of the child process. If
+ that is the case for this process, return ``True``, otherwise
+ ``False``.
+
+ """
+ result = False
+
+ stdin_fd = sys.__stdin__.fileno()
+ if is_socket(stdin_fd):
+ result = True
+
+ return result
+
+
+def is_detach_process_context_required():
+ """ Determine whether detaching the process context is required.
+
+ :return: ``True`` iff detaching the process context is required;
+ otherwise ``False``.
+
+ The process environment is interrogated for the following:
+
+ * Process was started by `init`; or
+
+ * Process was started by `inetd`.
+
+ If any of the above are true, the process is deemed to be already
+ detached.
+
+ """
+ result = True
+ if is_process_started_by_init() or is_process_started_by_superserver():
+ result = False
+
+ return result
+
+
+def close_file_descriptor_if_open(fd):
+ """ Close a file descriptor if already open.
+
+ :param fd: The file descriptor to close.
+ :return: ``None``.
+
+ Close the file descriptor `fd`, suppressing an error in the
+ case the file was not open.
+
+ """
+ try:
+ os.close(fd)
+ except EnvironmentError as exc:
+ if exc.errno == errno.EBADF:
+ # File descriptor was not open.
+ pass
+ else:
+ error = DaemonOSEnvironmentError(
+ "Failed to close file descriptor {fd:d} ({exc})".format(
+ fd=fd, exc=exc))
+ raise error
+
+
+MAXFD = 2048
+
+def get_maximum_file_descriptors():
+ """ Get the maximum number of open file descriptors for this process.
+
+ :return: The number (integer) to use as the maximum number of open
+ files for this process.
+
+ The maximum is the process hard resource limit of maximum number of
+ open file descriptors. If the limit is “infinity”, a default value
+ of ``MAXFD`` is returned.
+
+ """
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ result = limits[1]
+ if result == resource.RLIM_INFINITY:
+ result = MAXFD
+ return result
+
+
+def close_all_open_files(exclude=set()):
+ """ Close all open file descriptors.
+
+ :param exclude: Collection of file descriptors to skip when closing
+ files.
+ :return: ``None``.
+
+ Closes every file descriptor (if open) of this process. If
+ specified, `exclude` is a set of file descriptors to *not*
+ close.
+
+ """
+ maxfd = get_maximum_file_descriptors()
+ for fd in reversed(range(maxfd)):
+ if fd not in exclude:
+ close_file_descriptor_if_open(fd)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+ :param system_stream: A file object representing a standard I/O
+ stream.
+ :param target_stream: The target file object for the redirected
+ stream, or ``None`` to specify the null device.
+ :return: ``None``.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
+ os.dup2(target_fd, system_stream.fileno())
+
+
+def make_default_signal_map():
+ """ Make the default signal map for this system.
+
+ :return: A mapping from signal number to handler object.
+
+ The signals available differ by system. The map will not contain
+ any signals not defined on the running system.
+
+ """
+ name_map = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in name_map.items()
+ if hasattr(signal, name))
+
+ return signal_map
+
+
+def set_signal_handlers(signal_handler_map):
+ """ Set the signal handlers as specified.
+
+ :param signal_handler_map: A map from signal number to handler
+ object.
+ :return: ``None``.
+
+ See the `signal` module for details on signal numbers and signal
+ handlers.
+
+ """
+ for (signal_number, handler) in signal_handler_map.items():
+ signal.signal(signal_number, handler)
+
+
+def register_atexit_function(func):
+ """ Register a function for processing at program exit.
+
+ :param func: A callable function expecting no arguments.
+ :return: ``None``.
+
+ The function `func` is registered for a call with no arguments
+ at program exit.
+
+ """
+ atexit.register(func)
+
+
+def _chain_exception_from_existing_exception_context(exc, as_cause=False):
+ """ Decorate the specified exception with the existing exception context.
+
+ :param exc: The exception instance to decorate.
+ :param as_cause: If true, the existing context is declared to be
+ the cause of the exception.
+ :return: ``None``.
+
+ :PEP:`344` describes syntax and attributes (`__traceback__`,
+ `__context__`, `__cause__`) for use in exception chaining.
+
+ Python 2 does not have that syntax, so this function decorates
+ the exception with values from the current exception context.
+
+ """
+ (existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
+ if as_cause:
+ exc.__cause__ = existing_exc
+ else:
+ exc.__context__ = existing_exc
+ exc.__traceback__ = existing_traceback
+
+class PidFile(object):
+ """
+Adapter between a file path string and the `lockfile` API [0]. Based on the
+example found at [1].
+
+[0]: https://pythonhosted.org/lockfile/lockfile.html
+[1]: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
+"""
+ def __init__(self, path, enter_err_msg=None):
+ self.path = path
+ self.enter_err_msg = enter_err_msg
+ self.pidfile = open(self.path, 'a+')
+ try:
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_UN)
+ self.pidfile.close()
+ os.remove(self.path)
+ except IOError:
+ sys.exit(self.enter_err_msg)
+
+ def __enter__(self):
+ self.pidfile = open(self.path, 'a+')
+ try:
+ fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ sys.exit(self.enter_err_msg)
+ self.pidfile.seek(0)
+ self.pidfile.truncate()
+ self.pidfile.write(str(os.getpid()))
+ self.pidfile.flush()
+ self.pidfile.seek(0)
+ return self.pidfile
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ try:
+ self.pidfile.close()
+ except IOError as err:
+ if err.errno != 9:
+ raise
+ os.remove(self.path)
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
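For orientation, here is a minimal usage sketch of the module added above, assuming it is importable as `daemon` and that the pid-file path is writable; the names /tmp/example.pid and main_loop are illustrative only and not part of the patch. In this vendored variant, DaemonContext detaches first and enters the PidFile adapter afterwards, so the foreground parent only exits once the daemonised child has written its pid file:

    import time
    import daemon  # the vendored daemon.py shown above

    def main_loop():
        while True:
            time.sleep(60)

    pidfile = daemon.PidFile('/tmp/example.pid',
                             enter_err_msg='daemon already running')
    with daemon.DaemonContext(pidfile=pidfile, umask=0o022):
        # From here on the process is detached and stdio is bound to os.devnull.
        main_loop()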
diff --git a/MSH-PIC/phoenix-hbase/bin/end2endTest.py b/MSH-PIC/phoenix-hbase/bin/end2endTest.py
new file mode 100644
index 0000000..40954d1
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/end2endTest.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+# !!! PLEASE READ !!!
+# !!! Do NOT run this script against a production cluster because it wipes out
+# !!! the cluster's existing data
+
+from __future__ import print_function
+import os
+import subprocess
+import sys
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+phoenix_jar_path = os.getenv(phoenix_utils.phoenix_class_path, phoenix_utils.phoenix_test_jar_path)
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client side property override
+hbase_library_path = os.getenv('HBASE_LIBRARY_DIR', '')
+
+print("Current ClassPath=%s:%s:%s" % (phoenix_utils.hbase_conf_dir, phoenix_jar_path,
+ hbase_library_path))
+
+java_cmd = "java -cp " + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_jar_path + os.pathsep + \
+ hbase_library_path + " org.apache.phoenix.end2end.End2EndTestDriver " + \
+ ' '.join(sys.argv[1:])
+
+os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
diff --git a/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-hbase.properties b/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-hbase.properties
new file mode 100644
index 0000000..bafd444
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-hbase.properties
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# HBase Server Sink Configuration
+#################################
+#
+# Configuration for the metrics2 system for the HBase RegionServers
+# to enable phoenix trace collection on the HBase servers.
+#
+# See hadoop-metrics2-phoenix.properties for how these configurations
+# are utilized.
+#
+# Either this file can be used in place of the standard
+# hadoop-metrics2-hbase.properties file or the below
+# properties should be added to the file of the same name on
+# the HBase classpath (likely in the HBase conf/ folder)
+
+# ensure that we receive traces on the server
+hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# Tell the sink where to write the metrics
+hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
+# Only handle traces with a context of "tracing"
+hbase.sink.tracing.context=tracing
diff --git a/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-phoenix.properties b/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-phoenix.properties
new file mode 100644
index 0000000..f8c7223
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/hadoop-metrics2-phoenix.properties
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Metrics properties for phoenix
+####################################
+#
+#There are two options with file names:
+# 1. hadoop-metrics2-[prefix].properties
+# 2. hadoop-metrics2.properties
+# Either will be loaded by the metrics system (but not both).
+#
+# NOTE: The metrics system is only initialized once per JVM (but does ref-counting, so we can't
+# shutdown and restart), so we only load the first prefix that we find. Generally, this will be
+# phoenix (unless someone else registers first, but for many clients, there should only be one).
+#
+# Usually, you would use hadoop-metrics2-phoenix.properties, but we use the generic
+# hadoop-metrics2.properties to ensure these are loaded regardless of where we are running,
+# assuming there isn't another config on the classpath.
+
+# When specifying sinks, the syntax to use is:
+# [prefix].[source|sink].[instance].[options]
+# The interesting thing to note is that [instance] can literally be anything (as long as it's
+# not zero-length). It is only there to differentiate the properties that are stored for
+# objects of the same type (e.g. differentiating between two phoenix.sink objects).
+#
+# You could use the following lines in your config
+#
+# phoenix.sink.thingA.class=com.your-company.SpecialSink
+# phoenix.sink.thingA.option1=value1
+#
+# and also
+#
+# phoenix.sink.thingB.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# phoenix.sink.thingB.doGoodStuff=true
+#
+# which will create both SpecialSink and PhoenixMetricsSink and register them
+# as MetricsSinks, but SpecialSink will only see option1=value1 in its
+# configuration; similarly, the instantiated PhoenixMetricsSink will
+# only see doGoodStuff=true in its configuration
+#
+# See the javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+# Uncomment to NOT start MBeans
+# *.source.start_mbeans=false
+
+# Sample from all the sources every 10 seconds
+*.period=10
+
+# Write Traces to Phoenix
+##########################
+# ensure that we receive traces on the server
+phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
+# Tell the sink where to write the metrics
+phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
+# Only handle traces with a context of "tracing"
+phoenix.sink.tracing.context=tracing
diff --git a/MSH-PIC/phoenix-hbase/bin/hbase-omid-client-config.yml b/MSH-PIC/phoenix-hbase/bin/hbase-omid-client-config.yml
new file mode 100644
index 0000000..b3301d4
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/hbase-omid-client-config.yml
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Omid TSO connection
+connectionType: !!org.apache.omid.tso.client.OmidClientConfiguration$ConnType DIRECT
+connectionString: "localhost:54758"
+
+# When Omid is working in High Availability mode, two or more replicas of the TSO server are running in primary/backup
+# mode. When a TSO server replica is elected as master, it publishes its address through ZK. In order to configure
+# the Omid client to access the TSO server in HA mode:
+# 1) set 'connectionType' to !!org.apache.omid.tso.client.OmidClientConfiguration$ConnType HA
+# 2) set 'connectionString' to the ZK cluster connection string where the server is publishing its address
+zkConnectionTimeoutInSecs: 10
+# In HA mode, make sure that the next settings match same settings on the TSO server side
+zkNamespace: "omid"
+zkCurrentTsoPath: "/current-tso"
+
+# Configure whether the TM performs the post-commit actions for a tx (update shadow cells and clean commit table entry)
+# before returning control to the client (SYNC) or in parallel (ASYNC)
+postCommitMode: !!org.apache.omid.tso.client.OmidClientConfiguration$PostCommitMode ASYNC
diff --git a/MSH-PIC/phoenix-hbase/bin/hbase-site.xml b/MSH-PIC/phoenix-hbase/bin/hbase-site.xml
new file mode 100644
index 0000000..0e6cb16
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/hbase-site.xml
@@ -0,0 +1,205 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://ns1/hbase</value>
+ </property>
+
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value>192.168.20.193,192.168.20.194,192.168.20.195</value>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>2181</value>
+ </property>
+
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ </property>
+
+<!--
+ <property>
+ <name>hbase.client.keyvalue.maxsize</name>
+ <value>1073741824</value>
+ </property>
+-->
+
+ <property>
+ <name>hbase.server.keyvalue.maxsize</name>
+ <value>5368709120</value>
+ </property>
+
+ <property>
+ <name>zookeeper.znode.parent</name>
+ <value>/hbase</value>
+ </property>
+
+ <property>
+ <name>hbase.rpc.timeout</name>
+ <value>300000</value>
+ </property>
+
+ <property>
+ <name>zookeeper.session.timeout</name>
+ <value>300000</value>
+ </property>
+
+ <!-- Files smaller than this value are merged during mob compaction -->
+ <property>
+ <name>hbase.mob.compaction.mergeable.threshold</name>
+ <value>1342177280</value>
+ </property>
+
+ <property>
+ <name>hbase.mob.file.cache.size</name>
+ <value>1000</value>
+ </property>
+
+ <!-- Mob cache eviction period -->
+ <property>
+ <name>hbase.mob.cache.evict.period</name>
+ <value>3600</value>
+ </property>
+
+ <!-- Ratio of cached files kept after a mob cache eviction; eviction is triggered when the cache exceeds hbase.mob.file.cache.size -->
+ <property>
+ <name>hbase.mob.cache.evict.remain.ratio</name>
+ <value>0.5f</value>
+ </property>
+
+ <!-- Enable MOB support -->
+ <property>
+ <name>hfile.format.version</name>
+ <value>3</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.memstore.flush.size</name>
+ <value>534217728</value>
+ </property>
+
+ <!-- Number of flush threads -->
+ <property>
+ <name>hbase.hstore.flusher.count</name>
+ <value>8</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size.lower.limit</name>
+ <value>0.4</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.size</name>
+ <value>0.45</value>
+ </property>
+
+ <property>
+ <name>hfile.block.cache.size</name>
+ <value>0.3</value>
+ </property>
+
+ <property>
+ <name>hbase.hregion.memstore.block.multiplier</name>
+ <value>10</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.server.max.callqueue.length</name>
+ <value>1073741824</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>40</value>
+ <description>Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.</description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.maxClientCnxns</name>
+ <value>1000</value>
+ </property>
+
+ <property>
+ <name>hbase.ipc.max.request.size</name>
+ <value>1173741824</value>
+ </property>
+
+ <property>
+ <name>hbase.hstore.blockingWaitTime</name>
+ <value>30000</value>
+ </property>
+ <property>
+ <name>hbase.hstore.blockingStoreFiles</name>
+ <value>100</value>
+ </property>
+
+ <!-- Split parameters -->
+ <property>
+  <name>hbase.hregion.max.filesize</name>
+  <value>107374182400</value>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.regionSplitLimit</name>
+ <value>1000</value>
+ </property>
+
+<!-- phoenix -->
+ <property>
+    <name>phoenix.schema.isNamespaceMappingEnabled</name>
+    <value>true</value>
+ </property>
+ <property>
+   <name>phoenix.schema.mapSystemTablesToNamespace</name>
+   <value>true</value>
+ </property>
+
+<!-- RsGroup -->
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
+ </property>
+
+ <property>
+ <name>hbase.master.loadbalancer.class</name>
+ <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
+ </property>
+
+ <!-- Per-table region load balancing -->
+ <property>
+   <name>hbase.master.loadbalance.bytable</name>
+   <value>true</value>
+ </property>
+
+</configuration>
diff --git a/MSH-PIC/phoenix-hbase/bin/hdfs-site.xml b/MSH-PIC/phoenix-hbase/bin/hdfs-site.xml
new file mode 100644
index 0000000..6d93805
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the host where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.20.193:9000</value>
+ </property>
+ <!-- HTTP address of nn1 (externally accessible address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.20.193:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the host where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.20.194:9000</value>
+ </property>
+ <!-- HTTP address of nn2 (externally accessible address) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.20.194:50070</value>
+ </property>
+ <!-- Where the NameNode's edit-log metadata is stored on the JournalNodes (usually co-deployed with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.20.193:8485;192.168.20.194:8485;192.168.20.195:8485/ns1</value>
+ </property>
+ <!-- Local disk directory where the JournalNode stores its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/home/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Clients access the NameNode (and thus the filesystem) through a proxy; this is the Java class HDFS clients use to communicate with the Active node and determine whether it is active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing method used for automatic failover; several methods are available (see the official documentation). Here sshfence logs in remotely and kills the stale process -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence</value>
+ <value>shell(true)</value>
+ </property>
+ <!-- Passwordless SSH is only needed when the sshfence fencing mechanism is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence fencing mechanism; as above, this can be left unset if failover is handled by a script -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enables automatic failover; can be left unset if automatic failover is not used -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode RPC handler threads; increasing this costs little in extra resources -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode RPC handler threads; increasing this uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth available to the balancer -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space that HDFS will not use, in bytes -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
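Because two NameNodes (nn1, nn2) sit behind the ns1 nameservice with automatic failover, clients should address hdfs://ns1 rather than a specific NameNode; the configured failover proxy provider resolves whichever node is currently active. A hypothetical smoke test (assumes the hdfs CLI is on the PATH and the configs above are in HADOOP_CONF_DIR):

    import subprocess

    # Lists the root of the HA nameservice; the failover proxy provider
    # transparently picks the active NameNode.
    subprocess.run(['hdfs', 'dfs', '-ls', 'hdfs://ns1/'], check=True)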
diff --git a/MSH-PIC/phoenix-hbase/bin/log4j.properties b/MSH-PIC/phoenix-hbase/bin/log4j.properties
new file mode 100644
index 0000000..c39af0b
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/log4j.properties
@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# Define some default values that can be overridden by system properties
+psql.root.logger=WARN,console
+psql.log.dir=.
+psql.log.file=psql.log
+hadoop.log.dir=.
+
+# Define the root logger to the system property "psql.root.logger".
+log4j.rootLogger=${psql.root.logger}
+
+# Logging threshold is ERROR for the query server; the root logger stays at WARN for sqlline clients.
+log4j.threshold=ERROR
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${psql.log.dir}/${psql.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %p %c: %m%n
+
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %p %c{2}: %m%n
+
+# Custom Logging levels
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=ERROR
+log4j.logger.org.apache.hadoop.hbase.HBaseConfiguration=ERROR
+
+# query server packages
+log4j.logger.org.apache.calcite.avatica=ERROR
+log4j.logger.org.apache.phoenix.queryserver.server=ERROR
+log4j.logger.org.eclipse.jetty.server=ERROR
diff --git a/MSH-PIC/phoenix-hbase/bin/performance.py b/MSH-PIC/phoenix-hbase/bin/performance.py
new file mode 100644
index 0000000..16fee48
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/performance.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+from __future__ import print_function
+import os
+import subprocess
+import sys
+import tempfile
+import phoenix_utils
+
+def queryex(description, statement):
+ global statements
+ print("Query # %s - %s" % (description, statement))
+ statements = statements + statement
+
+def delfile(filename):
+ if os.path.exists(filename):
+ os.remove(filename)
+
+def usage():
+ print("Performance script arguments not specified. Usage: performance.py \
+<zookeeper> <row count>")
+ print("Example: performance.py localhost 100000")
+
+
+def createFileWithContent(filename, content):
+ fo = open(filename, "w+")
+ fo.write(content)
+ fo.close()
+
+if len(sys.argv) < 3:
+ usage()
+ sys.exit()
+
+# command line arguments
+zookeeper = sys.argv[1]
+rowcount = sys.argv[2]
+table = "PERFORMANCE_" + sys.argv[2]
+
+# helper variable and functions
+ddl = tempfile.mkstemp(prefix='ddl_', suffix='.sql')[1]
+data = tempfile.mkstemp(prefix='data_', suffix='.csv')[1]
+qry = tempfile.mkstemp(prefix='query_', suffix='.sql')[1]
+statements = ""
+
+phoenix_utils.setPath()
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client side property override
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = x.decode().partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java_cmd = os.path.join(java_home, 'bin', 'java')
+else:
+ java_cmd = 'java'
+
+execute = ('%s $PHOENIX_OPTS -cp "%s%s%s" -Dlog4j.configuration=file:' +
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") +
+ ' org.apache.phoenix.util.PhoenixRuntime -t %s %s ') % \
+ (java_cmd, hbase_config_path, os.pathsep, phoenix_utils.phoenix_client_jar, table, zookeeper)
+
+# Create Table DDL
+createtable = "CREATE TABLE IF NOT EXISTS %s (HOST CHAR(2) NOT NULL,\
+DOMAIN VARCHAR NOT NULL, FEATURE VARCHAR NOT NULL,DATE DATE NOT NULL,\
+USAGE.CORE BIGINT,USAGE.DB BIGINT,STATS.ACTIVE_VISITOR \
+INTEGER CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) \
+SPLIT ON ('CSGoogle','CSSalesforce','EUApple','EUGoogle','EUSalesforce',\
+'NAApple','NAGoogle','NASalesforce');" % (table)
+
+# generate and upsert data
+print("Phoenix Performance Evaluation Script 1.0")
+print("-----------------------------------------")
+
+print("\nCreating performance table...")
+createFileWithContent(ddl, createtable)
+
+exitcode = subprocess.call(execute + ddl, shell=True)
+if exitcode != 0:
+ sys.exit(exitcode)
+
+# Write real,user,sys time on console for the following queries
+queryex("1 - Count", "SELECT COUNT(1) FROM %s;" % (table))
+queryex("2 - Group By First PK", "SELECT HOST FROM %s GROUP BY HOST;" % (table))
+queryex("3 - Group By Second PK", "SELECT DOMAIN FROM %s GROUP BY DOMAIN;" % (table))
+queryex("4 - Truncate + Group By", "SELECT TRUNC(DATE,'DAY') DAY FROM %s GROUP BY TRUNC(DATE,'DAY');" % (table))
+queryex("5 - Filter + Count", "SELECT COUNT(1) FROM %s WHERE CORE<10;" % (table))
+
+print("\nGenerating and upserting data...")
+exitcode = subprocess.call('%s -jar %s %s %s' % (java_cmd, phoenix_utils.testjar, data, rowcount),
+ shell=True)
+if exitcode != 0:
+ sys.exit(exitcode)
+
+print("\n")
+createFileWithContent(qry, statements)
+
+exitcode = subprocess.call(execute + data + ' ' + qry, shell=True)
+if exitcode != 0:
+ sys.exit(exitcode)
+
+# clear temporary files
+delfile(ddl)
+delfile(data)
+delfile(qry)
diff --git a/MSH-PIC/phoenix-hbase/bin/pherf-standalone.py b/MSH-PIC/phoenix-hbase/bin/pherf-standalone.py
new file mode 100644
index 0000000..b87585e
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/pherf-standalone.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+from __future__ import print_function
+from phoenix_utils import tryDecode
+import os
+import subprocess
+import sys
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+args = phoenix_utils.shell_quote(sys.argv[1:])
+
+# HBase configuration folder path (where hbase-site.xml resides) for
+# HBase/Phoenix client side property override
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = tryDecode(x).partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+java_cmd = java +' -Xms512m -Xmx3072m -cp "' + phoenix_utils.pherf_conf_path + os.pathsep + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + os.pathsep + phoenix_utils.phoenix_pherf_jar + \
+ '" -Dlog4j.configuration=file:' + \
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " org.apache.phoenix.pherf.Pherf " + args
+
+os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
diff --git a/MSH-PIC/phoenix-hbase/bin/phoenix_utils.py b/MSH-PIC/phoenix-hbase/bin/phoenix_utils.py
new file mode 100644
index 0000000..126139d
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/phoenix_utils.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+from __future__ import print_function
+import os
+import fnmatch
+import subprocess
+
+def find(pattern, classPaths):
+ paths = classPaths.split(os.pathsep)
+
+ # for each class path
+ for path in paths:
+ # remove * if it's at the end of path
+ if ((path is not None) and (len(path) > 0) and (path[-1] == '*')) :
+ path = path[:-1]
+
+ for root, dirs, files in os.walk(path):
+ # sort the file names so *-client always precedes *-thin-client
+ files.sort()
+ for name in files:
+ if fnmatch.fnmatch(name, pattern):
+ return os.path.join(root, name)
+
+ return ""
+
+def tryDecode(input):
+ """ Python 2/3 compatibility hack
+ """
+ try:
+ return input.decode()
+ except:
+ return input
+
+def findFileInPathWithoutRecursion(pattern, path):
+ if not os.path.exists(path):
+ return ""
+ files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]
+ # sort the file names so *-client always precedes *-thin-client
+ files.sort()
+ for name in files:
+ if fnmatch.fnmatch(name, pattern):
+ return os.path.join(path, name)
+
+ return ""
+
+def which(command):
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(os.path.join(path, command)):
+ return os.path.join(path, command)
+ return None
+
+def findClasspath(command_name):
+ command_path = which(command_name)
+ if command_path is None:
+ # We don't have this command, so we can't get its classpath
+ return ''
+ command = "%s%s" %(command_path, ' classpath')
+ return tryDecode(subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read())
+
+def setPath():
+ PHOENIX_CLIENT_JAR_PATTERN = "phoenix-client-hbase-*[!s].jar"
+ PHOENIX_TRACESERVER_JAR_PATTERN = "phoenix-tracing-webapp-*-runnable.jar"
+ PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar"
+ PHOENIX_PHERF_JAR_PATTERN = "phoenix-pherf-*[!s].jar"
+
+ # Backward support old env variable PHOENIX_LIB_DIR replaced by PHOENIX_CLASS_PATH
+ global phoenix_class_path
+ phoenix_class_path = os.getenv('PHOENIX_LIB_DIR','')
+ if phoenix_class_path == "":
+ phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','')
+
+ global hbase_conf_dir
+ # if HBASE_CONF_DIR set explicitly, use that
+ hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH'))
+ if not hbase_conf_dir:
+ # else fall back to HBASE_HOME
+ if os.getenv('HBASE_HOME'):
+ hbase_conf_dir = os.path.join(os.getenv('HBASE_HOME'), "conf")
+ elif os.name == 'posix':
+ # default to the bigtop configuration dir
+ hbase_conf_dir = '/etc/hbase/conf'
+ else:
+ # Try to provide something valid
+ hbase_conf_dir = '.'
+ global hbase_conf_path # keep conf_path around for backward compatibility
+ hbase_conf_path = hbase_conf_dir
+
+ global current_dir
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ global pherf_conf_path
+ pherf_conf_path = os.path.join(current_dir, "config")
+ pherf_properties_file = find("pherf.properties", pherf_conf_path)
+ if pherf_properties_file == "":
+ pherf_conf_path = os.path.join(current_dir, "..", "phoenix-pherf", "config")
+
+ global phoenix_jar_path
+ phoenix_jar_path = os.path.join(current_dir, "..", "phoenix-client-parent" , "phoenix-client", "target","*")
+
+ global phoenix_client_jar
+ phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_jar_path)
+ if phoenix_client_jar == "":
+ phoenix_client_jar = findFileInPathWithoutRecursion(PHOENIX_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
+ if phoenix_client_jar == "":
+ phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_class_path)
+
+ global phoenix_test_jar_path
+ phoenix_test_jar_path = os.path.join(current_dir, "..", "phoenix-core", "target","*")
+
+ global hadoop_conf
+ hadoop_conf = os.getenv('HADOOP_CONF_DIR', None)
+ if not hadoop_conf:
+ if os.name == 'posix':
+ # Try to provide a sane configuration directory for Hadoop if not otherwise provided.
+ # If there's no jaas file specified by the caller, this is necessary when Kerberos is enabled.
+ hadoop_conf = '/etc/hadoop/conf'
+ else:
+ # Try to provide something valid..
+ hadoop_conf = '.'
+
+ global hadoop_classpath
+ if (os.name != 'nt'):
+ hadoop_classpath = findClasspath('hadoop').rstrip()
+ else:
+ hadoop_classpath = os.getenv('HADOOP_CLASSPATH', '').rstrip()
+
+ global hadoop_common_jar_path
+ hadoop_common_jar_path = os.path.join(current_dir, "..", "phoenix-client", "target","*").rstrip()
+
+ global hadoop_common_jar
+ hadoop_common_jar = find("hadoop-common*.jar", hadoop_common_jar_path)
+
+ global hadoop_hdfs_jar_path
+ hadoop_hdfs_jar_path = os.path.join(current_dir, "..", "phoenix-client", "target","*").rstrip()
+
+ global hadoop_hdfs_jar
+ hadoop_hdfs_jar = find("hadoop-hdfs*.jar", hadoop_hdfs_jar_path)
+
+ global testjar
+ testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_test_jar_path)
+ if testjar == "":
+ testjar = findFileInPathWithoutRecursion(PHOENIX_TESTS_JAR_PATTERN, os.path.join(current_dir, "..", 'lib'))
+ if testjar == "":
+ testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_class_path)
+
+ global phoenix_traceserver_jar
+ phoenix_traceserver_jar = find(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-tracing-webapp", "target", "*"))
+ if phoenix_traceserver_jar == "":
+ phoenix_traceserver_jar = findFileInPathWithoutRecursion(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_traceserver_jar == "":
+ phoenix_traceserver_jar = findFileInPathWithoutRecursion(PHOENIX_TRACESERVER_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ global phoenix_pherf_jar
+ phoenix_pherf_jar = find(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-pherf", "target", "*"))
+ if phoenix_pherf_jar == "":
+ phoenix_pherf_jar = findFileInPathWithoutRecursion(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+ if phoenix_pherf_jar == "":
+ phoenix_pherf_jar = findFileInPathWithoutRecursion(PHOENIX_PHERF_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+ return ""
+
+def shell_quote(args):
+ """
+ Return the platform specific shell quoted string. Handles Windows and *nix platforms.
+
+ :param args: array of shell arguments
+ :return: shell quoted string
+ """
+ if os.name == 'nt':
+ import subprocess
+ return subprocess.list2cmdline(args)
+ else:
+ # pipes module isn't available on Windows
+ import pipes
+ return " ".join([pipes.quote(tryDecode(v)) for v in args])
+
+def common_sqlline_args(parser):
+ parser.add_argument('-v', '--verbose', help='Verbosity on sqlline.', default='true')
+ parser.add_argument('-c', '--color', help='Color setting for sqlline.', default='true')
+ parser.add_argument('-fc', '--fastconnect', help='Fetch all schemas on initial connection', default='false')
+
+if __name__ == "__main__":
+ setPath()
+ print("phoenix_class_path:", phoenix_class_path)
+ print("hbase_conf_dir:", hbase_conf_dir)
+ print("hbase_conf_path:", hbase_conf_path)
+ print("current_dir:", current_dir)
+ print("phoenix_jar_path:", phoenix_jar_path)
+ print("phoenix_client_jar:", phoenix_client_jar)
+ print("phoenix_test_jar_path:", phoenix_test_jar_path)
+ print("hadoop_common_jar_path:", hadoop_common_jar_path)
+ print("hadoop_common_jar:", hadoop_common_jar)
+ print("hadoop_hdfs_jar_path:", hadoop_hdfs_jar_path)
+ print("hadoop_hdfs_jar:", hadoop_hdfs_jar)
+ print("testjar:", testjar)
+ print("phoenix_queryserver_jar:", phoenix_queryserver_jar)
+ print("phoenix_loadbalancer_jar:", phoenix_loadbalancer_jar)
+ print("phoenix_thin_client_jar:", phoenix_thin_client_jar)
+ print("hadoop_classpath:", hadoop_classpath)
diff --git a/MSH-PIC/phoenix-hbase/bin/psql.py b/MSH-PIC/phoenix-hbase/bin/psql.py
new file mode 100644
index 0000000..0e57c77
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/psql.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+from __future__ import print_function
+from phoenix_utils import tryDecode
+import os
+import subprocess
+import sys
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+args = phoenix_utils.shell_quote(sys.argv[1:])
+
+# HBase configuration folder path (where hbase-site.xml reside) for
+# HBase/Phoenix client side property override
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = tryDecode(x).partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+java_cmd = java + ' $PHOENIX_OPTS ' + \
+ ' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + \
+ os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " org.apache.phoenix.util.PhoenixRuntime " + args
+
+os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
diff --git a/MSH-PIC/phoenix-hbase/bin/readme.txt b/MSH-PIC/phoenix-hbase/bin/readme.txt
new file mode 100644
index 0000000..e9c5243
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/readme.txt
@@ -0,0 +1,50 @@
+SqlLine
+=======
+https://github.com/julianhyde/sqlline
+
+Execute SQL from command line. Sqlline manual is available at https://julianhyde.github.io/sqlline/manual.html
+
+ Usage:
+ $ sqlline.py <zookeeper> <optional_sql_file>
+ Example:
+ $ sqlline.py localhost
+ $ sqlline.py localhost <examples dir>/STOCK_SYMBOL.sql
+
+psql.py
+=======
+
+Usage: psql [-t table-name] [-h comma-separated-column-names | in-line] <zookeeper> <path-to-sql-or-csv-file>...
+
+Example 1. Create table, upsert row and run query using single .sql file
+./psql.py localhost <examples dir>/STOCK_SYMBOL.sql
+
+Example 2. Create table, load CSV data and run queries using .csv and .sql files:
+./psql.py localhost <examples dir>/WEB_STAT.sql <examples dir>/WEB_STAT.csv <examples dir>/WEB_STAT_QUERIES.sql
+
+Note: Please see comments in WEB_STAT_QUERIES.sql for the sample queries being executed
+
+performance.py
+==============
+
+Usage: performance <zookeeper> <row count>
+
+Example: Generates and upserts 1000000 rows and time basic queries on this data
+./performance.py localhost 1000000
+
+CSV MapReduce Loader
+====================
+
+Usage: hadoop jar phoenix-[version]-mapreduce.jar <parameters>
+
+ -a,--array-delimiter <arg> Array element delimiter (optional)
+ -c,--import-columns <arg> Comma-separated list of columns to be
+ imported
+ -d,--delimiter <arg> Input delimiter, defaults to comma
+ -g,--ignore-errors Ignore input errors
+ -h,--help Show this help and quit
+ -i,--input <arg> Input CSV path (mandatory)
+ -o,--output <arg> Output path for temporary HFiles (optional)
+ -s,--schema <arg> Phoenix schema name (optional)
+ -t,--table <arg> Phoenix table name (mandatory)
+ -z,--zookeeper <arg> Zookeeper quorum to connect to (optional)
+
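
The WEB_STAT walkthrough above can also be driven from a script; a minimal sketch using subprocess, where the ZooKeeper quorum and examples directory are placeholders to adapt to the local deployment:

    import subprocess

    zookeeper = 'localhost'            # placeholder quorum
    examples = '/path/to/examples'     # placeholder examples directory
    # Create the table, load the CSV data, then run the sample queries.
    subprocess.check_call(['./psql.py', zookeeper,
                           examples + '/WEB_STAT.sql',
                           examples + '/WEB_STAT.csv',
                           examples + '/WEB_STAT_QUERIES.sql'])
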
diff --git a/MSH-PIC/phoenix-hbase/bin/sqlline.py b/MSH-PIC/phoenix-hbase/bin/sqlline.py
new file mode 100644
index 0000000..23e54c5
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/sqlline.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+from __future__ import print_function
+from phoenix_utils import tryDecode
+import os
+import subprocess
+import sys
+import phoenix_utils
+import atexit
+
+# import argparse
+try:
+ import argparse
+except ImportError:
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(os.path.join(current_dir, 'argparse-1.4.0'))
+ import argparse
+
+global childProc
+childProc = None
+def kill_child():
+ if childProc is not None:
+ childProc.terminate()
+ childProc.kill()
+ if os.name != 'nt':
+ os.system("reset")
+atexit.register(kill_child)
+
+phoenix_utils.setPath()
+
+parser = argparse.ArgumentParser(description='Launches the Apache Phoenix Client.')
+# Positional argument 'zookeepers' is optional. The PhoenixDriver will automatically populate
+# this if it's not provided by the user (so, we want to leave a default value of empty)
+parser.add_argument('zookeepers', nargs='?', help='The ZooKeeper quorum string', default='')
+# Positional argument 'sqlfile' is optional
+parser.add_argument('sqlfile', nargs='?', help='A file of SQL commands to execute', default='')
+# Common arguments across sqlline.py and sqlline-thin.py
+phoenix_utils.common_sqlline_args(parser)
+# Parse the args
+args=parser.parse_args()
+
+zookeeper = tryDecode(args.zookeepers)
+sqlfile = tryDecode(args.sqlfile)
+
+# HBase configuration folder path (where hbase-site.xml reside) for
+# HBase/Phoenix client side property override
+hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
+
+if sqlfile and not os.path.isfile(sqlfile):
+ parser.print_help()
+ sys.exit(-1)
+
+if sqlfile:
+ sqlfile = "--run=" + phoenix_utils.shell_quote([sqlfile])
+
+java_home = os.getenv('JAVA_HOME')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = tryDecode(x).partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+colorSetting = tryDecode(args.color)
+# disable color setting for windows OS
+if os.name == 'nt':
+ colorSetting = "false"
+
+java_cmd = java + ' $PHOENIX_OPTS ' + \
+ ' -cp "' + hbase_config_path + os.pathsep + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + \
+ os.pathsep + phoenix_utils.hadoop_common_jar + os.pathsep + phoenix_utils.hadoop_hdfs_jar + \
+ os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
+ os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " sqlline.SqlLine -d org.apache.phoenix.jdbc.PhoenixDriver" + \
+ " -u jdbc:phoenix:" + phoenix_utils.shell_quote([zookeeper]) + \
+ " -n none -p none --color=" + colorSetting + " --fastConnect=" + tryDecode(args.fastconnect) + \
+ " --verbose=" + tryDecode(args.verbose) + " --incremental=false --isolation=TRANSACTION_READ_COMMITTED " + sqlfile
+
+os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
diff --git a/MSH-PIC/phoenix-hbase/bin/startsql.sh b/MSH-PIC/phoenix-hbase/bin/startsql.sh
new file mode 100644
index 0000000..6358c4a
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/startsql.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=$(cd $(dirname $0); pwd)
+
+cd $BASE_DIR
+
+exec python sqlline.py 192.168.20.193,192.168.20.194,192.168.20.195 <<EOF
+
+!quit
+
+EOF
diff --git a/MSH-PIC/phoenix-hbase/bin/traceserver.py b/MSH-PIC/phoenix-hbase/bin/traceserver.py
new file mode 100644
index 0000000..35a918c
--- /dev/null
+++ b/MSH-PIC/phoenix-hbase/bin/traceserver.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+
+#
+# Script to handle launching the trace server process.
+#
+# usage: traceserver.py [start|stop]
+#
+
+from __future__ import print_function
+from phoenix_utils import tryDecode
+import datetime
+import getpass
+import os
+import os.path
+import signal
+import subprocess
+import sys
+import tempfile
+
+try:
+ import daemon
+ daemon_supported = True
+except ImportError:
+ # daemon script not supported on some platforms (windows?)
+ daemon_supported = False
+
+import phoenix_utils
+
+phoenix_utils.setPath()
+
+command = None
+args = sys.argv
+
+if len(args) > 1:
+ if tryDecode(args[1]) == 'start':
+ command = 'start'
+ elif tryDecode(args[1]) == 'stop':
+ command = 'stop'
+if command:
+ args = args[2:]
+
+if os.name == 'nt':
+ args = subprocess.list2cmdline(args[1:])
+else:
+ import pipes # pipes module isn't available on Windows
+ args = " ".join([pipes.quote(tryDecode(v)) for v in args[1:]])
+
+# HBase configuration folder path (where hbase-site.xml reside) for
+# HBase/Phoenix client side property override
+hbase_config_path = phoenix_utils.hbase_conf_dir
+
+# default paths ## TODO: add windows support
+java_home = os.getenv('JAVA_HOME')
+hbase_pid_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
+phoenix_log_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
+phoenix_file_basename = 'phoenix-%s-traceserver' % getpass.getuser()
+phoenix_log_file = '%s.log' % phoenix_file_basename
+phoenix_out_file = '%s.out' % phoenix_file_basename
+phoenix_pid_file = '%s.pid' % phoenix_file_basename
+opts = os.getenv('PHOENIX_TRACESERVER_OPTS', '')
+
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd = None
+if os.name == 'posix':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+ hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+ hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+ hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+ sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
+ sys.exit(-1)
+
+hbase_env = {}
+if os.path.isfile(hbase_env_path):
+ p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
+ for x in p.stdout:
+ (k, _, v) = tryDecode(x).partition('=')
+ hbase_env[k.strip()] = v.strip()
+
+if 'JAVA_HOME' in hbase_env:
+ java_home = hbase_env['JAVA_HOME']
+if 'HBASE_PID_DIR' in hbase_env:
+ hbase_pid_dir = hbase_env['HBASE_PID_DIR']
+if 'HBASE_LOG_DIR' in hbase_env:
+ phoenix_log_dir = hbase_env['HBASE_LOG_DIR']
+if 'PHOENIX_TRACESERVER_OPTS' in hbase_env:
+ opts = hbase_env['PHOENIX_TRACESERVER_OPTS']
+
+log_file_path = os.path.join(phoenix_log_dir, phoenix_log_file)
+out_file_path = os.path.join(phoenix_log_dir, phoenix_out_file)
+pid_file_path = os.path.join(hbase_pid_dir, phoenix_pid_file)
+
+if java_home:
+ java = os.path.join(java_home, 'bin', 'java')
+else:
+ java = 'java'
+
+# " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
+# " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
+java_cmd = '%(java)s ' + \
+ '-cp ' + hbase_config_path + os.pathsep + phoenix_utils.phoenix_traceserver_jar + os.pathsep + \
+ phoenix_utils.phoenix_client_jar + \
+ os.pathsep + phoenix_utils.hadoop_classpath + \
+ " -Dproc_phoenixtraceserver" + \
+ " -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
+ " -Dpsql.root.logger=%(root_logger)s" + \
+ " -Dpsql.log.dir=%(log_dir)s" + \
+ " -Dpsql.log.file=%(log_file)s" + \
+ " " + opts + \
+ " org.apache.phoenix.tracingwebapp.http.Main " + args
+
+if command == 'start':
+ if not daemon_supported:
+ sys.stderr.write("daemon mode not supported on this platform{}".format(os.linesep))
+ sys.exit(-1)
+
+ # run in the background
+ d = os.path.dirname(out_file_path)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ with open(out_file_path, 'a+') as out:
+ context = daemon.DaemonContext(
+ pidfile = daemon.PidFile(pid_file_path, 'Trace Server already running, PID file found: %s' % pid_file_path),
+ stdout = out,
+ stderr = out,
+ )
+ print('starting Trace Server, logging to %s' % log_file_path)
+ with context:
+ # this block is the main() for the forked daemon process
+ child = None
+ cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': phoenix_log_dir, 'log_file': phoenix_log_file}
+
+ # notify the child when we're killed
+ def handler(signum, frame):
+ if child:
+ child.send_signal(signum)
+ sys.exit(0)
+ signal.signal(signal.SIGTERM, handler)
+
+ print('%s launching %s' % (datetime.datetime.now(), cmd))
+ child = subprocess.Popen(cmd.split())
+ sys.exit(child.wait())
+
+elif command == 'stop':
+ if not daemon_supported:
+ sys.stderr.write("daemon mode not supported on this platform{}".format(os.linesep))
+ sys.exit(-1)
+
+ if not os.path.exists(pid_file_path):
+ sys.stderr.write("no Trace Server to stop because PID file not found, {}{}"
+ .format(pid_file_path, os.linesep))
+ sys.exit(0)
+
+ if not os.path.isfile(pid_file_path):
+ sys.stderr.write("PID path exists but is not a file! {}{}"
+ .format(pid_file_path, os.linesep))
+ sys.exit(1)
+
+ pid = None
+ with open(pid_file_path, 'r') as p:
+ pid = int(p.read())
+ if not pid:
+ sys.exit("cannot read PID file, %s" % pid_file_path)
+
+ print("stopping Trace Server pid %s" % pid)
+ with open(out_file_path, 'a+') as out:
+ out.write("%s terminating Trace Server%s" % (datetime.datetime.now(), os.linesep))
+ os.kill(pid, signal.SIGTERM)
+
+else:
+ # run in the foreground using defaults from log4j.properties
+ cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
+ splitcmd = cmd.split()
+ os.execvp(splitcmd[0], splitcmd)
diff --git a/MSH-PIC/zookeeper/bin/README.txt b/MSH-PIC/zookeeper/bin/README.txt
new file mode 100644
index 0000000..e70506d
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/README.txt
@@ -0,0 +1,6 @@
+This directory contains scripts that allow easy access (classpath in particular)
+to the ZooKeeper server and command line client.
+
+Files ending in .sh are unix and cygwin compatible
+
+Files ending in .cmd are msdos/windows compatible
diff --git a/MSH-PIC/zookeeper/bin/change_myid.sh b/MSH-PIC/zookeeper/bin/change_myid.sh
new file mode 100644
index 0000000..cd1db8d
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/change_myid.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+id=1
+
+for i in `echo "[u'192.168.20.193', u'192.168.20.194', u'192.168.20.195']" | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"`
+do
+ip=`echo $i | grep -E -o "[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+"`
+echo 'server.'$id'='$ip':2888:3888' >> /home/tsg/olap/zookeeper-3.4.10/conf/zoo.cfg
+if [[ $ip == 192.168.20.193 ]];then
+ echo $id > /home/tsg/olap/zookeeper-3.4.10/data/myid
+fi
+((id++))
+done
+
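
change_myid.sh appends one server.N entry per quorum member to zoo.cfg and writes this host's index to the myid file. The same bookkeeping, spelled out as a sketch (the quorum, local IP and paths mirror the script above and would normally come from the deployment configuration):

    quorum = ['192.168.20.193', '192.168.20.194', '192.168.20.195']
    local_ip = '192.168.20.193'
    zoo_cfg = '/home/tsg/olap/zookeeper-3.4.10/conf/zoo.cfg'
    myid_file = '/home/tsg/olap/zookeeper-3.4.10/data/myid'

    with open(zoo_cfg, 'a') as cfg:
        for idx, ip in enumerate(quorum, start=1):
            cfg.write('server.%d=%s:2888:3888\n' % (idx, ip))
            if ip == local_ip:
                with open(myid_file, 'w') as f:
                    f.write(str(idx))
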
diff --git a/MSH-PIC/zookeeper/bin/create_cmak_node.sh b/MSH-PIC/zookeeper/bin/create_cmak_node.sh
new file mode 100644
index 0000000..4de2b84
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/create_cmak_node.sh
@@ -0,0 +1,7 @@
+/home/tsg/olap/zookeeper-3.4.10/bin/zkCli.sh create /kafka-manager ""
+sleep 3
+/home/tsg/olap/zookeeper-3.4.10/bin/zkCli.sh create /kafka-manager/mutex ""
+sleep 3
+/home/tsg/olap/zookeeper-3.4.10/bin/zkCli.sh create /kafka-manager/mutex/locks ""
+sleep 3
+/home/tsg/olap/zookeeper-3.4.10/bin/zkCli.sh create /kafka-manager/mutex/leases ""
diff --git a/MSH-PIC/zookeeper/bin/dae-zookeeper.sh b/MSH-PIC/zookeeper/bin/dae-zookeeper.sh
new file mode 100644
index 0000000..ce46b38
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/dae-zookeeper.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+source /etc/profile
+
+# installation path
+BASE_DIR=/home/tsg/olap
+VERSION=zookeeper-3.4.10
+
+function setlog(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/zkRes_sum
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/zkRes_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/zkRes_sum
+
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - Zookeeper服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Zookeeper服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+while true ; do
+HAS_ZK=`$BASE_DIR/$VERSION/bin/zkServer.sh status | egrep 'leader|follower' | wc -l`
+if [ $HAS_ZK -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/zkServer.sh start
+ setlog
+fi
+sleep 60
+done
+
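
dae-zookeeper.sh is a watchdog: every 60 seconds it runs zkServer.sh status, and if neither "leader" nor "follower" is reported it restarts the server and bumps a restart counter in the logs directory. The health probe, reduced to a sketch (the path mirrors BASE_DIR/VERSION above; this is illustrative, not a drop-in replacement):

    import subprocess

    ZK_BIN = '/home/tsg/olap/zookeeper-3.4.10/bin/zkServer.sh'

    def zookeeper_healthy():
        # "zkServer.sh status" prints "Mode: leader" or "Mode: follower" for a healthy quorum member.
        out = subprocess.run([ZK_BIN, 'status'], capture_output=True, text=True).stdout
        return 'leader' in out or 'follower' in out

    if not zookeeper_healthy():
        subprocess.run([ZK_BIN, 'start'])
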
diff --git a/MSH-PIC/zookeeper/bin/old/zkEnv.sh b/MSH-PIC/zookeeper/bin/old/zkEnv.sh
new file mode 100644
index 0000000..687c45a
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/old/zkEnv.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+
+ZOOBINDIR="${ZOOBINDIR:-/usr/bin}"
+ZOOKEEPER_PREFIX="${ZOOBINDIR}/.."
+
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+ if [ -e "${ZOOKEEPER_PREFIX}/conf" ]; then
+ ZOOCFGDIR="$ZOOBINDIR/../conf"
+ else
+ ZOOCFGDIR="$ZOOBINDIR/../etc/zookeeper"
+ fi
+fi
+
+if [ -f "${ZOOCFGDIR}/zookeeper-env.sh" ]; then
+ . "${ZOOCFGDIR}/zookeeper-env.sh"
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+ ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -f "$ZOOCFGDIR/java.env" ]
+then
+ . "$ZOOCFGDIR/java.env"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+ ZOO_LOG_DIR="${ZOOKEEPER_PREFIX}/logs/system"
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+ ZOO_LOG4J_PROP="ERROR,CONSOLE"
+fi
+
+if [ "$JAVA_HOME" != "" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+else
+ JAVA=java
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the binary package
+#(use array for LIBPATH to account for spaces within wildcard expansion)
+if [ -e "${ZOOKEEPER_PREFIX}"/share/zookeeper/zookeeper-*.jar ]; then
+ LIBPATH=("${ZOOKEEPER_PREFIX}"/share/zookeeper/*.jar)
+else
+ #release tarball format
+ for i in "$ZOOBINDIR"/../zookeeper-*.jar
+ do
+ CLASSPATH="$i:$CLASSPATH"
+ done
+ LIBPATH=("${ZOOBINDIR}"/../lib/*.jar)
+fi
+
+for i in "${LIBPATH[@]}"
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+ CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+ CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"
diff --git a/MSH-PIC/zookeeper/bin/old/zkServer.sh b/MSH-PIC/zookeeper/bin/old/zkServer.sh
new file mode 100644
index 0000000..396aedd
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/old/zkServer.sh
@@ -0,0 +1,225 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+
+
+# use POSIX interface, symlink is followed automatically
+ZOOBIN="${BASH_SOURCE-$0}"
+ZOOBIN="$(dirname "${ZOOBIN}")"
+ZOOBINDIR="$(cd "${ZOOBIN}"; pwd)"
+
+if [ -e "$ZOOBIN/../libexec/zkEnv.sh" ]; then
+ . "$ZOOBINDIR/../libexec/zkEnv.sh"
+else
+ . "$ZOOBINDIR/zkEnv.sh"
+fi
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+ JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ] || [ "$JMXDISABLE" = 'false' ]
+then
+ echo "ZooKeeper JMX enabled by default" >&2
+ if [ "x$JMXPORT" = "x" ]
+ then
+ # for some reason these two options are necessary on jdk6 on Ubuntu
+ # accord to the docs they are not necessary, but otw jconsole cannot
+ # do a local attach
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ else
+ if [ "x$JMXAUTH" = "x" ]
+ then
+ JMXAUTH=false
+ fi
+ if [ "x$JMXSSL" = "x" ]
+ then
+ JMXSSL=false
+ fi
+ if [ "x$JMXLOG4J" = "x" ]
+ then
+ JMXLOG4J=true
+ fi
+ echo "ZooKeeper remote JMX Port set to $JMXPORT" >&2
+ echo "ZooKeeper remote JMX authenticate set to $JMXAUTH" >&2
+ echo "ZooKeeper remote JMX ssl set to $JMXSSL" >&2
+ echo "ZooKeeper remote JMX log4j set to $JMXLOG4J" >&2
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ fi
+else
+ echo "JMX disabled by user request" >&2
+ ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+if [ "x$SERVER_JVMFLAGS" != "x" ]
+then
+ JVMFLAGS="$SERVER_JVMFLAGS $JVMFLAGS"
+fi
+
+if [ "x$2" != "x" ]
+then
+ ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+# if we give a more complicated path to the config, don't screw around in $ZOOCFGDIR
+if [ "x$(dirname "$ZOOCFG")" != "x$ZOOCFGDIR" ]
+then
+ ZOOCFG="$2"
+fi
+
+if $cygwin
+then
+ ZOOCFG=`cygpath -wp "$ZOOCFG"`
+ # cygwin has a "kill" in the shell itself, gets confused
+ KILL=/bin/kill
+else
+ KILL=kill
+fi
+
+echo "Using config: $ZOOCFG" >&2
+
+case "$OSTYPE" in
+*solaris*)
+ GREP=/usr/xpg4/bin/grep
+ ;;
+*)
+ GREP=grep
+ ;;
+esac
+if [ -z "$ZOOPIDFILE" ]; then
+ ZOO_DATADIR="$($GREP "^[[:space:]]*dataDir" "$ZOOCFG" | sed -e 's/.*=//')"
+ if [ ! -d "$ZOO_DATADIR" ]; then
+ mkdir -p "$ZOO_DATADIR"
+ fi
+ ZOOPIDFILE="$ZOO_DATADIR/zookeeper_server.pid"
+else
+ # ensure it exists, otw stop will fail
+ mkdir -p "$(dirname "$ZOOPIDFILE")"
+fi
+
+if [ ! -w "$ZOO_LOG_DIR" ] ; then
+mkdir -p "$ZOO_LOG_DIR"
+fi
+
+_ZOO_DAEMON_OUT="$ZOO_LOG_DIR/zookeeper.log"
+
+case $1 in
+start)
+ echo -n "Starting zookeeper ... "
+ if [ -f "$ZOOPIDFILE" ]; then
+ if kill -0 `cat "$ZOOPIDFILE"` > /dev/null 2>&1; then
+ echo $command already running as process `cat "$ZOOPIDFILE"`.
+ exit 0
+ fi
+ fi
+ nohup "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" > "$_ZOO_DAEMON_OUT" 2>&1 < /dev/null &
+ if [ $? -eq 0 ]
+ then
+ case "$OSTYPE" in
+ *solaris*)
+ /bin/echo "${!}\\c" > "$ZOOPIDFILE"
+ ;;
+ *)
+ /bin/echo -n $! > "$ZOOPIDFILE"
+ ;;
+ esac
+ if [ $? -eq 0 ];
+ then
+ sleep 1
+ echo STARTED
+ else
+ echo FAILED TO WRITE PID
+ exit 1
+ fi
+ else
+ echo SERVER DID NOT START
+ exit 1
+ fi
+ ;;
+start-foreground)
+ ZOO_CMD=(exec "$JAVA")
+ if [ "${ZOO_NOEXEC}" != "" ]; then
+ ZOO_CMD=("$JAVA")
+ fi
+ "${ZOO_CMD[@]}" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG"
+ ;;
+print-cmd)
+ echo "\"$JAVA\" -Dzookeeper.log.dir=\"${ZOO_LOG_DIR}\" -Dzookeeper.root.logger=\"${ZOO_LOG4J_PROP}\" -cp \"$CLASSPATH\" $JVMFLAGS $ZOOMAIN \"$ZOOCFG\" > \"$_ZOO_DAEMON_OUT\" 2>&1 < /dev/null"
+ ;;
+stop)
+ echo -n "Stopping zookeeper ... "
+ if [ ! -f "$ZOOPIDFILE" ]
+ then
+ echo "no zookeeper to stop (could not find file $ZOOPIDFILE)"
+ else
+ $KILL -9 $(cat "$ZOOPIDFILE")
+ rm "$ZOOPIDFILE"
+ echo STOPPED
+ fi
+ exit 0
+ ;;
+upgrade)
+ shift
+ echo "upgrading the servers to 3.*"
+ "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+ echo "Upgrading ... "
+ ;;
+restart)
+ shift
+ "$0" stop ${@}
+ sleep 3
+ "$0" start ${@}
+ ;;
+status)
+ # -q is necessary on some versions of linux where nc returns too quickly, and no stat result is output
+ clientPortAddress=`$GREP "^[[:space:]]*clientPortAddress[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ if ! [ $clientPortAddress ]
+ then
+ clientPortAddress="localhost"
+ fi
+ clientPort=`$GREP "^[[:space:]]*clientPort[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ STAT=`"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.client.FourLetterWordMain \
+ $clientPortAddress $clientPort srvr 2> /dev/null \
+ | $GREP Mode`
+ if [ "x$STAT" = "x" ]
+ then
+ echo "Error contacting service. It is probably not running."
+ exit 1
+ else
+ echo $STAT
+ exit 0
+ fi
+ ;;
+*)
+ echo "Usage: $0 {start|start-foreground|stop|restart|status|upgrade|print-cmd}" >&2
+
+esac
diff --git a/MSH-PIC/zookeeper/bin/set_zk_env.sh b/MSH-PIC/zookeeper/bin/set_zk_env.sh
new file mode 100644
index 0000000..57774d9
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/set_zk_env.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+echo -e "\n#zookeeper\nexport ZOOKEEPER_HOME=/home/tsg/olap/zookeeper-3.4.10\nexport PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile.d/zookeeper.sh
+chmod +x /etc/profile.d/zookeeper.sh
+
+keeppsth='/etc/init.d/keepzkalive'
+if [ -x $keeppsth ];then
+ chkconfig --add keepzkalive
+ chkconfig keepzkalive on
+ service keepzkalive start && sleep 5
+ zk_dae=`ps -ef | grep dae-zookeeper.sh | grep -v grep | wc -l`
+ if [ $zk_dae -eq "0" ];then
+ nohup /home/tsg/olap/zookeeper-3.4.10/bin/dae-zookeeper.sh > /dev/null 2>&1 &
+ fi
+fi
+
diff --git a/MSH-PIC/zookeeper/bin/zkCleanup.sh b/MSH-PIC/zookeeper/bin/zkCleanup.sh
new file mode 100644
index 0000000..38ee2e8
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkCleanup.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script cleans up old transaction logs and snapshots
+#
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# use POSIX interface, symlink is followed automatically
+ZOOBIN="${BASH_SOURCE-$0}"
+ZOOBIN="$(dirname "${ZOOBIN}")"
+ZOOBINDIR="$(cd "${ZOOBIN}"; pwd)"
+
+if [ -e "$ZOOBIN/../libexec/zkEnv.sh" ]; then
+ . "$ZOOBINDIR"/../libexec/zkEnv.sh
+else
+ . "$ZOOBINDIR"/zkEnv.sh
+fi
+
+ZOODATADIR="$(grep "^[[:space:]]*dataDir=" "$ZOOCFG" | sed -e 's/.*=//')"
+ZOODATALOGDIR="$(grep "^[[:space:]]*dataLogDir=" "$ZOOCFG" | sed -e 's/.*=//')"
+
+if [ "x$ZOODATALOGDIR" = "x" ]
+then
+"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS \
+ org.apache.zookeeper.server.PurgeTxnLog "$ZOODATADIR" $*
+else
+"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS \
+ org.apache.zookeeper.server.PurgeTxnLog "$ZOODATALOGDIR" "$ZOODATADIR" $*
+fi
diff --git a/MSH-PIC/zookeeper/bin/zkCli.cmd b/MSH-PIC/zookeeper/bin/zkCli.cmd
new file mode 100644
index 0000000..0ffa030
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkCli.cmd
@@ -0,0 +1,24 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+call "%~dp0zkEnv.cmd"
+
+set ZOOMAIN=org.apache.zookeeper.ZooKeeperMain
+call %JAVA% "-Dzookeeper.log.dir=%ZOO_LOG_DIR%" "-Dzookeeper.root.logger=%ZOO_LOG4J_PROP%" -cp "%CLASSPATH%" %ZOOMAIN% %*
+
+endlocal
+
diff --git a/MSH-PIC/zookeeper/bin/zkCli.sh b/MSH-PIC/zookeeper/bin/zkCli.sh
new file mode 100644
index 0000000..992a913
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkCli.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script launches the ZooKeeper command line client
+#
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# use POSIX interface, symlink is followed automatically
+ZOOBIN="${BASH_SOURCE-$0}"
+ZOOBIN="$(dirname "${ZOOBIN}")"
+ZOOBINDIR="$(cd "${ZOOBIN}"; pwd)"
+
+if [ -e "$ZOOBIN/../libexec/zkEnv.sh" ]; then
+ . "$ZOOBINDIR"/../libexec/zkEnv.sh
+else
+ . "$ZOOBINDIR"/zkEnv.sh
+fi
+
+"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $CLIENT_JVMFLAGS $JVMFLAGS \
+ org.apache.zookeeper.ZooKeeperMain "$@"
diff --git a/MSH-PIC/zookeeper/bin/zkEnv.cmd b/MSH-PIC/zookeeper/bin/zkEnv.cmd
new file mode 100644
index 0000000..41eed11
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkEnv.cmd
@@ -0,0 +1,49 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+set ZOOCFGDIR=%~dp0%..\conf
+set ZOO_LOG_DIR=%~dp0%..
+set ZOO_LOG4J_PROP=INFO,CONSOLE
+
+REM for sanity sake assume Java 1.6
+REM see: http://java.sun.com/javase/6/docs/technotes/tools/windows/java.html
+
+REM add the zoocfg dir to classpath
+set CLASSPATH=%ZOOCFGDIR%
+
+REM make it work in the release
+SET CLASSPATH=%~dp0..\*;%~dp0..\lib\*;%CLASSPATH%
+
+REM make it work for developers
+SET CLASSPATH=%~dp0..\build\classes;%~dp0..\build\lib\*;%CLASSPATH%
+
+set ZOOCFG=%ZOOCFGDIR%\zoo.cfg
+
+@REM setup java environment variables
+
+if not defined JAVA_HOME (
+ echo Error: JAVA_HOME is not set.
+ goto :eof
+)
+
+set JAVA_HOME=%JAVA_HOME:"=%
+
+if not exist "%JAVA_HOME%"\bin\java.exe (
+ echo Error: JAVA_HOME is incorrectly set.
+ goto :eof
+)
+
+set JAVA="%JAVA_HOME%"\bin\java
diff --git a/MSH-PIC/zookeeper/bin/zkEnv.sh b/MSH-PIC/zookeeper/bin/zkEnv.sh
new file mode 100644
index 0000000..273be21
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkEnv.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+
+ZOOBINDIR="${ZOOBINDIR:-/usr/bin}"
+ZOOKEEPER_PREFIX="${ZOOBINDIR}/.."
+
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+ if [ -e "${ZOOKEEPER_PREFIX}/conf" ]; then
+ ZOOCFGDIR="$ZOOBINDIR/../conf"
+ else
+ ZOOCFGDIR="$ZOOBINDIR/../etc/zookeeper"
+ fi
+fi
+
+if [ -f "${ZOOCFGDIR}/zookeeper-env.sh" ]; then
+ . "${ZOOCFGDIR}/zookeeper-env.sh"
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+ ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -f "$ZOOCFGDIR/java.env" ]
+then
+ . "$ZOOCFGDIR/java.env"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+ ZOO_LOG_DIR="${ZOOKEEPER_PREFIX}/logs/system"
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+ #ZOO_LOG4J_PROP="INFO,CONSOLE"
+ ZOO_LOG4J_PROP="ERROR,ROLLINGFILE"
+fi
+
+if [ "$JAVA_HOME" != "" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+else
+ JAVA=java
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the binary package
+#(use array for LIBPATH to account for spaces within wildcard expansion)
+if [ -e "${ZOOKEEPER_PREFIX}"/share/zookeeper/zookeeper-*.jar ]; then
+ LIBPATH=("${ZOOKEEPER_PREFIX}"/share/zookeeper/*.jar)
+else
+ #release tarball format
+ for i in "$ZOOBINDIR"/../zookeeper-*.jar
+ do
+ CLASSPATH="$i:$CLASSPATH"
+ done
+ LIBPATH=("${ZOOBINDIR}"/../lib/*.jar)
+fi
+
+for i in "${LIBPATH[@]}"
+do
+ CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+ CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+ CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"
diff --git a/MSH-PIC/zookeeper/bin/zkServer.cmd b/MSH-PIC/zookeeper/bin/zkServer.cmd
new file mode 100644
index 0000000..6b4cf02
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkServer.cmd
@@ -0,0 +1,24 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal
+call "%~dp0zkEnv.cmd"
+
+set ZOOMAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
+echo on
+call %JAVA% "-Dzookeeper.log.dir=%ZOO_LOG_DIR%" "-Dzookeeper.root.logger=%ZOO_LOG4J_PROP%" -cp "%CLASSPATH%" %ZOOMAIN% "%ZOOCFG%" %*
+
+endlocal
diff --git a/MSH-PIC/zookeeper/bin/zkServer.sh b/MSH-PIC/zookeeper/bin/zkServer.sh
new file mode 100644
index 0000000..396aedd
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zkServer.sh
@@ -0,0 +1,225 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+
+
+# use POSIX interface, symlink is followed automatically
+ZOOBIN="${BASH_SOURCE-$0}"
+ZOOBIN="$(dirname "${ZOOBIN}")"
+ZOOBINDIR="$(cd "${ZOOBIN}"; pwd)"
+
+if [ -e "$ZOOBIN/../libexec/zkEnv.sh" ]; then
+ . "$ZOOBINDIR/../libexec/zkEnv.sh"
+else
+ . "$ZOOBINDIR/zkEnv.sh"
+fi
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+ JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ] || [ "$JMXDISABLE" = 'false' ]
+then
+ echo "ZooKeeper JMX enabled by default" >&2
+ if [ "x$JMXPORT" = "x" ]
+ then
+ # for some reason these two options are necessary on jdk6 on Ubuntu
+ # accord to the docs they are not necessary, but otw jconsole cannot
+ # do a local attach
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ else
+ if [ "x$JMXAUTH" = "x" ]
+ then
+ JMXAUTH=false
+ fi
+ if [ "x$JMXSSL" = "x" ]
+ then
+ JMXSSL=false
+ fi
+ if [ "x$JMXLOG4J" = "x" ]
+ then
+ JMXLOG4J=true
+ fi
+ echo "ZooKeeper remote JMX Port set to $JMXPORT" >&2
+ echo "ZooKeeper remote JMX authenticate set to $JMXAUTH" >&2
+ echo "ZooKeeper remote JMX ssl set to $JMXSSL" >&2
+ echo "ZooKeeper remote JMX log4j set to $JMXLOG4J" >&2
+ ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ fi
+else
+ echo "JMX disabled by user request" >&2
+ ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+if [ "x$SERVER_JVMFLAGS" != "x" ]
+then
+ JVMFLAGS="$SERVER_JVMFLAGS $JVMFLAGS"
+fi
+
+if [ "x$2" != "x" ]
+then
+ ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+# if we give a more complicated path to the config, don't screw around in $ZOOCFGDIR
+if [ "x$(dirname "$ZOOCFG")" != "x$ZOOCFGDIR" ]
+then
+ ZOOCFG="$2"
+fi
+
+if $cygwin
+then
+ ZOOCFG=`cygpath -wp "$ZOOCFG"`
+ # cygwin has a "kill" in the shell itself, gets confused
+ KILL=/bin/kill
+else
+ KILL=kill
+fi
+
+echo "Using config: $ZOOCFG" >&2
+
+case "$OSTYPE" in
+*solaris*)
+ GREP=/usr/xpg4/bin/grep
+ ;;
+*)
+ GREP=grep
+ ;;
+esac
+if [ -z "$ZOOPIDFILE" ]; then
+ ZOO_DATADIR="$($GREP "^[[:space:]]*dataDir" "$ZOOCFG" | sed -e 's/.*=//')"
+ if [ ! -d "$ZOO_DATADIR" ]; then
+ mkdir -p "$ZOO_DATADIR"
+ fi
+ ZOOPIDFILE="$ZOO_DATADIR/zookeeper_server.pid"
+else
+ # ensure it exists, otw stop will fail
+ mkdir -p "$(dirname "$ZOOPIDFILE")"
+fi
+
+if [ ! -w "$ZOO_LOG_DIR" ] ; then
+mkdir -p "$ZOO_LOG_DIR"
+fi
+
+_ZOO_DAEMON_OUT="$ZOO_LOG_DIR/zookeeper.log"
+
+case $1 in
+start)
+ echo -n "Starting zookeeper ... "
+ if [ -f "$ZOOPIDFILE" ]; then
+ if kill -0 `cat "$ZOOPIDFILE"` > /dev/null 2>&1; then
+ echo $command already running as process `cat "$ZOOPIDFILE"`.
+ exit 0
+ fi
+ fi
+ nohup "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" > "$_ZOO_DAEMON_OUT" 2>&1 < /dev/null &
+ if [ $? -eq 0 ]
+ then
+ case "$OSTYPE" in
+ *solaris*)
+ /bin/echo "${!}\\c" > "$ZOOPIDFILE"
+ ;;
+ *)
+ /bin/echo -n $! > "$ZOOPIDFILE"
+ ;;
+ esac
+ if [ $? -eq 0 ];
+ then
+ sleep 1
+ echo STARTED
+ else
+ echo FAILED TO WRITE PID
+ exit 1
+ fi
+ else
+ echo SERVER DID NOT START
+ exit 1
+ fi
+ ;;
+start-foreground)
+ ZOO_CMD=(exec "$JAVA")
+ if [ "${ZOO_NOEXEC}" != "" ]; then
+ ZOO_CMD=("$JAVA")
+ fi
+ "${ZOO_CMD[@]}" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG"
+ ;;
+print-cmd)
+ echo "\"$JAVA\" -Dzookeeper.log.dir=\"${ZOO_LOG_DIR}\" -Dzookeeper.root.logger=\"${ZOO_LOG4J_PROP}\" -cp \"$CLASSPATH\" $JVMFLAGS $ZOOMAIN \"$ZOOCFG\" > \"$_ZOO_DAEMON_OUT\" 2>&1 < /dev/null"
+ ;;
+stop)
+ echo -n "Stopping zookeeper ... "
+ if [ ! -f "$ZOOPIDFILE" ]
+ then
+ echo "no zookeeper to stop (could not find file $ZOOPIDFILE)"
+ else
+ $KILL -9 $(cat "$ZOOPIDFILE")
+ rm "$ZOOPIDFILE"
+ echo STOPPED
+ fi
+ exit 0
+ ;;
+upgrade)
+ shift
+ echo "upgrading the servers to 3.*"
+ "$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+ echo "Upgrading ... "
+ ;;
+restart)
+ shift
+ "$0" stop ${@}
+ sleep 3
+ "$0" start ${@}
+ ;;
+status)
+ # -q is necessary on some versions of linux where nc returns too quickly, and no stat result is output
+ clientPortAddress=`$GREP "^[[:space:]]*clientPortAddress[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ if ! [ $clientPortAddress ]
+ then
+ clientPortAddress="localhost"
+ fi
+ clientPort=`$GREP "^[[:space:]]*clientPort[^[:alpha:]]" "$ZOOCFG" | sed -e 's/.*=//'`
+ STAT=`"$JAVA" "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+ -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.client.FourLetterWordMain \
+ $clientPortAddress $clientPort srvr 2> /dev/null \
+ | $GREP Mode`
+ if [ "x$STAT" = "x" ]
+ then
+ echo "Error contacting service. It is probably not running."
+ exit 1
+ else
+ echo $STAT
+ exit 0
+ fi
+ ;;
+*)
+ echo "Usage: $0 {start|start-foreground|stop|restart|status|upgrade|print-cmd}" >&2
+
+esac
diff --git a/MSH-PIC/zookeeper/bin/zklogdelete.sh b/MSH-PIC/zookeeper/bin/zklogdelete.sh
new file mode 100644
index 0000000..ec85406
--- /dev/null
+++ b/MSH-PIC/zookeeper/bin/zklogdelete.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Removes rotated logs named with the date computed below (currently 7 days ago);
+# adjust "-7 days" to keep more or fewer days of logs.
+# Register this script in the system crontab (/etc/crontab).
+# It relies on environment variables (ZOOKEEPER_HOME, KAFKA_HOME), hence the "source /etc/profile".
+source /etc/profile
+day=$(date +"%Y-%m-%d" -d "-7 days")
+
+zk=`jps | grep QuorumPeerMain | wc -l`
+if [[ $zk = "1" ]];then
+ rm -rf $ZOOKEEPER_HOME/logs/system/*.$day*
+fi
+
+kafka=`jps | grep Kafka | wc -l`
+if [[ $kafka = "1" ]];then
+ rm -rf $KAFKA_HOME/logs/*.$day*
+fi
+
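The header comments ask for this script to be registered in /etc/crontab. A minimal entry might look like the following (a sketch; the install path and schedule are assumptions, the path being taken from the dataDir prefix used in zoo.cfg below):

    # Run the log cleanup daily at 01:30 as root (hypothetical path and time)
    30 1 * * * root /home/tsg/olap/zookeeper-3.4.10/bin/zklogdelete.sh >/dev/null 2>&1
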
diff --git a/MSH-PIC/zookeeper/conf/configuration.xsl b/MSH-PIC/zookeeper/conf/configuration.xsl
new file mode 100644
index 0000000..377cdbe
--- /dev/null
+++ b/MSH-PIC/zookeeper/conf/configuration.xsl
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/MSH-PIC/zookeeper/conf/java.env b/MSH-PIC/zookeeper/conf/java.env
new file mode 100644
index 0000000..4c70fdb
--- /dev/null
+++ b/MSH-PIC/zookeeper/conf/java.env
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+# heap size MUST be modified according to cluster environment
+
+export JVMFLAGS="-Xmx4096m -Xms1024m $JVMFLAGS"
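Whether the heap settings from java.env actually reach the server can be checked on a running node; a rough sketch, assuming jps/ps are available and the ZooKeeper process is QuorumPeerMain:

    # Show the -Xms/-Xmx flags of the running ZooKeeper JVM
    ps -ef | grep QuorumPeerMain | grep -v grep | grep -oE -- '-Xm[sx][0-9]+[mgMG]'
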
diff --git a/MSH-PIC/zookeeper/conf/log4j.properties b/MSH-PIC/zookeeper/conf/log4j.properties
new file mode 100644
index 0000000..30d6d7b
--- /dev/null
+++ b/MSH-PIC/zookeeper/conf/log4j.properties
@@ -0,0 +1,63 @@
+# Define some default values that can be overridden by system properties
+#zookeeper.root.logger=INFO, CONSOLE
+zookeeper.root.logger=ERROR, ROLLINGFILE
+zookeeper.console.threshold=ERROR
+zookeeper.log.dir=.
+zookeeper.log.file=zookeeper.log
+zookeeper.log.threshold=ERROR
+zookeeper.tracelog.dir=.
+zookeeper.tracelog.file=zookeeper_trace.log
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ}{UTC} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+#log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
+log4j.appender.ROLLINGFILE.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %p %m (%c)%n
+
+# MaxFileSize/MaxBackupIndex only take effect with RollingFileAppender, not the
+# DailyRollingFileAppender configured above; switch the appender class if size-based rolling is needed
+#log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+# Overrides the pattern set above; this myid-aware pattern is the one that takes effect
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ}{UTC} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+# Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ}{UTC} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
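The root logger here defaults to ERROR,ROLLINGFILE through the zookeeper.root.logger property, and the server script above injects that property from ZOO_LOG4J_PROP (see the -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} options in the start/start-foreground/status cases). Verbosity can therefore be raised for a single run without editing this file; a sketch, assuming the stock zkEnv.sh behaviour of honouring a pre-set ZOO_LOG4J_PROP and the usual zkServer.sh script name:

    # Start once with INFO logging to the rolling file, then revert to the configured default
    ZOO_LOG4J_PROP="INFO,ROLLINGFILE" bin/zkServer.sh start
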
diff --git a/MSH-PIC/zookeeper/conf/zoo.cfg b/MSH-PIC/zookeeper/conf/zoo.cfg
new file mode 100644
index 0000000..2ba4ab6
--- /dev/null
+++ b/MSH-PIC/zookeeper/conf/zoo.cfg
@@ -0,0 +1,51 @@
+# The number of milliseconds of each tick
+# Interval used for heartbeats between ZooKeeper servers, and between clients and servers; one heartbeat is sent every tickTime. tickTime is in milliseconds.
+tickTime=9000
+
+# The number of ticks that the initial synchronization phase can take
+# Maximum number of ticks (multiples of tickTime) a follower (F) may take to initially connect and sync with the leader (L).
+initLimit=10
+
+# The number of ticks that can pass between sending a request and getting an acknowledgement
+# Maximum number of ticks (multiples of tickTime) allowed between a follower's request and the leader's acknowledgement.
+syncLimit=5
+
+# ZooKeeper throttles clients so that the number of outstanding (unprocessed) requests in the system never exceeds this value. (default: 1000)
+globalOutstandingLimit=1000
+
+# The maximum number of client connections. Increase this if you need to handle more clients.
+# Socket-level limit on the number of concurrent connections a single client may open to a single server in the ensemble. (default: 60)
+maxClientCnxns=5000
+
+# Skip ACL checks; this removes permission-validation overhead and improves performance.
+skipACL=yes
+
+# yes: every write is fsynced from the page cache to disk before it is acknowledged; later writes wait for earlier ones.
+# no: a write returns as soon as the data reaches the page cache, which improves performance, but data still in the page cache can be lost on power failure.
+forceSync=yes
+
+# If an fsync of the transaction log (WAL) takes longer than this value (in ms), a warning is logged; requires forceSync=yes.
+fsync.warningthresholdms=20
+
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just example sakes.
+# Directory where ZooKeeper stores snapshot data; by default the transaction logs are written here as well.
+dataDir=/home/tsg/olap/zookeeper-3.4.10/data
+
+# Directory where ZooKeeper stores its transaction log files.
+dataLogDir=/home/tsg/olap/zookeeper-3.4.10/logs
+
+# the port at which the clients will connect
+# Port on which clients connect to the ZooKeeper server.
+clientPort=2181
+
+# Number of snapshot files (and corresponding transaction logs) to retain. (default: 3)
+autopurge.snapRetainCount=3
+
+# Purge interval in hours. (default: 0, which disables automatic purging)
+autopurge.purgeInterval=1
+
+# The servers in the ensemble: server.N=host:peerPort:leaderElectionPort
+server.1=192.168.20.193:2888:3888
+server.2=192.168.20.194:2888:3888
+server.3=192.168.20.195:2888:3888
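Each of the three servers listed above also needs a myid file in dataDir whose contents match its server.N id; without it the node cannot join the quorum. A sketch of the one-time per-host setup, using the dataDir from this config:

    # On 192.168.20.193 (server.1); write 2 on .194 and 3 on .195
    mkdir -p /home/tsg/olap/zookeeper-3.4.10/data
    echo 1 > /home/tsg/olap/zookeeper-3.4.10/data/myid
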
diff --git a/MSH-PIC/zookeeper/conf/zoo_sample.cfg b/MSH-PIC/zookeeper/conf/zoo_sample.cfg
new file mode 100644
index 0000000..a5a2c0b
--- /dev/null
+++ b/MSH-PIC/zookeeper/conf/zoo_sample.cfg
@@ -0,0 +1,28 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir=/tmp/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1