author     wangchengcheng <[email protected]>  2023-07-27 15:43:51 +0800
committer  wangchengcheng <[email protected]>  2023-07-27 15:43:51 +0800
commit     124f687daace8b85e5c74abac04bcd0a92744a8d (patch)
tree       4f563326b1be67cfb51bf6a04f1ca4d953536e76 /PCAP-PIC/hbase/bin
parent     08686ae87f9efe7a590f48db74ed133b481c85b1 (diff)

P19 23.07 online-config
Diffstat (limited to 'PCAP-PIC/hbase/bin')
-rw-r--r--  PCAP-PIC/hbase/bin/alter-hbase-table.sh | 19
-rw-r--r--  PCAP-PIC/hbase/bin/considerAsDead.sh | 61
-rw-r--r--  PCAP-PIC/hbase/bin/create-hbase-table.sh | 23
-rw-r--r--  PCAP-PIC/hbase/bin/create-phoenix-table.sh | 394
-rw-r--r--  PCAP-PIC/hbase/bin/dae-hmaster.sh | 41
-rw-r--r--  PCAP-PIC/hbase/bin/dae-hregion.sh | 40
-rw-r--r--  PCAP-PIC/hbase/bin/draining_servers.rb | 156
-rw-r--r--  PCAP-PIC/hbase/bin/get-active-master.rb | 38
-rw-r--r--  PCAP-PIC/hbase/bin/graceful_stop.sh | 186
-rw-r--r--  PCAP-PIC/hbase/bin/hbase | 687
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-cleanup.sh | 147
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-common.sh | 41
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-config.cmd | 78
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-config.sh | 170
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-daemon.sh | 371
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-daemons.sh | 62
-rw-r--r--  PCAP-PIC/hbase/bin/hbase-jruby | 22
-rw-r--r--  PCAP-PIC/hbase/bin/hbase.cmd | 469
-rw-r--r--  PCAP-PIC/hbase/bin/hirb.rb | 264
-rw-r--r--  PCAP-PIC/hbase/bin/local-master-backup.sh | 65
-rw-r--r--  PCAP-PIC/hbase/bin/local-regionservers.sh | 74
-rw-r--r--  PCAP-PIC/hbase/bin/master-backup.sh | 74
-rw-r--r--  PCAP-PIC/hbase/bin/region_mover.rb | 24
-rw-r--r--  PCAP-PIC/hbase/bin/region_status.rb | 150
-rw-r--r--  PCAP-PIC/hbase/bin/regionservers.sh | 83
-rw-r--r--  PCAP-PIC/hbase/bin/replication/copy_tables_desc.rb | 104
-rw-r--r--  PCAP-PIC/hbase/bin/rolling-restart.sh | 227
-rw-r--r--  PCAP-PIC/hbase/bin/rsgroup.sh | 23
-rw-r--r--  PCAP-PIC/hbase/bin/set_hbase_env.sh | 29
-rw-r--r--  PCAP-PIC/hbase/bin/shutdown_regionserver.rb | 56
-rw-r--r--  PCAP-PIC/hbase/bin/start-hbase.cmd | 61
-rw-r--r--  PCAP-PIC/hbase/bin/start-hbase.sh | 65
-rw-r--r--  PCAP-PIC/hbase/bin/stop-hbase.cmd | 54
-rw-r--r--  PCAP-PIC/hbase/bin/stop-hbase.sh | 68
-rw-r--r--  PCAP-PIC/hbase/bin/test/process_based_cluster.sh | 110
-rw-r--r--  PCAP-PIC/hbase/bin/zookeepers.sh | 59
36 files changed, 4595 insertions(+), 0 deletions(-)
diff --git a/PCAP-PIC/hbase/bin/alter-hbase-table.sh b/PCAP-PIC/hbase/bin/alter-hbase-table.sh
new file mode 100644
index 0000000..a174368
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/alter-hbase-table.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+source /etc/profile
+
+hbase shell <<EOF
+
+disable "tsg_galaxy:job_result"
+alter "tsg_galaxy:job_result",NAME=>'detail',TTL=>'300'
+alter "tsg_galaxy:job_result",NAME=>'statistics',TTL=>'300'
+alter "tsg_galaxy:job_result",NAME=>'field_discovery',TTL=>'300'
+enable "tsg_galaxy:job_result"
+
+alter 'knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_filename_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_time_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+alter 'index_partfile_knowledge_base_hos_bucket',{DURABILITY => 'SYNC_WAL'}
+
+EOF
+
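Note that HBase TTLs are in seconds, so TTL=>'300' expires job_result cells after five minutes. A quick, non-authoritative way to confirm the alters took effect is to dump the schema afterwards (same tables the script touches):

    hbase shell <<EOF
    describe "tsg_galaxy:job_result"
    describe 'knowledge_base_hos_bucket'
    EOF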
diff --git a/PCAP-PIC/hbase/bin/considerAsDead.sh b/PCAP-PIC/hbase/bin/considerAsDead.sh
new file mode 100644
index 0000000..ae1b8d8
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/considerAsDead.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+
+usage="Usage: considerAsDead.sh --hostname serverName"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. $bin/hbase-config.sh
+
+shift
+deadhost=$@
+
+remote_cmd="cd ${HBASE_HOME}; $bin/hbase-daemon.sh --config ${HBASE_CONF_DIR} restart"
+
+zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+zkrs=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+if [ "$zkrs" == "null" ]; then zkrs="rs"; fi
+
+zkrs="$zparent/$zkrs"
+online_regionservers=`$bin/hbase zkcli ls $zkrs 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+for rs in $online_regionservers
+do
+ rs_parts=(${rs//,/ })
+ hostname=${rs_parts[0]}
+ echo $deadhost
+ echo $hostname
+ if [ "$deadhost" == "$hostname" ]; then
+ znode="$zkrs/$rs"
+ echo "ZNode Deleting:" $znode
+ $bin/hbase zkcli delete $znode > /dev/null 2>&1
+ sleep 1
+ ssh $HBASE_SSH_OPTS $hostname $remote_cmd 2>&1 | sed "s/^/$hostname: /"
+ fi
+done
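Per the usage string, the script takes the server to expire via --hostname; it deletes the matching znode under the rs parent and restarts that region server over SSH. A hypothetical invocation:

    ./considerAsDead.sh --hostname rs-node-03.example.com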
diff --git a/PCAP-PIC/hbase/bin/create-hbase-table.sh b/PCAP-PIC/hbase/bin/create-hbase-table.sh
new file mode 100644
index 0000000..b83754f
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/create-hbase-table.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+source /etc/profile
+
+hbase shell <<EOF
+
+create_namespace 'tsg'
+create_namespace 'dos'
+create_namespace 'tsg_galaxy'
+
+create 'tsg:report_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
+
+create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Flood'
+
+create 'tsg_galaxy:relation_account_framedip', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
+create 'tsg_galaxy:relation_framedip_account', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
+
+create 'tsg_galaxy:recommendation_app_cip', {NAME => 'common', VERSIONS => 1}
+
+create 'tsg_galaxy:relation_user_teid',{NAME=>'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'}
+create 'tsg_galaxy:gtpc_knowledge_base',{NAME => 'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'},SPLITS => ['1','2','3']
+
+EOF
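A minimal smoke test for the script (assuming hbase is on the PATH, as it is via /etc/profile above) is to list what was created:

    hbase shell <<EOF
    list_namespace
    list 'tsg_galaxy:.*'
    EOF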
diff --git a/PCAP-PIC/hbase/bin/create-phoenix-table.sh b/PCAP-PIC/hbase/bin/create-phoenix-table.sh
new file mode 100644
index 0000000..57b8ea3
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/create-phoenix-table.sh
@@ -0,0 +1,394 @@
+#!/bin/bash
+
+source /etc/profile
+
+phoenix_path=/home/tsg/olap/phoenix-hbase-2.2-5.1.2-bin/bin
+
+$phoenix_path/sqlline.py<<EOF
+
+CREATE schema IF NOT EXISTS "tsg_galaxy";
+CREATE schema IF NOT EXISTS "tsg";
+
+CREATE view "tsg"."report_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);
+
+CREATE view IF NOT EXISTS "tsg_galaxy"."relation_account_framedip"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."vsys_id" UNSIGNED_INT,
+"radius"."account" VARCHAR,
+"radius"."framed_ip" VARCHAR,
+"radius"."first_found_time" UNSIGNED_LONG,
+"radius"."last_update_time" UNSIGNED_LONG,
+"radius"."acct_status_type" UNSIGNED_INT);
+
+CREATE view "tsg_galaxy"."recommendation_app_cip"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."app_label" VARCHAR,
+"common"."client_ip_list" VARCHAR,
+"common"."last_update_time" UNSIGNED_LONG);
+
+CREATE view IF NOT EXISTS "tsg_galaxy"."gtpc_knowledge_base"(
+ROWKEY VARCHAR PRIMARY KEY,
+"common"."vsys_id" UNSIGNED_INT,
+"gtp"."teid" UNSIGNED_LONG,
+"gtp"."uplink_teid" UNSIGNED_LONG,
+"gtp"."downlink_teid" UNSIGNED_LONG,
+"gtp"."apn" VARCHAR,
+"gtp"."phone_number" VARCHAR,
+"gtp"."imsi" VARCHAR,
+"gtp"."imei" VARCHAR,
+"gtp"."msg_type" UNSIGNED_INT,
+"gtp"."last_update_time" UNSIGNED_LONG);
+
+CREATE table IF NOT EXISTS "tsg_galaxy"."job_result"(
+ROWKEY VARCHAR PRIMARY KEY,
+"detail"."is_failed" BOOLEAN,
+"detail"."is_canceled" BOOLEAN,
+"detail"."is_done" BOOLEAN,
+"detail"."done_progress" UNSIGNED_FLOAT,
+"detail"."last_query_time" UNSIGNED_LONG,
+"detail"."duration_time" UNSIGNED_LONG,
+"detail"."count" UNSIGNED_LONG,
+"detail"."job_property" VARCHAR,
+"statistics"."result" VARCHAR,
+"field_discovery"."common_recv_time" VARCHAR,
+"field_discovery"."common_log_id" VARCHAR,
+"field_discovery"."common_policy_id" VARCHAR,
+"field_discovery"."common_subscriber_id" VARCHAR,
+"field_discovery"."common_imei" VARCHAR,
+"field_discovery"."common_imsi" VARCHAR,
+"field_discovery"."common_phone_number" VARCHAR,
+"field_discovery"."common_client_ip" VARCHAR,
+"field_discovery"."common_internal_ip" VARCHAR,
+"field_discovery"."common_client_port" VARCHAR,
+"field_discovery"."common_l4_protocol" VARCHAR,
+"field_discovery"."common_address_type" VARCHAR,
+"field_discovery"."common_server_ip" VARCHAR,
+"field_discovery"."common_server_port" VARCHAR,
+"field_discovery"."common_external_ip" VARCHAR,
+"field_discovery"."common_action" VARCHAR,
+"field_discovery"."common_direction" VARCHAR,
+"field_discovery"."common_entrance_id" VARCHAR,
+"field_discovery"."common_sled_ip" VARCHAR,
+"field_discovery"."common_client_location" VARCHAR,
+"field_discovery"."common_client_asn" VARCHAR,
+"field_discovery"."common_server_location" VARCHAR,
+"field_discovery"."common_server_asn" VARCHAR,
+"field_discovery"."common_server_fqdn" VARCHAR,
+"field_discovery"."common_server_domain" VARCHAR,
+"field_discovery"."common_sessions" VARCHAR,
+"field_discovery"."common_c2s_pkt_num" VARCHAR,
+"field_discovery"."common_s2c_pkt_num" VARCHAR,
+"field_discovery"."common_c2s_byte_num" VARCHAR,
+"field_discovery"."common_s2c_byte_num" VARCHAR,
+"field_discovery"."common_c2s_pkt_diff" VARCHAR,
+"field_discovery"."common_s2c_pkt_diff" VARCHAR,
+"field_discovery"."common_c2s_byte_diff" VARCHAR,
+"field_discovery"."common_s2c_byte_diff" VARCHAR,
+"field_discovery"."common_service" VARCHAR,
+"field_discovery"."common_schema_type" VARCHAR,
+"field_discovery"."common_vsys_id" VARCHAR,
+"field_discovery"."common_t_vsys_id" VARCHAR,
+"field_discovery"."common_flags" VARCHAR,
+"field_discovery"."common_flags_identify_info" VARCHAR,
+"field_discovery"."common_user_tags" VARCHAR,
+"field_discovery"."common_sub_action" VARCHAR,
+"field_discovery"."common_user_region" VARCHAR,
+"field_discovery"."common_shaping_rule_ids" VARCHAR,
+"field_discovery"."common_device_id" VARCHAR,
+"field_discovery"."common_egress_link_id" VARCHAR,
+"field_discovery"."common_ingress_link_id" VARCHAR,
+"field_discovery"."common_isp" VARCHAR,
+"field_discovery"."common_device_tag" VARCHAR,
+"field_discovery"."common_data_center" VARCHAR,
+"field_discovery"."common_device_group" VARCHAR,
+"field_discovery"."common_app_behavior" VARCHAR,
+"field_discovery"."common_encapsulation" VARCHAR,
+"field_discovery"."common_app_label" VARCHAR,
+"field_discovery"."common_tunnels" VARCHAR,
+"field_discovery"."common_protocol_label" VARCHAR,
+"field_discovery"."common_app_id" VARCHAR,
+"field_discovery"."common_app_full_path" VARCHAR,
+"field_discovery"."common_userdefine_app_name" VARCHAR,
+"field_discovery"."common_app_identify_info" VARCHAR,
+"field_discovery"."common_app_surrogate_id" VARCHAR,
+"field_discovery"."common_l7_protocol" VARCHAR,
+"field_discovery"."common_service_category" VARCHAR,
+"field_discovery"."common_start_time" VARCHAR,
+"field_discovery"."common_end_time" VARCHAR,
+"field_discovery"."common_establish_latency_ms" VARCHAR,
+"field_discovery"."common_con_duration_ms" VARCHAR,
+"field_discovery"."common_stream_dir" VARCHAR,
+"field_discovery"."common_address_list" VARCHAR,
+"field_discovery"."common_has_dup_traffic" VARCHAR,
+"field_discovery"."common_stream_error" VARCHAR,
+"field_discovery"."common_stream_trace_id" VARCHAR,
+"field_discovery"."common_link_info_c2s" VARCHAR,
+"field_discovery"."common_link_info_s2c" VARCHAR,
+"field_discovery"."common_packet_capture_file" VARCHAR,
+"field_discovery"."common_tunnel_endpoint_a_desc" VARCHAR,
+"field_discovery"."common_tunnel_endpoint_b_desc" VARCHAR,
+"field_discovery"."common_c2s_ipfrag_num" VARCHAR,
+"field_discovery"."common_s2c_ipfrag_num" VARCHAR,
+"field_discovery"."common_c2s_tcp_lostlen" VARCHAR,
+"field_discovery"."common_s2c_tcp_lostlen" VARCHAR,
+"field_discovery"."common_c2s_tcp_unorder_num" VARCHAR,
+"field_discovery"."common_s2c_tcp_unorder_num" VARCHAR,
+"field_discovery"."common_c2s_pkt_retrans" VARCHAR,
+"field_discovery"."common_s2c_pkt_retrans" VARCHAR,
+"field_discovery"."common_c2s_byte_retrans" VARCHAR,
+"field_discovery"."common_s2c_byte_retrans" VARCHAR,
+"field_discovery"."common_tcp_client_isn" VARCHAR,
+"field_discovery"."common_tcp_server_isn" VARCHAR,
+"field_discovery"."common_first_ttl" VARCHAR,
+"field_discovery"."common_processing_time" VARCHAR,
+"field_discovery"."common_ingestion_time" VARCHAR,
+"field_discovery"."common_mirrored_pkts" VARCHAR,
+"field_discovery"."common_mirrored_bytes" VARCHAR,
+"field_discovery"."http_url" VARCHAR,
+"field_discovery"."http_host" VARCHAR,
+"field_discovery"."http_domain" VARCHAR,
+"field_discovery"."http_request_line" VARCHAR,
+"field_discovery"."http_response_line" VARCHAR,
+"field_discovery"."http_request_header" VARCHAR,
+"field_discovery"."http_response_header" VARCHAR,
+"field_discovery"."http_request_content" VARCHAR,
+"field_discovery"."http_request_content_length" VARCHAR,
+"field_discovery"."http_request_content_type" VARCHAR,
+"field_discovery"."http_response_content" VARCHAR,
+"field_discovery"."http_response_content_length" VARCHAR,
+"field_discovery"."http_response_content_type" VARCHAR,
+"field_discovery"."http_request_body" VARCHAR,
+"field_discovery"."http_response_body" VARCHAR,
+"field_discovery"."http_request_body_key" VARCHAR,
+"field_discovery"."http_response_body_key" VARCHAR,
+"field_discovery"."http_proxy_flag" VARCHAR,
+"field_discovery"."http_sequence" VARCHAR,
+"field_discovery"."http_snapshot" VARCHAR,
+"field_discovery"."http_cookie" VARCHAR,
+"field_discovery"."http_referer" VARCHAR,
+"field_discovery"."http_user_agent" VARCHAR,
+"field_discovery"."http_content_length" VARCHAR,
+"field_discovery"."http_content_type" VARCHAR,
+"field_discovery"."http_set_cookie" VARCHAR,
+"field_discovery"."http_version" VARCHAR,
+"field_discovery"."http_response_latency_ms" VARCHAR,
+"field_discovery"."http_action_file_size" VARCHAR,
+"field_discovery"."http_session_duration_ms" VARCHAR,
+"field_discovery"."mail_protocol_type" VARCHAR,
+"field_discovery"."mail_account" VARCHAR,
+"field_discovery"."mail_from_cmd" VARCHAR,
+"field_discovery"."mail_to_cmd" VARCHAR,
+"field_discovery"."mail_from" VARCHAR,
+"field_discovery"."mail_to" VARCHAR,
+"field_discovery"."mail_cc" VARCHAR,
+"field_discovery"."mail_bcc" VARCHAR,
+"field_discovery"."mail_subject" VARCHAR,
+"field_discovery"."mail_subject_charset" VARCHAR,
+"field_discovery"."mail_content" VARCHAR,
+"field_discovery"."mail_content_charset" VARCHAR,
+"field_discovery"."mail_attachment_name" VARCHAR,
+"field_discovery"."mail_attachment_name_charset" VARCHAR,
+"field_discovery"."mail_attachment_content" VARCHAR,
+"field_discovery"."mail_eml_file" VARCHAR,
+"field_discovery"."mail_snapshot" VARCHAR,
+"field_discovery"."dns_message_id" VARCHAR,
+"field_discovery"."dns_qr" VARCHAR,
+"field_discovery"."dns_opcode" VARCHAR,
+"field_discovery"."dns_aa" VARCHAR,
+"field_discovery"."dns_tc" VARCHAR,
+"field_discovery"."dns_rd" VARCHAR,
+"field_discovery"."dns_ra" VARCHAR,
+"field_discovery"."dns_rcode" VARCHAR,
+"field_discovery"."dns_qdcount" VARCHAR,
+"field_discovery"."dns_ancount" VARCHAR,
+"field_discovery"."dns_nscount" VARCHAR,
+"field_discovery"."dns_arcount" VARCHAR,
+"field_discovery"."dns_qname" VARCHAR,
+"field_discovery"."dns_qtype" VARCHAR,
+"field_discovery"."dns_qclass" VARCHAR,
+"field_discovery"."dns_cname" VARCHAR,
+"field_discovery"."dns_sub" VARCHAR,
+"field_discovery"."dns_rr" VARCHAR,
+"field_discovery"."dns_response_latency_ms" VARCHAR,
+"field_discovery"."ssl_version" VARCHAR,
+"field_discovery"."ssl_sni" VARCHAR,
+"field_discovery"."ssl_san" VARCHAR,
+"field_discovery"."ssl_cn" VARCHAR,
+"field_discovery"."ssl_pinningst" VARCHAR,
+"field_discovery"."ssl_intercept_state" VARCHAR,
+"field_discovery"."ssl_passthrough_reason" VARCHAR,
+"field_discovery"."ssl_server_side_latency" VARCHAR,
+"field_discovery"."ssl_client_side_latency" VARCHAR,
+"field_discovery"."ssl_server_side_version" VARCHAR,
+"field_discovery"."ssl_client_side_version" VARCHAR,
+"field_discovery"."ssl_cert_verify" VARCHAR,
+"field_discovery"."ssl_error" VARCHAR,
+"field_discovery"."ssl_con_latency_ms" VARCHAR,
+"field_discovery"."ssl_ja3_fingerprint" VARCHAR,
+"field_discovery"."ssl_ja3_hash" VARCHAR,
+"field_discovery"."ssl_ja3s_fingerprint" VARCHAR,
+"field_discovery"."ssl_ja3s_hash" VARCHAR,
+"field_discovery"."ssl_cert_issuer" VARCHAR,
+"field_discovery"."ssl_cert_subject" VARCHAR,
+"field_discovery"."dtls_cookie" VARCHAR,
+"field_discovery"."dtls_version" VARCHAR,
+"field_discovery"."dtls_sni" VARCHAR,
+"field_discovery"."dtls_san" VARCHAR,
+"field_discovery"."dtls_cn" VARCHAR,
+"field_discovery"."dtls_con_latency_ms" VARCHAR,
+"field_discovery"."dtls_ja3_fingerprint" VARCHAR,
+"field_discovery"."dtls_ja3_hash" VARCHAR,
+"field_discovery"."dtls_cert_issuer" VARCHAR,
+"field_discovery"."dtls_cert_subject" VARCHAR,
+"field_discovery"."quic_version" VARCHAR,
+"field_discovery"."quic_sni" VARCHAR,
+"field_discovery"."quic_user_agent" VARCHAR,
+"field_discovery"."ftp_account" VARCHAR,
+"field_discovery"."ftp_url" VARCHAR,
+"field_discovery"."ftp_content" VARCHAR,
+"field_discovery"."ftp_link_type" VARCHAR,
+"field_discovery"."bgp_type" VARCHAR,
+"field_discovery"."bgp_as_num" VARCHAR,
+"field_discovery"."bgp_route" VARCHAR,
+"field_discovery"."voip_calling_account" VARCHAR,
+"field_discovery"."voip_called_account" VARCHAR,
+"field_discovery"."voip_calling_number" VARCHAR,
+"field_discovery"."voip_called_number" VARCHAR,
+"field_discovery"."streaming_media_url" VARCHAR,
+"field_discovery"."streaming_media_protocol" VARCHAR,
+"field_discovery"."app_extra_info" VARCHAR,
+"field_discovery"."sip_call_id" VARCHAR,
+"field_discovery"."sip_originator_description" VARCHAR,
+"field_discovery"."sip_responder_description" VARCHAR,
+"field_discovery"."sip_user_agent" VARCHAR,
+"field_discovery"."sip_server" VARCHAR,
+"field_discovery"."sip_originator_sdp_connect_ip" VARCHAR,
+"field_discovery"."sip_originator_sdp_media_port" VARCHAR,
+"field_discovery"."sip_originator_sdp_media_type" VARCHAR,
+"field_discovery"."sip_originator_sdp_content" VARCHAR,
+"field_discovery"."sip_responder_sdp_connect_ip" VARCHAR,
+"field_discovery"."sip_responder_sdp_media_port" VARCHAR,
+"field_discovery"."sip_responder_sdp_media_type" VARCHAR,
+"field_discovery"."sip_responder_sdp_content" VARCHAR,
+"field_discovery"."sip_duration_s" VARCHAR,
+"field_discovery"."sip_bye" VARCHAR,
+"field_discovery"."rtp_payload_type_c2s" VARCHAR,
+"field_discovery"."rtp_payload_type_s2c" VARCHAR,
+"field_discovery"."rtp_pcap_path" VARCHAR,
+"field_discovery"."rtp_originator_dir" VARCHAR,
+"field_discovery"."ssh_version" VARCHAR,
+"field_discovery"."ssh_auth_success" VARCHAR,
+"field_discovery"."ssh_client_version" VARCHAR,
+"field_discovery"."ssh_server_version" VARCHAR,
+"field_discovery"."ssh_cipher_alg" VARCHAR,
+"field_discovery"."ssh_mac_alg" VARCHAR,
+"field_discovery"."ssh_compression_alg" VARCHAR,
+"field_discovery"."ssh_kex_alg" VARCHAR,
+"field_discovery"."ssh_host_key_alg" VARCHAR,
+"field_discovery"."ssh_host_key" VARCHAR,
+"field_discovery"."ssh_hassh" VARCHAR,
+"field_discovery"."stratum_cryptocurrency" VARCHAR,
+"field_discovery"."stratum_mining_pools" VARCHAR,
+"field_discovery"."stratum_mining_program" VARCHAR,
+"field_discovery"."rdp_cookie" VARCHAR,
+"field_discovery"."rdp_security_protocol" VARCHAR,
+"field_discovery"."rdp_client_channels" VARCHAR,
+"field_discovery"."rdp_keyboard_layout" VARCHAR,
+"field_discovery"."rdp_client_version" VARCHAR,
+"field_discovery"."rdp_client_name" VARCHAR,
+"field_discovery"."rdp_client_product_id" VARCHAR,
+"field_discovery"."rdp_desktop_width" VARCHAR,
+"field_discovery"."rdp_desktop_height" VARCHAR,
+"field_discovery"."rdp_requested_color_depth" VARCHAR,
+"field_discovery"."rdp_certificate_type" VARCHAR,
+"field_discovery"."rdp_certificate_count" VARCHAR,
+"field_discovery"."rdp_certificate_permanent" VARCHAR,
+"field_discovery"."rdp_encryption_level" VARCHAR,
+"field_discovery"."rdp_encryption_method" VARCHAR,
+"field_discovery"."doh_url" VARCHAR,
+"field_discovery"."doh_host" VARCHAR,
+"field_discovery"."doh_request_line" VARCHAR,
+"field_discovery"."doh_response_line" VARCHAR,
+"field_discovery"."doh_cookie" VARCHAR,
+"field_discovery"."doh_referer" VARCHAR,
+"field_discovery"."doh_user_agent" VARCHAR,
+"field_discovery"."doh_content_length" VARCHAR,
+"field_discovery"."doh_content_type" VARCHAR,
+"field_discovery"."doh_set_cookie" VARCHAR,
+"field_discovery"."doh_version" VARCHAR,
+"field_discovery"."doh_message_id" VARCHAR,
+"field_discovery"."doh_qr" VARCHAR,
+"field_discovery"."doh_opcode" VARCHAR,
+"field_discovery"."doh_aa" VARCHAR,
+"field_discovery"."doh_tc" VARCHAR,
+"field_discovery"."doh_rd" VARCHAR,
+"field_discovery"."doh_ra" VARCHAR,
+"field_discovery"."doh_rcode" VARCHAR,
+"field_discovery"."doh_qdcount" VARCHAR,
+"field_discovery"."doh_ancount" VARCHAR,
+"field_discovery"."doh_nscount" VARCHAR,
+"field_discovery"."doh_arcount" VARCHAR,
+"field_discovery"."doh_qname" VARCHAR,
+"field_discovery"."doh_qtype" VARCHAR,
+"field_discovery"."doh_qclass" VARCHAR,
+"field_discovery"."doh_cname" VARCHAR,
+"field_discovery"."doh_sub" VARCHAR,
+"field_discovery"."doh_rr" VARCHAR,
+"field_discovery"."radius_packet_type" VARCHAR,
+"field_discovery"."radius_account" VARCHAR,
+"field_discovery"."radius_nas_ip" VARCHAR,
+"field_discovery"."radius_framed_ip" VARCHAR,
+"field_discovery"."radius_session_timeout" VARCHAR,
+"field_discovery"."radius_idle_timeout" VARCHAR,
+"field_discovery"."radius_acct_status_type" VARCHAR,
+"field_discovery"."radius_acct_terminate_cause" VARCHAR,
+"field_discovery"."radius_event_timestamp" VARCHAR,
+"field_discovery"."radius_service_type" VARCHAR,
+"field_discovery"."radius_nas_port" VARCHAR,
+"field_discovery"."radius_framed_protocol" VARCHAR,
+"field_discovery"."radius_callback_number" VARCHAR,
+"field_discovery"."radius_callback_id" VARCHAR,
+"field_discovery"."radius_termination_action" VARCHAR,
+"field_discovery"."radius_called_station_id" VARCHAR,
+"field_discovery"."radius_calling_station_id" VARCHAR,
+"field_discovery"."radius_acct_delay_time" VARCHAR,
+"field_discovery"."radius_acct_session_id" VARCHAR,
+"field_discovery"."radius_acct_multi_session_id" VARCHAR,
+"field_discovery"."radius_acct_input_octets" VARCHAR,
+"field_discovery"."radius_acct_output_octets" VARCHAR,
+"field_discovery"."radius_acct_input_packets" VARCHAR,
+"field_discovery"."radius_acct_output_packets" VARCHAR,
+"field_discovery"."radius_acct_session_time" VARCHAR,
+"field_discovery"."radius_acct_link_count" VARCHAR,
+"field_discovery"."radius_acct_interim_interval" VARCHAR,
+"field_discovery"."radius_acct_authentic" VARCHAR,
+"field_discovery"."gtp_version" VARCHAR,
+"field_discovery"."gtp_apn" VARCHAR,
+"field_discovery"."gtp_imei" VARCHAR,
+"field_discovery"."gtp_imsi" VARCHAR,
+"field_discovery"."gtp_phone_number" VARCHAR,
+"field_discovery"."gtp_uplink_teid" VARCHAR,
+"field_discovery"."gtp_downlink_teid" VARCHAR,
+"field_discovery"."gtp_msg_type" VARCHAR,
+"field_discovery"."gtp_end_user_ipv4" VARCHAR,
+"field_discovery"."gtp_end_user_ipv6" VARCHAR,
+"field_discovery"."start_time" VARCHAR,
+"field_discovery"."end_time" VARCHAR,
+"field_discovery"."log_id" VARCHAR,
+"field_discovery"."profile_id" VARCHAR,
+"field_discovery"."vsys_id" VARCHAR,
+"field_discovery"."attack_type" VARCHAR,
+"field_discovery"."severity" VARCHAR,
+"field_discovery"."conditions" VARCHAR,
+"field_discovery"."destination_ip" VARCHAR,
+"field_discovery"."destination_country" VARCHAR,
+"field_discovery"."source_ip_list" VARCHAR,
+"field_discovery"."source_country_list" VARCHAR,
+"field_discovery"."session_rate" VARCHAR,
+"field_discovery"."packet_rate" VARCHAR,
+"field_discovery"."bit_rate" VARCHAR);
+
+!quit
+
+EOF
+
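Once the schemas, views, and table exist, the same sqlline.py entry point can query them; a hypothetical smoke test against one of the views mapped above:

    /home/tsg/olap/phoenix-hbase-2.2-5.1.2-bin/bin/sqlline.py <<EOF
    SELECT COUNT(*) FROM "tsg_galaxy"."relation_account_framedip";
    !quit
    EOF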
diff --git a/PCAP-PIC/hbase/bin/dae-hmaster.sh b/PCAP-PIC/hbase/bin/dae-hmaster.sh
new file mode 100644
index 0000000..41b6343
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/dae-hmaster.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hbase-2.2.3
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/masterRes_sum
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/masterRes_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/masterRes_sum
+
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - HBase HMaster service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - HBase HMaster service went down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_MASTER=`jps | grep -w HMaster | grep -v grep |wc -l`
+if [ "$HAS_MASTER" -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/hbase-daemon.sh start master
+ set_log
+fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hbase/bin/dae-hregion.sh b/PCAP-PIC/hbase/bin/dae-hregion.sh
new file mode 100644
index 0000000..e0dedb5
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/dae-hregion.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hbase-2.2.3
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/regionRes_sum
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/regionRes_sum`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/regionRes_sum
+if [ $OLD_NUM -eq "0" ];then
+ echo "`date "+%Y-%m-%d %H:%M:%S"` - HBase HRegionServer service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+ echo "`date +%Y-%m-%d` `date +%H:%M:%S` - HBase HRegionServer service went down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_REGIONSERVER=`jps | grep -w HRegionServer | grep -v grep |wc -l`
+if [ "$HAS_REGIONSERVER" -lt "1" ];then
+ $BASE_DIR/$VERSION/bin/hbase-daemon.sh start regionserver
+ set_log
+fi
+
+sleep 60
+done
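dae-hmaster.sh and dae-hregion.sh are watchdog loops: every 60 seconds they check jps for the HMaster/HRegionServer process and restart it through hbase-daemon.sh if it is gone, bumping a restart counter under logs/. Since they never exit, they are meant to be backgrounded; a sketch, assuming the scripts are deployed under the $BASE_DIR/$VERSION path used above:

    nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hmaster.sh >/dev/null 2>&1 &
    nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hregion.sh >/dev/null 2>&1 &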
diff --git a/PCAP-PIC/hbase/bin/draining_servers.rb b/PCAP-PIC/hbase/bin/draining_servers.rb
new file mode 100644
index 0000000..a8e20f0
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/draining_servers.rb
@@ -0,0 +1,156 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper
+# Deprecated in 2.0, and will be removed in 3.0. Use Admin decommission
+# API instead.
+
+require 'optparse'
+include Java
+
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.zookeeper.ZKUtil
+java_import org.apache.hadoop.hbase.zookeeper.ZNodePaths
+java_import org.slf4j.LoggerFactory
+
+# Name of this script
+NAME = 'draining_servers'.freeze
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+ opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+ opts.separator 'Add, remove, or list servers in draining mode. Can accept either a hostname to drain all region servers ' \
+ 'on that host, a host:port pair or a host,port,startCode triplet. More than one server can be given, separated by spaces'
+ opts.on('-h', '--help', 'Display usage information') do
+ puts opts
+ exit
+ end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+ serverInfos = admin.getClusterStatus.getServers
+ servers = []
+ for server in serverInfos
+ servers << server.getServerName
+ end
+ servers
+end
+
+def getServerNames(hostOrServers, config)
+ ret = []
+ connection = ConnectionFactory.createConnection(config)
+
+ for hostOrServer in hostOrServers
+ # check whether it is already serverName. No need to connect to cluster
+ parts = hostOrServer.split(',')
+ if parts.size == 3
+ ret << hostOrServer
+ else
+ admin = connection.getAdmin unless admin
+ servers = getServers(admin)
+
+ hostOrServer = hostOrServer.tr(':', ',')
+ for server in servers
+ ret << server if server.start_with?(hostOrServer)
+ end
+ end
+ end
+
+ admin.close if admin
+ connection.close
+ ret
+end
+
+def addServers(_options, hostOrServers)
+ config = HBaseConfiguration.create
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ for server in servers
+ node = ZNodePaths.joinZNode(parentZnode, server)
+ ZKUtil.createAndFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close
+ end
+end
+
+def removeServers(_options, hostOrServers)
+ config = HBaseConfiguration.create
+ servers = getServerNames(hostOrServers, config)
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ for server in servers
+ node = ZNodePaths.joinZNode(parentZnode, server)
+ ZKUtil.deleteNodeFailSilent(zkw, node)
+ end
+ ensure
+ zkw.close
+ end
+end
+
+# list servers in draining mode
+def listServers(_options)
+ config = HBaseConfiguration.create
+
+ zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
+
+ begin
+ parentZnode = zkw.getZNodePaths.drainingZNode
+ servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+ servers.each { |server| puts server }
+ ensure
+ zkw.close
+ end
+end
+
+hostOrServers = ARGV[1..ARGV.size]
+
+# Create a logger and save it to ruby global
+$LOG = LoggerFactory.getLogger(NAME)
+case ARGV[0]
+when 'add'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ addServers(options, hostOrServers)
+when 'remove'
+ if ARGV.length < 2
+ puts optparse
+ exit 1
+ end
+ removeServers(options, hostOrServers)
+when 'list'
+ listServers(options)
+else
+ puts optparse
+ exit 3
+end
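Following the banner above, the script runs under the JRuby bundled with HBase; the server names here are hypothetical:

    ./hbase org.jruby.Main draining_servers.rb add rs-node-01.example.com rs-node-02:16020
    ./hbase org.jruby.Main draining_servers.rb list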
diff --git a/PCAP-PIC/hbase/bin/get-active-master.rb b/PCAP-PIC/hbase/bin/get-active-master.rb
new file mode 100644
index 0000000..d8c96fe
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/get-active-master.rb
@@ -0,0 +1,38 @@
+#!/usr/bin/env hbase-jruby
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Prints the hostname of the machine running the active master.
+
+include Java
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.ServerName
+java_import org.apache.hadoop.hbase.zookeeper.ZKWatcher
+java_import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker
+
+# disable debug/info logging on this script for clarity
+log_level = org.apache.log4j.Level::ERROR
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+
+config = HBaseConfiguration.create
+
+zk = ZKWatcher.new(config, 'get-active-master', nil)
+begin
+ puts MasterAddressTracker.getMasterAddress(zk).getHostname
+ensure
+ zk.close
+end
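This is handy in shell scripts that need to address whichever master is currently active, for example:

    active_master=$(./hbase org.jruby.Main get-active-master.rb)
    echo "active HMaster: ${active_master}"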
diff --git a/PCAP-PIC/hbase/bin/graceful_stop.sh b/PCAP-PIC/hbase/bin/graceful_stop.sh
new file mode 100644
index 0000000..89e3dd9
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/graceful_stop.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Move regions off a server then stop it. Optionally restart and reload.
+# Turn off the balancer before running this script.
+function usage {
+ echo "Usage: graceful_stop.sh [--config <conf-dir>] [-e] [--restart [--reload]] [--thrift] \
+[--rest] [-nob |--nobalancer ] <hostname>"
+ echo " thrift If we should stop/start thrift before/after the hbase stop/start"
+ echo " rest If we should stop/start rest before/after the hbase stop/start"
+ echo " restart If we should restart after graceful stop"
+ echo " reload Move offloaded regions back on to the restarted server"
+ echo " n|noack Enable noAck mode in RegionMover. This is a best effort mode for \
+moving regions"
+ echo " maxthreads xx Limit the number of threads used by the region mover. Default value is 1."
+ echo " movetimeout xx Timeout for moving regions. If regions are not moved by the timeout value,\
+exit with error. Default value is INT_MAX."
+ echo " hostname Hostname of server we are to stop"
+ echo " e|failfast Set -e so exit immediately if any command exits with non-zero status"
+ echo " nob| nobalancer Do not manage balancer states. This is only used as optimization in \
+rolling_restart.sh to avoid multiple calls to hbase shell"
+ exit 1
+}
+
+if [ $# -lt 1 ]; then
+ usage
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+# Get arguments
+restart=
+reload=
+noack=
+thrift=
+rest=
+movetimeout=2147483647
+maxthreads=1
+failfast=
+nob=false
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ --thrift) thrift=true; shift;;
+ --rest) rest=true; shift;;
+ --restart) restart=true; shift;;
+ --reload) reload=true; shift;;
+ --failfast | -e) failfast=true; shift;;
+ --noack | -n) noack="--noack"; shift;;
+ --maxthreads) shift; maxthreads=$1; shift;;
+ --movetimeout) shift; movetimeout=$1; shift;;
+ --nobalancer | -nob) nob=true; shift;;
+ --) shift; break;;
+ -*) usage ;;
+ *) break;; # terminate while loop
+ esac
+done
+
+# "$@" contains the rest. Must be at least the hostname left.
+if [ $# -lt 1 ]; then
+ usage
+fi
+
+# Emit a log line prefixed with an ISO 8601 date
+log() {
+ echo `date +%Y-%m-%dT%H:%M:%S` $1
+}
+
+# See if we should set fail fast before we do anything.
+if [ "$failfast" != "" ]; then
+ log "Set failfast, will exit immediately if any command exits with non-zero status"
+ set -e
+fi
+
+hostname=$1
+filename="/tmp/$hostname"
+
+local=
+localhostname=`/bin/hostname`
+
+if [ "$localhostname" == "$hostname" ]; then
+ local=true
+fi
+
+if [ "$nob" == "true" ]; then
+ log "[ $0 ] skipping disabling balancer -nob argument is used"
+ HBASE_BALANCER_STATE=false
+else
+ log "Disabling load balancer"
+ HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | tail -1)
+ log "Previous balancer state was $HBASE_BALANCER_STATE"
+fi
+
+log "Unloading $hostname region(s)"
+HBASE_NOEXEC=true "$bin"/hbase --config ${HBASE_CONF_DIR} org.apache.hadoop.hbase.util.RegionMover \
+--filename $filename --maxthreads $maxthreads $noack --operation "unload" --timeout $movetimeout \
+--regionserverhost $hostname
+log "Unloaded $hostname region(s)"
+
+# Stop the server(s). Have to put hostname into its own little file for hbase-daemons.sh
+hosts="/tmp/$(basename $0).$$.tmp"
+echo $hostname >> $hosts
+if [ "$thrift" != "" ]; then
+ log "Stopping thrift server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop thrift
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop thrift
+ fi
+fi
+if [ "$rest" != "" ]; then
+ log "Stopping rest server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop rest
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop rest
+ fi
+fi
+log "Stopping regionserver on $hostname"
+if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} stop regionserver
+else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} stop regionserver
+fi
+if [ "$restart" != "" ]; then
+ log "Restarting regionserver on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start regionserver
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start regionserver
+ fi
+ if [ "$thrift" != "" ]; then
+ log "Restarting thrift server on $hostname"
+ # -b 0.0.0.0 says listen on all interfaces rather than just default.
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start thrift -b 0.0.0.0
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start thrift -b 0.0.0.0
+ fi
+ fi
+ if [ "$rest" != "" ]; then
+ log "Restarting rest server on $hostname"
+ if [ "$local" == true ]; then
+ "$bin"/hbase-daemon.sh --config ${HBASE_CONF_DIR} start rest
+ else
+ "$bin"/hbase-daemons.sh --config ${HBASE_CONF_DIR} --hosts ${hosts} start rest
+ fi
+ fi
+ if [ "$reload" != "" ]; then
+ log "Reloading $hostname region(s)"
+ HBASE_NOEXEC=true "$bin"/hbase --config ${HBASE_CONF_DIR} \
+ org.apache.hadoop.hbase.util.RegionMover --filename $filename --maxthreads $maxthreads $noack \
+ --operation "load" --timeout $movetimeout --regionserverhost $hostname
+ log "Reloaded $hostname region(s)"
+ fi
+fi
+
+# Restore balancer state
+if [ "$HBASE_BALANCER_STATE" != "false" ] && [ "$nob" != "true" ]; then
+ log "Restoring balancer state to $HBASE_BALANCER_STATE"
+ echo "balance_switch $HBASE_BALANCER_STATE" | "$bin"/hbase --config ${HBASE_CONF_DIR} shell &> /dev/null
+else
+ log "[ $0 ] skipping restoring balancer"
+fi
+
+# Cleanup tmp files.
+trap "rm -f "/tmp/$(basename $0).*.tmp" &> /dev/null" EXIT
diff --git a/PCAP-PIC/hbase/bin/hbase b/PCAP-PIC/hbase/bin/hbase
new file mode 100644
index 0000000..cca5b60
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase
@@ -0,0 +1,687 @@
+#! /usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# The hbase command script. Based on the hadoop command script putting
+# in hbase classes, libs and configurations ahead of hadoop's.
+#
+# TODO: Narrow the amount of duplicated code.
+#
+# Environment Variables:
+#
+# JAVA_HOME The java implementation to use.
+#
+# HBASE_CLASSPATH Extra Java CLASSPATH entries.
+#
+# HBASE_CLASSPATH_PREFIX Extra Java CLASSPATH entries that should be
+# prefixed to the system classpath.
+#
+# HBASE_HEAPSIZE The maximum amount of heap to use.
+# Default is unset and uses the JVMs default setting
+# (usually 1/4th of the available memory).
+#
+# HBASE_LIBRARY_PATH HBase additions to JAVA_LIBRARY_PATH for adding
+# native libraries.
+#
+# HBASE_OPTS Extra Java runtime options.
+#
+# HBASE_CONF_DIR Alternate conf dir. Default is ${HBASE_HOME}/conf.
+#
+# HBASE_ROOT_LOGGER The root appender. Default is INFO,console
+#
+# JRUBY_HOME JRuby path: $JRUBY_HOME/lib/jruby.jar should exist.
+# Defaults to the jar packaged with HBase.
+#
+# JRUBY_OPTS Extra options (eg '--1.9') passed to hbase.
+# Empty by default.
+#
+# HBASE_SHELL_OPTS Extra options passed to the hbase shell.
+# Empty by default.
+#
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+# Detect if we are in hbase sources dir
+in_dev_env=false
+if [ -d "${HBASE_HOME}/target" ]; then
+ in_dev_env=true
+fi
+
+# Detect if we are in the omnibus tarball
+in_omnibus_tarball="false"
+if [ -f "${HBASE_HOME}/bin/hbase-daemons.sh" ]; then
+ in_omnibus_tarball="true"
+fi
+
+read -d '' options_string << EOF
+Options:
+ --config DIR Configuration directory to use. Default: ./conf
+ --hosts HOSTS Override the list in 'regionservers' file
+ --auth-as-server Authenticate to ZooKeeper using servers configuration
+ --internal-classpath Skip attempting to use client facing jars (WARNING: unstable results between versions)
+EOF
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ echo "Usage: hbase [<options>] <command> [<args>]"
+ echo "$options_string"
+ echo ""
+ echo "Commands:"
+ echo "Some commands take arguments. Pass no args or -h for usage."
+ echo " shell Run the HBase shell"
+ echo " hbck Run the HBase 'fsck' tool. Defaults read-only hbck1."
+ echo " Pass '-j /path/to/HBCK2.jar' to run hbase-2.x HBCK2."
+ echo " snapshot Tool for managing snapshots"
+ if [ "${in_omnibus_tarball}" = "true" ]; then
+ echo " wal Write-ahead-log analyzer"
+ echo " hfile Store file analyzer"
+ echo " zkcli Run the ZooKeeper shell"
+ echo " master Run an HBase HMaster node"
+ echo " regionserver Run an HBase HRegionServer node"
+ echo " zookeeper Run a ZooKeeper server"
+ echo " rest Run an HBase REST server"
+ echo " thrift Run the HBase Thrift server"
+ echo " thrift2 Run the HBase Thrift2 server"
+ echo " clean Run the HBase clean up script"
+ fi
+ echo " classpath Dump hbase CLASSPATH"
+ echo " mapredcp Dump CLASSPATH entries required by mapreduce"
+ echo " pe Run PerformanceEvaluation"
+ echo " ltt Run LoadTestTool"
+ echo " canary Run the Canary tool"
+ echo " version Print the version"
+ echo " completebulkload Run BulkLoadHFiles tool"
+ echo " regionsplitter Run RegionSplitter tool"
+ echo " rowcounter Run RowCounter tool"
+ echo " cellcounter Run CellCounter tool"
+ echo " pre-upgrade Run Pre-Upgrade validator tool"
+ echo " hbtop Run HBTop tool"
+ echo " CLASSNAME Run the class named CLASSNAME"
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+JAVA=$JAVA_HOME/bin/java
+
+# override default settings for this command, if applicable
+if [ -f "$HBASE_HOME/conf/hbase-env-$COMMAND.sh" ]; then
+ . "$HBASE_HOME/conf/hbase-env-$COMMAND.sh"
+fi
+
+add_size_suffix() {
+ # add an 'm' suffix if the argument is missing one, otherwise use what's there
+ local val="$1"
+ local lastchar=${val: -1}
+ if [[ "mMgG" == *$lastchar* ]]; then
+ echo $val
+ else
+ echo ${val}m
+ fi
+}
+
+if [[ -n "$HBASE_HEAPSIZE" ]]; then
+ JAVA_HEAP_MAX="-Xmx$(add_size_suffix $HBASE_HEAPSIZE)"
+fi
+
+if [[ -n "$HBASE_OFFHEAPSIZE" ]]; then
+ JAVA_OFFHEAP_MAX="-XX:MaxDirectMemorySize=$(add_size_suffix $HBASE_OFFHEAPSIZE)"
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+ORIG_IFS=$IFS
+IFS=
+
+# CLASSPATH initially contains $HBASE_CONF_DIR
+CLASSPATH="${HBASE_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+add_to_cp_if_exists() {
+ if [ -d "$@" ]; then
+ CLASSPATH=${CLASSPATH}:"$@"
+ fi
+}
+
+# For releases, add hbase & webapps to CLASSPATH
+# Webapps must come first else it messes up Jetty
+if [ -d "$HBASE_HOME/hbase-webapps" ]; then
+ add_to_cp_if_exists "${HBASE_HOME}"
+fi
+#add if we are in a dev environment
+if [ -d "$HBASE_HOME/hbase-server/target/hbase-webapps" ]; then
+ if [ "$COMMAND" = "thrift" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-thrift/target"
+ elif [ "$COMMAND" = "thrift2" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-thrift/target"
+ elif [ "$COMMAND" = "rest" ] ; then
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-rest/target"
+ else
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target"
+ # Needed for GetJavaProperty check below
+ add_to_cp_if_exists "${HBASE_HOME}/hbase-server/target/classes"
+ fi
+fi
+
+#If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+# Allow this functionality to be disabled
+if [ "$HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP" != "true" ] ; then
+ HADOOP_IN_PATH=$(PATH="${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH" which hadoop 2>/dev/null)
+fi
+
+# Add libs to CLASSPATH
+declare shaded_jar
+
+if [ "${INTERNAL_CLASSPATH}" != "true" ]; then
+ # find our shaded jars
+ declare shaded_client
+ declare shaded_client_byo_hadoop
+ declare shaded_mapreduce
+ for f in "${HBASE_HOME}"/lib/shaded-clients/hbase-shaded-client*.jar; do
+ if [[ "${f}" =~ byo-hadoop ]]; then
+ shaded_client_byo_hadoop="${f}"
+ else
+ shaded_client="${f}"
+ fi
+ done
+ for f in "${HBASE_HOME}"/lib/shaded-clients/hbase-shaded-mapreduce*.jar; do
+ shaded_mapreduce="${f}"
+ done
+
+ # If command can use our shaded client, use it
+ declare -a commands_in_client_jar=("classpath" "version" "hbtop")
+ for c in "${commands_in_client_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_client_byo_hadoop}"
+ else
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_client}"
+ fi
+ break
+ fi
+ done
+
+ # If command needs our shaded mapreduce, use it
+ # N.B "mapredcp" is not included here because in the shaded case it skips our built classpath
+ declare -a commands_in_mr_jar=("hbck" "snapshot" "canary" "regionsplitter" "pre-upgrade")
+ for c in "${commands_in_mr_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ break
+ fi
+ done
+
+ # Some commands specifically only can use shaded mapreduce when we'll get a full hadoop classpath at runtime
+ if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ declare -a commands_in_mr_need_hadoop=("backup" "restore" "rowcounter" "cellcounter")
+ for c in "${commands_in_mr_need_hadoop[@]}"; do
+ if [ "${COMMAND}" = "${c}" ]; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ break
+ fi
+ done
+ fi
+fi
+
+
+if [ -n "${shaded_jar}" ] && [ -f "${shaded_jar}" ]; then
+ CLASSPATH="${CLASSPATH}:${shaded_jar}"
+# fall through to grabbing all the lib jars and hope we're in the omnibus tarball
+#
+# N.B. shell specifically can't rely on the shaded artifacts because RSGroups is only
+# available as non-shaded
+#
+# N.B. pe and ltt can't easily rely on shaded artifacts because they live in hbase-mapreduce:test-jar
+# and need some other jars that haven't been relocated. Currently enumerating that list
+# is too hard to be worth it.
+#
+else
+ for f in $HBASE_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ # make it easier to check for shaded/not later on.
+ shaded_jar=""
+fi
+for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/*.jar; do
+ if [[ ! "${f}" =~ ^.*/htrace-core-3.*\.jar$ ]] && \
+ [ "${f}" != "htrace-core.jar$" ] && \
+ [[ ! "${f}" =~ ^.*/slf4j-log4j.*$ ]]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ fi
+done
+
+# default log directory & file
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+if [ "$HBASE_LOGFILE" = "" ]; then
+ HBASE_LOGFILE='hbase.log'
+fi
+
+function append_path() {
+ if [ -z "$1" ]; then
+ echo "$2"
+ else
+ echo "$1:$2"
+ fi
+}
+
+JAVA_PLATFORM=""
+
+# if HBASE_LIBRARY_PATH is defined lets use it as first or second option
+if [ "$HBASE_LIBRARY_PATH" != "" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "$HBASE_LIBRARY_PATH")
+fi
+
+#If configured and available, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+if [ -n "${HADOOP_IN_PATH}" ] && [ -f "${HADOOP_IN_PATH}" ]; then
+ # If built hbase, temporarily add hbase-server*.jar to classpath for GetJavaProperty
+ # Exclude hbase-server*-tests.jar
+ temporary_cp=
+ for f in "${HBASE_HOME}"/lib/hbase-server*.jar; do
+ if [[ ! "${f}" =~ ^.*\-tests\.jar$ ]]; then
+ temporary_cp=":$f"
+ fi
+ done
+ HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH="$CLASSPATH${temporary_cp}" "${HADOOP_IN_PATH}" \
+ org.apache.hadoop.hbase.util.GetJavaProperty java.library.path)
+ if [ -n "$HADOOP_JAVA_LIBRARY_PATH" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "${JAVA_LIBRARY_PATH}" "$HADOOP_JAVA_LIBRARY_PATH")
+ fi
+ CLASSPATH=$(append_path "${CLASSPATH}" "$(${HADOOP_IN_PATH} classpath 2>/dev/null)")
+else
+ # Otherwise, if we're providing Hadoop we should include htrace 3 if we were built with a version that needs it.
+ for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/htrace-core-3*.jar "${HBASE_HOME}"/lib/client-facing-thirdparty/htrace-core.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ # Some commands require special handling when using shaded jars. For these cases, we rely on hbase-shaded-mapreduce
+ # instead of hbase-shaded-client* because we make use of some IA.Private classes that aren't in the latter. However,
+ # we don't invoke them using the "hadoop jar" command so we need to ensure there are some Hadoop classes available
+ # when we're not doing runtime hadoop classpath lookup.
+ #
+ # luckily the set of classes we need are those packaged in the shaded-client.
+ for c in "${commands_in_mr_jar[@]}"; do
+ if [ "${COMMAND}" = "${c}" ] && [ -n "${shaded_jar}" ]; then
+ CLASSPATH="${CLASSPATH}:${shaded_client:?We couldn\'t find the shaded client jar even though we did find the shaded MR jar. for command ${COMMAND} we need both. please use --internal-classpath as a workaround.}"
+ break
+ fi
+ done
+fi
+
+# Add user-specified CLASSPATH last
+if [ "$HBASE_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HBASE_CLASSPATH}
+fi
+
+# Add user-specified CLASSPATH prefix first
+if [ "$HBASE_CLASSPATH_PREFIX" != "" ]; then
+ CLASSPATH=${HBASE_CLASSPATH_PREFIX}:${CLASSPATH}
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ HBASE_HOME=`cygpath -d "$HBASE_HOME"`
+ HBASE_LOG_DIR=`cygpath -d "$HBASE_LOG_DIR"`
+fi
+
+if [ -d "${HBASE_HOME}/build/native" -o -d "${HBASE_HOME}/lib/native" ]; then
+ if [ -z $JAVA_PLATFORM ]; then
+ JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
+ fi
+ if [ -d "$HBASE_HOME/build/native" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "${HBASE_HOME}/build/native/${JAVA_PLATFORM}/lib")
+ fi
+
+ if [ -d "${HBASE_HOME}/lib/native" ]; then
+ JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "${HBASE_HOME}/lib/native/${JAVA_PLATFORM}")
+ fi
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+#Set the right GC options based on the what we are running
+declare -a server_cmds=("master" "regionserver" "thrift" "thrift2" "rest" "avro" "zookeeper")
+for cmd in ${server_cmds[@]}; do
+ if [[ $cmd == $COMMAND ]]; then
+ server=true
+ break
+ fi
+done
+
+if [[ $server ]]; then
+ HBASE_OPTS="$HBASE_OPTS $SERVER_GC_OPTS"
+else
+ HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS"
+fi
+
+if [ "$AUTH_AS_SERVER" == "true" ] || [ "$COMMAND" = "hbck" ]; then
+ if [ -n "$HBASE_SERVER_JAAS_OPTS" ]; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_SERVER_JAAS_OPTS"
+ else
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
+ fi
+fi
+
+# check if the command needs jline
+declare -a jline_cmds=("zkcli" "org.apache.hadoop.hbase.zookeeper.ZKMainServer")
+for cmd in "${jline_cmds[@]}"; do
+ if [[ $cmd == "$COMMAND" ]]; then
+ jline_needed=true
+ break
+ fi
+done
+
+# for jruby
+# (1) for the commands which need jruby (see jruby_cmds defined below)
+# A. when JRUBY_HOME is specified explicitly, eg. export JRUBY_HOME=/usr/local/share/jruby
+# CLASSPATH and HBASE_OPTS are updated according to JRUBY_HOME specified
+# B. when JRUBY_HOME is not specified explicitly
+# add jruby packaged with HBase to CLASSPATH
+# (2) for other commands, do nothing
+
+# check if the command needs jruby
+declare -a jruby_cmds=("shell" "org.jruby.Main")
+for cmd in "${jruby_cmds[@]}"; do
+ if [[ $cmd == "$COMMAND" ]]; then
+ jruby_needed=true
+ break
+ fi
+done
+
+add_maven_deps_to_classpath() {
+ f="${HBASE_HOME}/hbase-build-configuration/target/$1"
+
+ if [ ! -f "${f}" ]; then
+ echo "As this is a development environment, we need ${f} to be generated from maven (command: mvn install -DskipTests)"
+ exit 1
+ fi
+ CLASSPATH=${CLASSPATH}:$(cat "${f}")
+}
+
+#Add the development env class path stuff
+if $in_dev_env; then
+ add_maven_deps_to_classpath "cached_classpath.txt"
+
+ if [[ $jline_needed ]]; then
+ add_maven_deps_to_classpath "cached_classpath_jline.txt"
+ elif [[ $jruby_needed ]]; then
+ add_maven_deps_to_classpath "cached_classpath_jruby.txt"
+ fi
+fi
+
+# the command needs jruby
+if [[ $jruby_needed ]]; then
+ if [ "$JRUBY_HOME" != "" ]; then # JRUBY_HOME is specified explicitly, eg. export JRUBY_HOME=/usr/local/share/jruby
+ # add jruby.jar into CLASSPATH
+ CLASSPATH="$JRUBY_HOME/lib/jruby.jar:$CLASSPATH"
+
+ # add jruby to HBASE_OPTS
+ HBASE_OPTS="$HBASE_OPTS -Djruby.home=$JRUBY_HOME -Djruby.lib=$JRUBY_HOME/lib"
+
+ else # JRUBY_HOME is not specified explicitly
+ if ! $in_dev_env; then # not in dev environment
+ # add jruby packaged with HBase to CLASSPATH
+ JRUBY_PACKAGED_WITH_HBASE="$HBASE_HOME/lib/ruby/*.jar"
+ for jruby_jar in $JRUBY_PACKAGED_WITH_HBASE; do
+ CLASSPATH=$jruby_jar:$CLASSPATH;
+ done
+ fi
+ fi
+fi
+
+# figure out which class to run
+if [ "$COMMAND" = "shell" ] ; then
+ #find the hbase ruby sources
+ if [ -d "$HBASE_HOME/lib/ruby" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/lib/ruby"
+ else
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.ruby.sources=$HBASE_HOME/hbase-shell/src/main/ruby"
+ fi
+ HBASE_OPTS="$HBASE_OPTS $HBASE_SHELL_OPTS"
+ CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb"
+elif [ "$COMMAND" = "hbck" ] ; then
+ # Look for the -j /path/to/HBCK2.jar parameter. Else pass through to hbck.
+ case "${1}" in
+ -j)
+ # Found -j parameter. Add arg to CLASSPATH and set CLASS to HBCK2.
+ shift
+ JAR="${1}"
+ if [ ! -f "${JAR}" ]; then
+ echo "${JAR} file not found!"
+ echo "Usage: hbase [<options>] hbck -jar /path/to/HBCK2.jar [<args>]"
+ exit 1
+ fi
+ CLASSPATH="${JAR}:${CLASSPATH}";
+ CLASS="org.apache.hbase.HBCK2"
+ shift # past argument=value
+ ;;
+ *)
+ CLASS='org.apache.hadoop.hbase.util.HBaseFsck'
+ ;;
+ esac
+elif [ "$COMMAND" = "wal" ] ; then
+ CLASS='org.apache.hadoop.hbase.wal.WALPrettyPrinter'
+elif [ "$COMMAND" = "hfile" ] ; then
+ CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
+elif [ "$COMMAND" = "zkcli" ] ; then
+ CLASS="org.apache.hadoop.hbase.zookeeper.ZKMainServer"
+ for f in $HBASE_HOME/lib/zkcli/*.jar; do
+ CLASSPATH="${CLASSPATH}:$f";
+ done
+elif [ "$COMMAND" = "upgrade" ] ; then
+ echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
+ echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."
+ exit 1
+elif [ "$COMMAND" = "snapshot" ] ; then
+ SUBCOMMAND=$1
+ shift
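+ # e.g. `hbase snapshot export <args>` dispatches to ExportSnapshot with <args>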
+ if [ "$SUBCOMMAND" = "create" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.CreateSnapshot"
+ elif [ "$SUBCOMMAND" = "info" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
+ elif [ "$SUBCOMMAND" = "export" ] ; then
+ CLASS="org.apache.hadoop.hbase.snapshot.ExportSnapshot"
+ else
+ echo "Usage: hbase [<options>] snapshot <subcommand> [<args>]"
+ echo "$options_string"
+ echo ""
+ echo "Subcommands:"
+ echo " create Create a new snapshot of a table"
+ echo " info Tool for dumping snapshot information"
+ echo " export Export an existing snapshot"
+ exit 1
+ fi
+elif [ "$COMMAND" = "master" ] ; then
+ CLASS='org.apache.hadoop.hbase.master.HMaster'
+ if [ "$1" != "stop" ] && [ "$1" != "clear" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS"
+ fi
+elif [ "$COMMAND" = "regionserver" ] ; then
+ CLASS='org.apache.hadoop.hbase.regionserver.HRegionServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS $HBASE_REGIONSERVER_JMX_OPTS"
+ fi
+elif [ "$COMMAND" = "thrift" ] ; then
+ CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
+ fi
+elif [ "$COMMAND" = "thrift2" ] ; then
+ CLASS='org.apache.hadoop.hbase.thrift2.ThriftServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_THRIFT_OPTS"
+ fi
+elif [ "$COMMAND" = "rest" ] ; then
+ CLASS='org.apache.hadoop.hbase.rest.RESTServer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REST_OPTS"
+ fi
+elif [ "$COMMAND" = "zookeeper" ] ; then
+ CLASS='org.apache.hadoop.hbase.zookeeper.HQuorumPeer'
+ if [ "$1" != "stop" ] ; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_ZOOKEEPER_OPTS"
+ fi
+elif [ "$COMMAND" = "clean" ] ; then
+ case $1 in
+ --cleanZk|--cleanHdfs|--cleanAll)
+ matches="yes" ;;
+ *) ;;
+ esac
+ if [ $# -ne 1 -o "$matches" = "" ]; then
+ echo "Usage: hbase clean (--cleanZk|--cleanHdfs|--cleanAll)"
+ echo "Options: "
+ echo " --cleanZk cleans hbase related data from zookeeper."
+ echo " --cleanHdfs cleans hbase related data from hdfs."
+ echo " --cleanAll cleans hbase related data from both zookeeper and hdfs."
+ exit 1;
+ fi
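+ # e.g. `hbase clean --cleanAll` removes hbase data from both zookeeper and hdfs,
+ # delegating to hbase-cleanup.sh below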
+ "$bin"/hbase-cleanup.sh --config ${HBASE_CONF_DIR} $@
+ exit $?
+elif [ "$COMMAND" = "mapredcp" ] ; then
+ # If we didn't find a jar above, this will just be blank and the
+ # check below will then default back to the internal classpath.
+ shaded_jar="${shaded_mapreduce}"
+ if [ "${INTERNAL_CLASSPATH}" != "true" ] && [ -f "${shaded_jar}" ]; then
+ echo -n "${shaded_jar}"
+ for f in "${HBASE_HOME}"/lib/client-facing-thirdparty/*.jar; do
+ if [[ ! "${f}" =~ ^.*/htrace-core-3.*\.jar$ ]] && \
+ [ "${f}" != "htrace-core.jar$" ] && \
+ [[ ! "${f}" =~ ^.*/slf4j-log4j.*$ ]]; then
+ echo -n ":${f}"
+ fi
+ done
+ echo ""
+ exit 0
+ fi
+ CLASS='org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool'
+elif [ "$COMMAND" = "classpath" ] ; then
+ echo "$CLASSPATH"
+ exit 0
+elif [ "$COMMAND" = "pe" ] ; then
+ CLASS='org.apache.hadoop.hbase.PerformanceEvaluation'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_PE_OPTS"
+elif [ "$COMMAND" = "ltt" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.LoadTestTool'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_LTT_OPTS"
+elif [ "$COMMAND" = "canary" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.CanaryTool'
+ HBASE_OPTS="$HBASE_OPTS $HBASE_CANARY_OPTS"
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.VersionInfo'
+elif [ "$COMMAND" = "regionsplitter" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.RegionSplitter'
+elif [ "$COMMAND" = "rowcounter" ] ; then
+ CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
+elif [ "$COMMAND" = "cellcounter" ] ; then
+ CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
+elif [ "$COMMAND" = "pre-upgrade" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.PreUpgradeValidator'
+elif [ "$COMMAND" = "completebulkload" ] ; then
+ CLASS='org.apache.hadoop.hbase.tool.BulkLoadHFilesTool'
+elif [ "$COMMAND" = "hbtop" ] ; then
+ CLASS='org.apache.hadoop.hbase.hbtop.HBTop'
+ if [ -n "${shaded_jar}" ] ; then
+ for f in "${HBASE_HOME}"/lib/hbase-hbtop*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ for f in "${HBASE_HOME}"/lib/commons-lang3*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+ done
+ fi
+
+ HBASE_OPTS="${HBASE_OPTS} -Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+else
+ CLASS=$COMMAND
+fi
+
+# Have JVM dump heap if we run out of memory. Files will be in the launch
+# directory and are named like the following: java_pid21612.hprof. Apparently
+# it doesn't 'cost' to have this flag enabled. It's a 1.6 flag only. See:
+# http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better
+HBASE_OPTS="$HBASE_OPTS -Dhbase.log.dir=$HBASE_LOG_DIR"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.log.file=$HBASE_LOGFILE"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.id.str=$HBASE_IDENT_STRING"
+HBASE_OPTS="$HBASE_OPTS -Dhbase.root.logger=${HBASE_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+ export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH"
+fi
+
+# Enable security logging on the master and regionserver only
+if [ "$COMMAND" = "master" ] || [ "$COMMAND" = "regionserver" ]; then
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,RFAS}"
+else
+ HBASE_OPTS="$HBASE_OPTS -Dhbase.security.logger=${HBASE_SECURITY_LOGGER:-INFO,NullAppender}"
+fi
+
+HEAP_SETTINGS="$JAVA_HEAP_MAX $JAVA_OFFHEAP_MAX"
+# by now if we're running a command it means we need logging
+for f in ${HBASE_HOME}/lib/client-facing-thirdparty/slf4j-log4j*.jar; do
+ if [ -f "${f}" ]; then
+ CLASSPATH="${CLASSPATH}:${f}"
+ break
+ fi
+done
+
+# Exec unless HBASE_NOEXEC is set.
+export CLASSPATH
+if [ "${DEBUG}" = "true" ]; then
+ echo "classpath=${CLASSPATH}" >&2
+ HBASE_OPTS="${HBASE_OPTS} -Xdiag"
+fi
+
+if [ "${HBASE_NOEXEC}" != "" ]; then
+ "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@"
+else
+ export JVM_PID="$$"
+ exec "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $HBASE_OPTS $CLASS "$@"
+fi
diff --git a/PCAP-PIC/hbase/bin/hbase-cleanup.sh b/PCAP-PIC/hbase/bin/hbase-cleanup.sh
new file mode 100644
index 0000000..3a764df
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-cleanup.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# cleans hbase related data from zookeeper and hdfs if no hbase process is alive.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+
+usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)"
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+
+case $1 in
+ --cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)
+ matches="yes" ;;
+ *) ;;
+esac
+if [ $# -ne 1 -o "$matches" = "" ]; then
+ echo $usage
+ exit 1;
+fi
+
+format_option=$1;
+
+zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+hrootdir=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir`
+if [ "$hrootdir" == "null" ]; then hrootdir="file:///tmp/hbase-${USER}/hbase"; fi
+
+check_for_znodes() {
+ command=$1;
+ case $command in
+ regionservers)
+ zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+ if [ "$zchild" == "null" ]; then zchild="rs"; fi
+ ;;
+ backupmasters)
+ zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.backup.masters`
+ if [ "$zchild" == "null" ]; then zchild="backup-masters"; fi
+ ;;
+ esac
+ znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+ if [ "$znodes" != "" ]; then
+ echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data."
+ echo #force a newline
+ exit 1;
+ else
+ echo -n "All ZNode(s) of $command are expired."
+ fi
+ echo #force a newline
+}
+
+execute_zk_command() {
+ command=$1;
+ "$bin"/hbase zkcli $command 2>&1
+}
+
+execute_hdfs_command() {
+ command=$1;
+ "$bin"/hbase org.apache.hadoop.fs.FsShell $command 2>&1
+}
+
+execute_clean_acls() {
+ command=$1;
+ "$bin"/hbase org.apache.hadoop.hbase.zookeeper.ZkAclReset $command 2>&1
+}
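+# e.g. execute_zk_command "rmr ${zparent}" removes the parent znode tree, and
+# execute_hdfs_command "-rm -R ${hrootdir}" removes the hbase root dir (see clean_up)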
+
+clean_up() {
+ case $1 in
+ --cleanZk)
+ execute_zk_command "rmr ${zparent}";
+ ;;
+ --cleanHdfs)
+ execute_hdfs_command "-rm -R ${hrootdir}"
+ ;;
+ --cleanAll)
+ execute_zk_command "rmr ${zparent}";
+ execute_hdfs_command "-rm -R ${hrootdir}"
+ ;;
+ --cleanAcls)
+ execute_clean_acls;
+ ;;
+ *)
+ ;;
+ esac
+}
+
+check_znode_exists() {
+ command=$1
+ "$bin"/hbase zkcli stat $command 2>&1 | grep "Node does not exist\|Connection refused"
+}
+
+check_znode_exists $zparent
+if [ $? -ne 0 ]; then
+ # make sure the online region server(s) znode(s) have been deleted before continuing
+ check_for_znodes regionservers
+ # make sure the backup master(s) znode(s) have been deleted before continuing
+ check_for_znodes backupmasters
+ # make sure the master znode has been deleted before continuing
+ zmaster=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master`
+ if [ "$zmaster" == "null" ]; then zmaster="master"; fi
+ zmaster=$zparent/$zmaster
+ check_znode_exists $zmaster
+ if [ $? -ne 0 ]; then
+ echo -n "Master ZNode is not expired. Exiting without cleaning hbase data."
+ echo #force a new line
+ exit 1
+ else
+ echo "Active Master ZNode also expired."
+ fi
+ echo #force a newline
+else
+ echo "HBase parent znode ${zparent} does not exist."
+fi
+
+# cleans zookeeper and/or hdfs data.
+clean_up $format_option
diff --git a/PCAP-PIC/hbase/bin/hbase-common.sh b/PCAP-PIC/hbase/bin/hbase-common.sh
new file mode 100644
index 0000000..0a474f7
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-common.sh
@@ -0,0 +1,41 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+# Shared function to wait for a process to end. Takes the pid and the command name as parameters
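+# e.g. hbase-daemon.sh's stop case invokes: waitForProcessEnd $pidToKill $command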
+waitForProcessEnd() {
+ pidKilled=$1
+ commandName=$2
+ processedAt=`date +%s`
+ while kill -0 $pidKilled > /dev/null 2>&1;
+ do
+ echo -n "."
+ sleep 1;
+ # if process persists more than $HBASE_STOP_TIMEOUT (default 1200 sec) no mercy
+ if [ $(( `date +%s` - $processedAt )) -gt ${HBASE_STOP_TIMEOUT:-1200} ]; then
+ break;
+ fi
+ done
+ # process still there : kill -9
+ if kill -0 $pidKilled > /dev/null 2>&1; then
+ echo -n force stopping $commandName with kill -9 $pidKilled
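+ # "$logout" (jstack dump file) is assumed to be defined by the sourcing script;
+ # it is not set anywhere in this file, so the redirect fails if it is unset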
+ $JAVA_HOME/bin/jstack -l $pidKilled > "$logout" 2>&1
+ kill -9 $pidKilled > /dev/null 2>&1
+ fi
+ # Add a CR after we're done w/ dots.
+ echo
+}
diff --git a/PCAP-PIC/hbase/bin/hbase-config.cmd b/PCAP-PIC/hbase/bin/hbase-config.cmd
new file mode 100644
index 0000000..5c1f186
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-config.cmd
@@ -0,0 +1,78 @@
+@rem/*
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem included in all the hbase scripts with source command
+@rem should not be executable directly
+@rem also should not be passed any arguments, since we need original $*
+@rem Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
+
+@rem Make sure java environment is set
+@rem
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+if "%1" == "--config" (
+ set HBASE_CONF_DIR=%2
+ shift
+ shift
+)
+
+@rem the root of the hbase installation
+if not defined HBASE_HOME (
+ set HBASE_HOME=%HBASE_BIN_PATH%\..
+)
+
+@rem Allow alternate hbase conf dir location.
+if not defined HBASE_CONF_DIR (
+ set HBASE_CONF_DIR=%HBASE_HOME%\conf
+)
+
+@rem List of hbase regions servers.
+if not defined HBASE_REGIONSERVERS (
+ set HBASE_REGIONSERVERS=%HBASE_CONF_DIR%\regionservers
+)
+
+@rem List of hbase secondary masters.
+if not defined HBASE_BACKUP_MASTERS (
+ set HBASE_BACKUP_MASTERS=%HBASE_CONF_DIR%\backup-masters
+)
+
+@rem Source the hbase-env.sh. Will have JAVA_HOME defined.
+if exist "%HBASE_CONF_DIR%\hbase-env.cmd" (
+ call "%HBASE_CONF_DIR%\hbase-env.cmd"
+)
+
+if not defined JAVA_HOME (
+ echo Warning: JAVA_HOME environment variable is not set. Defaulting to c:\apps\java
+ set JAVA_HOME=c:\apps\java
+)
+
+if not exist "%JAVA_HOME%\bin\java.exe" (
+ echo Error: JAVA_HOME is incorrectly set or could not find java at the location %JAVA_HOME%\bin\
+ exit /B 2
+)
+
+set JAVA="%JAVA_HOME%\bin\java"
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+) \ No newline at end of file
diff --git a/PCAP-PIC/hbase/bin/hbase-config.sh b/PCAP-PIC/hbase/bin/hbase-config.sh
new file mode 100644
index 0000000..1054751
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-config.sh
@@ -0,0 +1,170 @@
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# included in all the hbase scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+# Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
+
+# resolve links - "${BASH_SOURCE-$0}" may be a softlink
+
+this="${BASH_SOURCE-$0}"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin">/dev/null; pwd`
+this="$bin/$script"
+
+# the root of the hbase installation
+if [ -z "$HBASE_HOME" ]; then
+ export HBASE_HOME=`dirname "$this"`/..
+fi
+
+# check to see if the conf dir or hbase home are given as optional arguments
+while [ $# -gt 1 ]
+do
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ HBASE_CONF_DIR=$confdir
+ elif [ "--hosts" = "$1" ]
+ then
+ shift
+ hosts=$1
+ shift
+ HBASE_REGIONSERVERS=$hosts
+ elif [ "--auth-as-server" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ AUTH_AS_SERVER="true"
+ elif [ "--autostart-window-size" = "$1" ]
+ then
+ shift
+ AUTOSTART_WINDOW_SIZE=$(( $1 + 0 ))
+ if [ $AUTOSTART_WINDOW_SIZE -lt 0 ]; then
+ echo "Invalid value for --autostart-window-size, should be a positive integer"
+ exit 1
+ fi
+ shift
+ elif [ "--autostart-window-retry-limit" = "$1" ]
+ then
+ shift
+ AUTOSTART_WINDOW_RETRY_LIMIT=$(( $1 + 0 ))
+ if [ $AUTOSTART_WINDOW_RETRY_LIMIT -lt 0 ]; then
+ echo "Invalid value for --autostart-window-retry-limit, should be a positive integer"
+ exit 1
+ fi
+ shift
+ elif [ "--internal-classpath" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ INTERNAL_CLASSPATH="true"
+ elif [ "--debug" = "$1" ]
+ then
+ shift
+ # shellcheck disable=SC2034
+ DEBUG="true"
+ else
+ # Presume we are at end of options and break
+ break
+ fi
+done
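+# Example (illustrative): a sourcing script may be invoked as
+#   hbase-daemon.sh --config /etc/hbase/conf --hosts /path/to/regionservers start regionserver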
+
+# Allow alternate hbase conf dir location.
+HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
+# List of hbase regions servers.
+HBASE_REGIONSERVERS="${HBASE_REGIONSERVERS:-$HBASE_CONF_DIR/regionservers}"
+# List of hbase secondary masters.
+HBASE_BACKUP_MASTERS="${HBASE_BACKUP_MASTERS:-$HBASE_CONF_DIR/backup-masters}"
+if [ -n "$HBASE_JMX_BASE" ] && [ -z "$HBASE_JMX_OPTS" ]; then
+ HBASE_JMX_OPTS="$HBASE_JMX_BASE"
+fi
+# Thrift JMX opts
+if [ -n "$HBASE_JMX_OPTS" ] && [ -z "$HBASE_THRIFT_JMX_OPTS" ]; then
+ HBASE_THRIFT_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10103"
+fi
+# Thrift opts
+if [ -z "$HBASE_THRIFT_OPTS" ]; then
+ export HBASE_THRIFT_OPTS="$HBASE_THRIFT_JMX_OPTS"
+fi
+
+# REST JMX opts
+if [ -n "$HBASE_JMX_OPTS" ] && [ -z "$HBASE_REST_JMX_OPTS" ]; then
+ HBASE_REST_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10105"
+fi
+# REST opts
+if [ -z "$HBASE_REST_OPTS" ]; then
+ export HBASE_REST_OPTS="$HBASE_REST_JMX_OPTS"
+fi
+
+# Source the hbase-env.sh. Will have JAVA_HOME defined.
+# HBASE-7817 - Source the hbase-env.sh only if it has not already been done. HBASE_ENV_INIT keeps track of it.
+if [ -z "$HBASE_ENV_INIT" ] && [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
+ . "${HBASE_CONF_DIR}/hbase-env.sh"
+ export HBASE_ENV_INIT="true"
+fi
+
+# Verify if hbase has the mlock agent
+if [ "$HBASE_REGIONSERVER_MLOCK" = "true" ]; then
+ MLOCK_AGENT="$HBASE_HOME/lib/native/libmlockall_agent.so"
+ if [ ! -f "$MLOCK_AGENT" ]; then
+ cat 1>&2 <<EOF
+Unable to find mlockall_agent, hbase must be compiled with -Pnative
+EOF
+ exit 1
+ fi
+ if [ -z "$HBASE_REGIONSERVER_UID" ] || [ "$HBASE_REGIONSERVER_UID" == "$USER" ]; then
+ HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -agentpath:$MLOCK_AGENT"
+ else
+ HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -agentpath:$MLOCK_AGENT=user=$HBASE_REGIONSERVER_UID"
+ fi
+fi
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# Now having JAVA_HOME defined is required
+if [ -z "$JAVA_HOME" ]; then
+ cat 1>&2 <<EOF
++======================================================================+
+| Error: JAVA_HOME is not set |
++----------------------------------------------------------------------+
+| Please download the latest Sun JDK from the Sun Java web site |
+| > http://www.oracle.com/technetwork/java/javase/downloads |
+| |
+| HBase requires Java 1.8 or later. |
++======================================================================+
+EOF
+ exit 1
+fi
diff --git a/PCAP-PIC/hbase/bin/hbase-daemon.sh b/PCAP-PIC/hbase/bin/hbase-daemon.sh
new file mode 100644
index 0000000..0e55665
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-daemon.sh
@@ -0,0 +1,371 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Runs a Hadoop hbase command as a daemon.
+#
+# Environment Variables
+#
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_LOG_DIR Where log files are stored. PWD by default.
+# HBASE_PID_DIR The pid files are stored. /tmp by default.
+# HBASE_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# HBASE_NICENESS The scheduling priority for daemons. Defaults to 0.
+# HBASE_STOP_TIMEOUT Time, in seconds, after which we kill -9 the server if it has not stopped.
+# Default 1200 seconds.
+#
+# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh
+
+usage="Usage: hbase-daemon.sh [--config <conf-dir>]\
+ [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>]\
+ (start|stop|restart|autostart|autorestart|foreground_start) <hbase-command> \
+ <args...>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+# get arguments
+startStop=$1
+shift
+
+command=$1
+shift
+
+hbase_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv -f "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv -f "$log" "$log.$num";
+ fi
+}
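+# e.g. hbase_rotate_log "$HBASE_LOGOUT" shifts file -> file.1 -> ... -> file.5,
+# keeping at most 5 (or $2) numbered backups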
+
+cleanAfterRun() {
+ if [ -f ${HBASE_PID} ]; then
+ # If the process is still running time to tear it down.
+ kill -9 `cat ${HBASE_PID}` > /dev/null 2>&1
+ rm -f ${HBASE_PID} > /dev/null 2>&1
+ fi
+
+ if [ -f ${HBASE_ZNODE_FILE} ]; then
+ if [ "$command" = "master" ]; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS" $bin/hbase master clear > /dev/null 2>&1
+ else
+ #call ZK to delete the node
+ ZNODE=`cat ${HBASE_ZNODE_FILE}`
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS" $bin/hbase zkcli delete ${ZNODE} > /dev/null 2>&1
+ fi
+ rm ${HBASE_ZNODE_FILE}
+ fi
+}
+
+check_before_start(){
+ # check that the process is not already running
+ mkdir -p "$HBASE_PID_DIR"
+ if [ -f $HBASE_PID ]; then
+ if kill -0 `cat $HBASE_PID` > /dev/null 2>&1; then
+ echo $command running as process `cat $HBASE_PID`. Stop it first.
+ exit 1
+ fi
+ fi
+}
+
+wait_until_done ()
+{
+ p=$1
+ cnt=${HBASE_SLAVE_TIMEOUT:-300}
+ origcnt=$cnt
+ while kill -0 $p > /dev/null 2>&1; do
+ if [ $cnt -gt 1 ]; then
+ cnt=`expr $cnt - 1`
+ sleep 1
+ else
+ echo "Process did not complete after $origcnt seconds, killing."
+ kill -9 $p
+ exit 1
+ fi
+ done
+ return 0
+}
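+# e.g. the restart case below runs `stop` in the background and then calls
+# wait_until_done $! to block until it completes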
+
+# get log directory
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+mkdir -p "$HBASE_LOG_DIR"
+
+if [ "$HBASE_PID_DIR" = "" ]; then
+ HBASE_PID_DIR=/tmp
+fi
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+ export HBASE_IDENT_STRING="$USER"
+fi
+
+# Some variables
+# Work out java location so can print version into log.
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME
+export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log
+
+if [ -z "${HBASE_ROOT_LOGGER}" ]; then
+#export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"INFO,RFA"}
+export HBASE_ROOT_LOGGER=${HBASE_ROOT_LOGGER:-"ERROR,RFA"}
+fi
+
+if [ -z "${HBASE_SECURITY_LOGGER}" ]; then
+#export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"INFO,RFAS"}
+export HBASE_SECURITY_LOGGER=${HBASE_SECURITY_LOGGER:-"ERROR,RFAS"}
+fi
+
+HBASE_LOGOUT=${HBASE_LOGOUT:-"$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out"}
+HBASE_LOGGC=${HBASE_LOGGC:-"$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.gc"}
+HBASE_LOGLOG=${HBASE_LOGLOG:-"${HBASE_LOG_DIR}/${HBASE_LOGFILE}"}
+HBASE_PID=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid
+export HBASE_ZNODE_FILE=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.znode
+export HBASE_AUTOSTART_FILE=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.autostart
+
+if [ -n "$SERVER_GC_OPTS" ]; then
+ export SERVER_GC_OPTS=${SERVER_GC_OPTS/"-Xloggc:<FILE-PATH>"/"-Xloggc:${HBASE_LOGGC}"}
+fi
+if [ -n "$CLIENT_GC_OPTS" ]; then
+ export CLIENT_GC_OPTS=${CLIENT_GC_OPTS/"-Xloggc:<FILE-PATH>"/"-Xloggc:${HBASE_LOGGC}"}
+fi
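+# Example (assumed hbase-env.sh convention): the opts may carry the literal
+# placeholder, e.g. export SERVER_GC_OPTS="-verbose:gc -Xloggc:<FILE-PATH>",
+# which the substitution above rewrites to the per-daemon GC log ${HBASE_LOGGC}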
+
+# Set default scheduling priority
+if [ "$HBASE_NICENESS" = "" ]; then
+ export HBASE_NICENESS=0
+fi
+
+thiscmd="$bin/$(basename ${BASH_SOURCE-$0})"
+args=$@
+
+case $startStop in
+
+(start)
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ hbase_rotate_log $HBASE_LOGGC
+ echo running $command, logging to $HBASE_LOGOUT
+ $thiscmd --config "${HBASE_CONF_DIR}" \
+ foreground_start $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ disown -h -r
+ sleep 1; head "${HBASE_LOGOUT}"
+ ;;
+
+(autostart)
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ hbase_rotate_log $HBASE_LOGGC
+ echo running $command, logging to $HBASE_LOGOUT
+ nohup $thiscmd --config "${HBASE_CONF_DIR}" --autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} \
+ internal_autostart $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ ;;
+
+(autorestart)
+ echo running $command, logging to $HBASE_LOGOUT
+ # stop the command
+ $thiscmd --config "${HBASE_CONF_DIR}" stop $command $args &
+ wait_until_done $!
+ # wait a user-specified sleep period
+ sp=${HBASE_RESTART_SLEEP:-3}
+ if [ $sp -gt 0 ]; then
+ sleep $sp
+ fi
+
+ check_before_start
+ hbase_rotate_log $HBASE_LOGOUT
+ nohup $thiscmd --config "${HBASE_CONF_DIR}" --autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} \
+ internal_autostart $command $args < /dev/null > ${HBASE_LOGOUT} 2>&1 &
+ ;;
+
+(foreground_start)
+ trap cleanAfterRun SIGHUP SIGINT SIGTERM EXIT
+ if [ "$HBASE_NO_REDIRECT_LOG" != "" ]; then
+ # NO REDIRECT
+ echo "`date` Starting $command on `hostname`"
+ echo "`ulimit -a`"
+ # in case the parent shell gets the kill make sure to trap signals.
+ # Only one will get called. Either the trap or the flow will go through.
+ nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ $command "$@" start &
+ else
+ echo "`date` Starting $command on `hostname`" >> ${HBASE_LOGLOG}
+ echo "`ulimit -a`" >> "$HBASE_LOGLOG" 2>&1
+ # in case the parent shell gets the kill make sure to trap signals.
+ # Only one will get called. Either the trap or the flow will go through.
+ nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ $command "$@" start >> ${HBASE_LOGOUT} 2>&1 &
+ fi
+ # Add to the command log file vital stats on our environment.
+ hbase_pid=$!
+ echo $hbase_pid > ${HBASE_PID}
+ wait $hbase_pid
+ ;;
+
+(internal_autostart)
+ ONE_HOUR_IN_SECS=3600
+ autostartWindowStartDate=`date +%s`
+ autostartCount=0
+ touch "$HBASE_AUTOSTART_FILE"
+
+ # keep starting the command until asked to stop. Reloop on software crash
+ while true
+ do
+ hbase_rotate_log $HBASE_LOGGC
+ if [ -f $HBASE_PID ] && kill -0 "$(cat "$HBASE_PID")" > /dev/null 2>&1 ; then
+ wait "$(cat "$HBASE_PID")"
+ else
+ #if the autostart file no longer exists, the stop command removed it, so do not restart
+ if [ ! -f "$HBASE_AUTOSTART_FILE" ]; then
+ echo "`date` Autostart file was removed; HBase was stopped intentionally. Exiting autostart process" >> ${HBASE_LOGOUT}
+ exit 1
+ fi
+
+ echo "`date` Autostarting hbase $command service. Attempt no: $(( $autostartCount + 1))" >> ${HBASE_LOGLOG}
+ touch "$HBASE_AUTOSTART_FILE"
+ $thiscmd --config "${HBASE_CONF_DIR}" foreground_start $command $args
+ autostartCount=$(( $autostartCount + 1 ))
+
+ # HBASE-6504 - only take the first line of the output in case verbose gc is on
+ distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+
+ if [ "$distMode" != 'false' ]; then
+ #if the cluster is being stopped then do not restart it again.
+ zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+ if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+ zkrunning=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.state`
+ if [ "$zkrunning" == "null" ]; then zkrunning="running"; fi
+ zkFullRunning=$zparent/$zkrunning
+ $bin/hbase zkcli stat $zkFullRunning 2>&1 | grep "Node does not exist" 1>/dev/null 2>&1
+
+ #grep returns 0 if it found something, 1 otherwise
+ if [ $? -eq 0 ]; then
+ echo "`date` hbase znode does not exist. Exiting Autostart process" >> ${HBASE_LOGOUT}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+
+ #If ZooKeeper cannot be found, then do not restart
+ $bin/hbase zkcli stat $zkFullRunning 2>&1 | grep Exception | grep ConnectionLoss 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "`date` zookeeper not found. Exiting Autostart process" >> ${HBASE_LOGOUT}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+ fi
+ fi
+
+ curDate=`date +%s`
+ autostartWindowReset=false
+
+ # reset the autostart window if the elapsed time exceeds the window size
+ if [ $AUTOSTART_WINDOW_SIZE -gt 0 ] && [ $(( $curDate - $autostartWindowStartDate )) -gt $(( $AUTOSTART_WINDOW_SIZE * $ONE_HOUR_IN_SECS )) ]; then
+ echo "Resetting autostart window (previous start date: $autostartWindowStartDate)" >> ${HBASE_LOGOUT}
+ autostartWindowStartDate=$curDate
+ autostartWindowReset=true
+ autostartCount=0
+ fi
+
+ # kill autostart if the retry limit is exceeded within the given window size (window size other than 0)
+ if ! $autostartWindowReset && [ $AUTOSTART_WINDOW_RETRY_LIMIT -gt 0 ] && [ $autostartCount -gt $AUTOSTART_WINDOW_RETRY_LIMIT ]; then
+ echo "`date` Autostart window retry limit: $AUTOSTART_WINDOW_RETRY_LIMIT exceeded for given window size: $AUTOSTART_WINDOW_SIZE hours. Exiting..." >> ${HBASE_LOGLOG}
+ rm -f "$HBASE_AUTOSTART_FILE"
+ exit 1
+ fi
+
+ # wait for shutdown hook to complete
+ sleep 20
+ done
+ ;;
+
+(stop)
+ echo running $command, logging to $HBASE_LOGOUT
+ rm -f "$HBASE_AUTOSTART_FILE"
+ if [ -f $HBASE_PID ]; then
+ pidToKill=`cat $HBASE_PID`
+ # kill -0 == see if the PID exists
+ if kill -0 $pidToKill > /dev/null 2>&1; then
+ echo -n stopping $command
+ echo "`date` Terminating $command" >> $HBASE_LOGLOG
+ kill $pidToKill > /dev/null 2>&1
+ waitForProcessEnd $pidToKill $command
+ else
+ retval=$?
+ echo no $command to stop because kill -0 of pid $pidToKill failed with status $retval
+ fi
+ else
+ echo no $command to stop because no pid file $HBASE_PID
+ fi
+ rm -f $HBASE_PID
+ ;;
+
+(restart)
+ echo running $command, logging to $HBASE_LOGOUT
+ # stop the command
+ $thiscmd --config "${HBASE_CONF_DIR}" stop $command $args &
+ wait_until_done $!
+ # wait a user-specified sleep period
+ sp=${HBASE_RESTART_SLEEP:-3}
+ if [ $sp -gt 0 ]; then
+ sleep $sp
+ fi
+ # start the command
+ $thiscmd --config "${HBASE_CONF_DIR}" start $command $args &
+ wait_until_done $!
+ ;;
+
+(*)
+ echo $usage
+ exit 1
+ ;;
+esac
diff --git a/PCAP-PIC/hbase/bin/hbase-daemons.sh b/PCAP-PIC/hbase/bin/hbase-daemons.sh
new file mode 100644
index 0000000..b1785f6
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-daemons.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a hbase command on all slave hosts.
+# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh
+
+usage="Usage: hbase-daemons.sh [--config <hbase-confdir>] [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>] \
+ [--hosts regionserversfile] [autostart|autorestart|restart|start|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. $bin/hbase-config.sh
+
+if [[ "$1" = "autostart" || "$1" = "autorestart" ]]
+then
+ autostart_args="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT}"
+fi
+
+remote_cmd="$bin/hbase-daemon.sh --config ${HBASE_CONF_DIR} ${autostart_args} $@"
+args="--hosts ${HBASE_REGIONSERVERS} --config ${HBASE_CONF_DIR} $remote_cmd"
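+# Example (illustrative): `hbase-daemons.sh --config /etc/hbase/conf start regionserver`
+# runs `hbase-daemon.sh --config /etc/hbase/conf start regionserver` on every
+# host listed in ${HBASE_REGIONSERVERS}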
+
+command=$2
+case $command in
+ (zookeeper)
+ exec "$bin/zookeepers.sh" $args
+ ;;
+ (master-backup)
+ exec "$bin/master-backup.sh" $args
+ ;;
+ (*)
+ exec "$bin/regionservers.sh" $args
+ ;;
+esac
diff --git a/PCAP-PIC/hbase/bin/hbase-jruby b/PCAP-PIC/hbase/bin/hbase-jruby
new file mode 100644
index 0000000..37bce46
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase-jruby
@@ -0,0 +1,22 @@
+#!/bin/bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+
+"$(dirname "$0")"/hbase org.jruby.Main "$@"
+
diff --git a/PCAP-PIC/hbase/bin/hbase.cmd b/PCAP-PIC/hbase/bin/hbase.cmd
new file mode 100644
index 0000000..fbeb1f8
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hbase.cmd
@@ -0,0 +1,469 @@
+@echo off
+@rem/*
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+@rem
+@rem The hbase command script. Based on the hadoop command script putting
+@rem in hbase classes, libs and configurations ahead of hadoop's.
+@rem
+@rem TODO: Narrow the amount of duplicated code.
+@rem
+@rem Environment Variables:
+@rem
+@rem JAVA_HOME The java implementation to use.
+@rem
+@rem HBASE_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HBASE_HEAPSIZE The maximum amount of heap to use.
+@rem Default is unset and uses the JVMs default setting
+@rem (usually 1/4th of the available memory).
+@rem
+@rem HBASE_OPTS Extra Java runtime options.
+@rem
+@rem HBASE_CONF_DIR Alternate conf dir. Default is ${HBASE_HOME}/conf.
+@rem
+@rem HBASE_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+@rem JRUBY_HOME JRuby path: $JRUBY_HOME\lib\jruby.jar should exist.
+@rem Defaults to the jar packaged with HBase.
+@rem
+@rem JRUBY_OPTS Extra options (eg '--1.9') passed to hbase.
+@rem Empty by default.
+@rem HBASE_SHELL_OPTS Extra options passed to the hbase shell.
+@rem Empty by default.
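+@rem
+@rem Example (illustrative): `set HBASE_HEAPSIZE=4096` before invoking this
+@rem script runs the JVM with -Xmx4096m (see JAVA_HEAP_MAX below).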
+
+
+setlocal enabledelayedexpansion
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+rem This will set HBASE_HOME, etc.
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call "%hbase-config-script%" %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+rem Detect if we are in hbase sources dir
+set in_dev_env=false
+
+if exist "%HBASE_HOME%\target" set in_dev_env=true
+
+rem --service is an internal option. used by MSI setup to install HBase as a windows service
+if "%1" == "--service" (
+ set service_entry=true
+ shift
+)
+
+set hbase-command=%1
+shift
+
+@rem if no args specified, show usage
+if "%hbase-command%"=="" (
+ goto :print_usage
+ endlocal
+ goto :eof
+)
+
+set JAVA_HEAP_MAX=
+set JAVA_OFFHEAP_MAX=
+
+rem check envvars which might override default args
+if defined HBASE_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%HBASE_HEAPSIZE%m
+)
+
+if defined HBASE_OFFHEAPSIZE (
+ set JAVA_OFFHEAP_MAX=-XX:MaxDirectMemory=%HBASE_OFFHEAPSIZE%m
+)
+
+set CLASSPATH=%HBASE_CONF_DIR%;%JAVA_HOME%\lib\tools.jar
+
+rem Add maven target directory
+set cached_classpath_filename=%HBASE_HOME%\hbase-build-configuration\target\cached_classpath.txt
+if "%in_dev_env%"=="true" (
+
+ rem adding maven main classes to classpath
+ for /f %%i in ('dir /b "%HBASE_HOME%\hbase-*"') do (
+ if exist %%i\target\classes set CLASSPATH=!CLASSPATH!;%%i\target\classes
+ )
+
+ rem adding maven test classes to classpath
+ rem For developers, add hbase classes to CLASSPATH
+ for /f %%i in ('dir /b "%HBASE_HOME%\hbase-*"') do (
+ if exist %%i\target\test-classes set CLASSPATH=!CLASSPATH!;%%i\target\test-classes
+ )
+
+ if not exist "%cached_classpath_filename%" (
+ echo "As this is a development environment, we need %cached_classpath_filename% to be generated from maven (command: mvn install -DskipTests)"
+ goto :eof
+ )
+
+ for /f "delims=" %%i in ('type "%cached_classpath_filename%"') do set CLASSPATH=%CLASSPATH%;%%i
+)
+
+@rem For releases add hbase webapps to CLASSPATH
+@rem Webapps must come first else it messes up Jetty
+if exist "%HBASE_HOME%\hbase-webapps" (
+ set CLASSPATH=%CLASSPATH%;%HBASE_HOME%
+)
+
+if exist "%HBASE_HOME%\target\hbase-webapps" (
+ set CLASSPATH=%CLASSPATH%;%HBASE_HOME%\target
+)
+
+for /F %%f in ('dir /b "%HBASE_HOME%\hbase*.jar" 2^>nul') do (
+ if not "%%f:~-11"=="sources.jar" (
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\%%f
+ )
+)
+
+@rem Add libs to CLASSPATH
+if exist "%HBASE_HOME%\lib" (
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\*
+)
+
+@rem Add user-specified CLASSPATH last
+if defined HBASE_CLASSPATH (
+ set CLASSPATH=%CLASSPATH%;%HBASE_CLASSPATH%
+)
+
+@rem Default log directory and file
+if not defined HBASE_LOG_DIR (
+ set HBASE_LOG_DIR=%HBASE_HOME%\logs
+)
+
+if not defined HBASE_LOGFILE (
+ set HBASE_LOGFILE=hbase.log
+)
+
+set JAVA_PLATFORM=
+
+rem If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH
+set PATH=%PATH%;"%HADOOP_HOME%\bin"
+set HADOOP_IN_PATH=hadoop.cmd
+
+if exist "%HADOOP_HOME%\bin\%HADOOP_IN_PATH%" (
+ set hadoopCpCommand=call %HADOOP_IN_PATH% classpath 2^>nul
+ for /f "eol= delims=" %%i in ('!hadoopCpCommand!') do set CLASSPATH_FROM_HADOOP=%%i
+ if defined CLASSPATH_FROM_HADOOP (
+ set CLASSPATH=%CLASSPATH%;!CLASSPATH_FROM_HADOOP!
+ )
+ set HADOOP_CLASSPATH=%CLASSPATH%
+
+ set hadoopJLPCommand=call %HADOOP_IN_PATH% org.apache.hadoop.hbase.util.GetJavaProperty java.library.path 2^>nul
+ for /f "eol= delims=" %%i in ('!hadoopJLPCommand!') do set HADOOP_JAVA_LIBRARY_PATH=%%i
+ if not defined JAVA_LIBRARY_PATH (
+ set JAVA_LIBRARY_PATH=!HADOOP_JAVA_LIBRARY_PATH!
+ ) else (
+ set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;!HADOOP_JAVA_LIBRARY_PATH!
+ )
+)
+
+if exist "%HBASE_HOME%\build\native" (
+ set platformCommand=call %JAVA% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName
+ for /f %%i in ('!platformCommand!') do set JAVA_PLATFORM=%%i
+ set _PATH_TO_APPEND=%HBASE_HOME%\build\native\!JAVA_PLATFORM!;%HBASE_HOME%\build\native\!JAVA_PLATFORM!\lib
+ if not defined JAVA_LIBRARY_PATH (
+ set JAVA_LIBRARY_PATH=!_PATH_TO_APPEND!
+ ) else (
+ set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;!_PATH_TO_APPEND!
+ )
+)
+
+rem This loop would set %hbase-command-arguments%
+set _hbasearguments=
+:MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hbasearguments (
+ set _hbasearguments=%1
+ ) else (
+ set _hbasearguments=!_hbasearguments! %1
+ )
+ shift
+goto :MakeCmdArgsLoop
+:EndLoop
+
+set hbase-command-arguments=%_hbasearguments%
+
+@rem figure out which class to run
+set corecommands=shell master regionserver thrift thrift2 rest avro wal hbck hfile zookeeper zkcli mapredcp
+for %%i in ( %corecommands% ) do (
+ if "%hbase-command%"=="%%i" set corecommand=true
+)
+
+if defined corecommand (
+ call :%hbase-command% %hbase-command-arguments%
+) else (
+ if "%hbase-command%" == "classpath" (
+ echo %CLASSPATH%
+ goto :eof
+ )
+ if "%hbase-command%" == "version" (
+ set CLASS=org.apache.hadoop.hbase.util.VersionInfo
+ ) else (
+ set CLASS=%hbase-command%
+ )
+)
+
+if not defined HBASE_IDENT_STRING (
+ set HBASE_IDENT_STRING=%USERNAME%
+)
+
+@rem Set the right GC options based on the what we are running
+set servercommands=master regionserver thrift thrift2 rest avro zookeeper
+for %%i in ( %servercommands% ) do (
+ if "%hbase-command%"=="%%i" set servercommand=true
+)
+
+if "%servercommand%" == "true" (
+ set HBASE_OPTS=%HBASE_OPTS% %SERVER_GC_OPTS%
+) else (
+ set HBASE_OPTS=%HBASE_OPTS% %CLIENT_GC_OPTS%
+)
+
+@rem If HBase is run as a windows service, configure logging
+if defined service_entry (
+ set HBASE_LOG_PREFIX=hbase-%hbase-command%-%COMPUTERNAME%
+ set HBASE_LOGFILE=!HBASE_LOG_PREFIX!.log
+ if not defined HBASE_ROOT_LOGGER (
+ set HBASE_ROOT_LOGGER=INFO,DRFA
+ )
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ set loggc=!HBASE_LOG_DIR!\!HBASE_LOG_PREFIX!.gc
+ set loglog=!HBASE_LOG_DIR!\!HBASE_LOGFILE!
+
+ if "%HBASE_USE_GC_LOGFILE%" == "true" (
+ set HBASE_OPTS=%HBASE_OPTS% -Xloggc:"!loggc!"
+ )
+)
+
+@rem for jruby
+@rem (1) for the commands which need jruby (see jruby-commands defined below)
+@rem A. when JRUBY_HOME is defined
+@rem CLASSPATH and HBASE_OPTS are updated according to JRUBY_HOME defined
+@rem B. when JRUBY_HOME is not defined
+@rem add jruby packaged with HBase to CLASSPATH
+@rem (2) for other commands, do nothing
+
+@rem check if the command needs jruby
+set jruby-commands=shell org.jruby.Main
+for %%i in ( %jruby-commands% ) do (
+ if "%hbase-command%"=="%%i" set jruby-needed=true
+)
+
+@rem the command needs jruby
+if defined jruby-needed (
+ @rem JRUBY_HOME is defined
+ if defined JRUBY_HOME (
+ set CLASSPATH=%JRUBY_HOME%\lib\jruby.jar;%CLASSPATH%
+ set HBASE_OPTS=%HBASE_OPTS% -Djruby.home="%JRUBY_HOME%" -Djruby.lib="%JRUBY_HOME%\lib"
+ )
+
+ @rem JRUBY_HOME is not defined
+ if not defined JRUBY_HOME (
+ @rem in dev environment
+ if "%in_dev_env%"=="true" (
+ set cached_classpath_jruby_filename=%HBASE_HOME%\hbase-build-configuration\target\cached_classpath_jruby.txt
+ if not exist "!cached_classpath_jruby_filename!" (
+ echo "As this is a development environment, we need !cached_classpath_jruby_filename! to be generated from maven (command: mvn install -DskipTests)"
+ goto :eof
+ )
+ for /f "delims=" %%i in ('type "!cached_classpath_jruby_filename!"') do set CLASSPATH=%%i;%CLASSPATH%
+ )
+
+ @rem not in dev environment
+ if "%in_dev_env%"=="false" (
+ @rem add jruby packaged with HBase to CLASSPATH
+ set JRUBY_PACKAGED_WITH_HBASE=%HBASE_HOME%\lib\ruby\*
+ if defined jruby-needed (
+ set CLASSPATH=!JRUBY_PACKAGED_WITH_HBASE!;!CLASSPATH!
+ )
+ )
+ )
+)
+
+@rem Have JVM dump heap if we run out of memory. Files will be in the launch
+@rem directory and are named like the following: java_pid21612.hprof. Apparently
+@rem it does not 'cost' to have this flag enabled. It's a 1.6 flag only. See:
+@rem http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.log.dir="%HBASE_LOG_DIR%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.log.file="%HBASE_LOGFILE%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.home.dir="%HBASE_HOME%"
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.id.str="%HBASE_IDENT_STRING%"
+set HBASE_OPTS=%HBASE_OPTS% -XX:OnOutOfMemoryError="taskkill /F /PID %p"
+
+if not defined HBASE_ROOT_LOGGER (
+ set HBASE_ROOT_LOGGER=INFO,console
+)
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.root.logger="%HBASE_ROOT_LOGGER%"
+
+if defined JAVA_LIBRARY_PATH (
+ set HBASE_OPTS=%HBASE_OPTS% -Djava.library.path="%JAVA_LIBRARY_PATH%"
+)
+
+rem Enable security logging on the master and regionserver only
+if not defined HBASE_SECURITY_LOGGER (
+ set HBASE_SECURITY_LOGGER=INFO,NullAppender
+ if "%hbase-command%"=="master" (
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ )
+ if "%hbase-command%"=="regionserver" (
+ set HBASE_SECURITY_LOGGER=INFO,DRFAS
+ )
+)
+set HBASE_OPTS=%HBASE_OPTS% -Dhbase.security.logger="%HBASE_SECURITY_LOGGER%"
+
+set HEAP_SETTINGS=%JAVA_HEAP_MAX% %JAVA_OFFHEAP_MAX%
+set java_arguments=%HEAP_SETTINGS% %HBASE_OPTS% -classpath "%CLASSPATH%" %CLASS% %hbase-command-arguments%
+
+if defined service_entry (
+ call :makeServiceXml %java_arguments%
+) else (
+ call %JAVA% %java_arguments%
+)
+
+endlocal
+goto :eof
+
+:shell
+ rem find the hbase ruby sources
+ if exist "%HBASE_HOME%\lib\ruby" (
+ set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources="%HBASE_HOME%\lib\ruby"
+ ) else (
+ set HBASE_OPTS=%HBASE_OPTS% -Dhbase.ruby.sources="%HBASE_HOME%\hbase-shell\src\main\ruby"
+ )
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_SHELL_OPTS%
+
+ set CLASS=org.jruby.Main -X+O %JRUBY_OPTS% "%HBASE_HOME%\bin\hirb.rb"
+ goto :eof
+
+:master
+ set CLASS=org.apache.hadoop.hbase.master.HMaster
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_MASTER_OPTS%
+ )
+ goto :eof
+
+:regionserver
+ set CLASS=org.apache.hadoop.hbase.regionserver.HRegionServer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_REGIONSERVER_OPTS%
+ )
+ goto :eof
+
+:thrift
+ set CLASS=org.apache.hadoop.hbase.thrift.ThriftServer
+ if NOT "%1" == "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_THRIFT_OPTS%
+ )
+ goto :eof
+
+:thrift2
+ set CLASS=org.apache.hadoop.hbase.thrift2.ThriftServer
+ if NOT "%1" == "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_THRIFT_OPTS%
+ )
+ goto :eof
+
+:rest
+ set CLASS=org.apache.hadoop.hbase.rest.RESTServer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_REST_OPTS%
+ )
+ goto :eof
+
+:avro
+ set CLASS=org.apache.hadoop.hbase.avro.AvroServer
+ if NOT "%1"== "stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_AVRO_OPTS%
+ )
+ goto :eof
+
+:zookeeper
+ set CLASS=org.apache.hadoop.hbase.zookeeper.HQuorumPeer
+ if NOT "%1"=="stop" (
+ set HBASE_OPTS=%HBASE_OPTS% %HBASE_ZOOKEEPER_OPTS%
+ )
+ goto :eof
+
+:hbck
+ set CLASS=org.apache.hadoop.hbase.util.HBaseFsck
+ goto :eof
+
+:wal
+ set CLASS=org.apache.hadoop.hbase.wal.WALPrettyPrinter
+ goto :eof
+
+:hfile
+ set CLASS=org.apache.hadoop.hbase.io.hfile.HFile
+ goto :eof
+
+:zkcli
+ set CLASS=org.apache.hadoop.hbase.zookeeper.ZKMainServer
+ set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\zkcli\*
+ goto :eof
+
+:mapredcp
+ set CLASS=org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool
+ goto :eof
+
+:makeServiceXml
+ set arguments=%*
+ @echo ^<service^>
+ @echo ^<id^>%hbase-command%^</id^>
+ @echo ^<name^>%hbase-command%^</name^>
+ @echo ^<description^>This service runs Isotope %hbase-command%^</description^>
+ @echo ^<executable^>%JAVA%^</executable^>
+ @echo ^<arguments^>%arguments%^</arguments^>
+ @echo ^</service^>
+ goto :eof
+
+:print_usage
+ echo Usage: hbase [^<options^>] ^<command^> [^<args^>]
+ echo where ^<command^> is an option from one of these categories:
+ echo Options:
+ echo --config DIR Configuration directory to use. Default: ./conf
+ echo.
+ echo Commands:
+ echo Some commands take arguments. Pass no args or -h for usage.
+ echo shell Run the HBase shell
+ echo hbck Run the hbase 'fsck' tool
+ echo wal Write-ahead-log analyzer
+ echo hfile Store file analyzer
+ echo zkcli Run the ZooKeeper shell
+ echo master Run an HBase HMaster node
+ echo regionserver Run an HBase HRegionServer node
+ echo zookeeper Run a ZooKeeper server
+ echo rest Run an HBase REST server
+ echo thrift Run the HBase Thrift server
+ echo thrift2 Run the HBase Thrift2 server
+ echo classpath Dump hbase CLASSPATH
+ echo mapredcp Dump CLASSPATH entries required by mapreduce
+ echo version Print the version
+ echo CLASSNAME Run the class named CLASSNAME
+ goto :eof
diff --git a/PCAP-PIC/hbase/bin/hirb.rb b/PCAP-PIC/hbase/bin/hirb.rb
new file mode 100644
index 0000000..e857db7
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/hirb.rb
@@ -0,0 +1,264 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# File passed to org.jruby.Main by bin/hbase. Pollutes jirb with hbase imports
+# and hbase commands and then loads jirb. Outputs a banner that tells user
+# where to find help, shell version, and loads up a custom hirb.
+#
+# In noninteractive mode, runs commands from stdin until completion or an error.
+# On success will exit with status 0, on any problem will exit non-zero. Callers
+# should only rely on "not equal to 0", because the current error exit code of 1
+# will likely be updated to differentiate e.g. invalid commands, incorrect args,
+# permissions, etc.
+
+# TODO: Interrupt a table creation or a connection to a bad master. Currently
+# has to time out. Below we've set down the retries for rpc and hbase but
+# still can be annoying (And there seem to be times when we'll retry for
+# ever regardless)
+# TODO: Add support for listing and manipulating catalog tables, etc.
+# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes
+
+# Run the java magic include and import basic HBase types that will help ease
+# hbase hacking.
+include Java
+
+# Some goodies for hirb. Should these be left up to the user's discretion?
+require 'irb/completion'
+require 'pathname'
+
+# Add the directory names in hbase.jruby.sources commandline option
+# to the ruby load path so I can load up my HBase ruby modules
+sources = java.lang.System.getProperty('hbase.ruby.sources')
+$LOAD_PATH.unshift Pathname.new(sources)
+
+#
+# FIXME: Switch args processing to getopt
+#
+# See if there are args for this shell. If any, read and then strip from ARGV
+# so they don't go through to irb. Output shell 'usage' if user types '--help'
+cmdline_help = <<HERE # HERE document output as shell usage
+Usage: shell [OPTIONS] [SCRIPTFILE [ARGUMENTS]]
+
+ -d | --debug Set DEBUG log levels.
+ -h | --help This help.
+ -n | --noninteractive Do not run within an IRB session and exit with non-zero
+ status on first error.
+ -Dkey=value Pass hbase-*.xml Configuration overrides. For example, to
+ use an alternate zookeeper ensemble, pass:
+ -Dhbase.zookeeper.quorum=zookeeper.example.org
+ For faster fail, pass the below and vary the values:
+ -Dhbase.client.retries.number=7
+ -Dhbase.ipc.client.connect.max.retries=3
+HERE
+
+# Takes configuration and an arg that is expected to be key=value format.
+# If c is empty, creates one and returns it
+def add_to_configuration(c, arg)
+ kv = arg.split('=')
+ kv.length == 2 || (raise "Expected parameter #{kv} in key=value format")
+ c = org.apache.hadoop.hbase.HBaseConfiguration.create if c.nil?
+ c.set(kv[0], kv[1])
+ c
+end
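+# e.g. add_to_configuration(nil, 'hbase.zookeeper.quorum=zookeeper.example.org')
+# returns a new Configuration with that property applied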
+
+found = []
+script2run = nil
+log_level = org.apache.log4j.Level::ERROR
+@shell_debug = false
+interactive = true
+_configuration = nil
+D_ARG = '-D'
+while (arg = ARGV.shift)
+ if arg == '-h' || arg == '--help'
+ puts cmdline_help
+ exit
+ elsif arg == D_ARG
+ argValue = ARGV.shift || (raise "#{D_ARG} takes a 'key=value' parameter")
+ _configuration = add_to_configuration(_configuration, argValue)
+ found.push(arg)
+ found.push(argValue)
+ elsif arg.start_with? D_ARG
+ _configuration = add_to_configuration(_configuration, arg[2..-1])
+ found.push(arg)
+ elsif arg == '-d' || arg == '--debug'
+ log_level = org.apache.log4j.Level::DEBUG
+ $fullBackTrace = true
+ @shell_debug = true
+ found.push(arg)
+ puts 'Setting DEBUG log level...'
+ elsif arg == '-n' || arg == '--noninteractive'
+ interactive = false
+ found.push(arg)
+ elsif arg == '-r' || arg == '--return-values'
+ warn '[INFO] the -r | --return-values option is ignored. we always behave '\
+ 'as though it was given.'
+ found.push(arg)
+ else
+ # Presume it a script. Save it off for running later below
+ # after we've set up some environment.
+ script2run = arg
+ found.push(arg)
+ # Presume that any other args are meant for the script.
+ break
+ end
+end
+
+# Delete all processed args
+found.each { |arg| ARGV.delete(arg) }
+# Make sure debug flag gets back to IRB
+ARGV.unshift('-d') if @shell_debug
+
+# Set logging level to avoid verboseness
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+
+# Require HBase now after setting log levels
+require 'hbase_constants'
+
+# Load hbase shell
+require 'shell'
+
+# Require formatter
+require 'shell/formatter'
+
+# Setup the HBase module. Create a configuration.
+@hbase = _configuration.nil? ? Hbase::Hbase.new : Hbase::Hbase.new(_configuration)
+
+# Setup console
+@shell = Shell::Shell.new(@hbase, interactive)
[email protected] = @shell_debug
+
+# Add commands to this namespace
+# TODO avoid polluting main namespace by using a binding
[email protected]_commands(self)
+
+# Add help command
+def help(command = nil)
+ @shell.help(command)
+end
+
+# Backwards compatibility method
+def tools
+ @shell.help_group('tools')
+end
+
+# Debugging method
+def debug
+ if @shell_debug
+ @shell_debug = false
+ conf.back_trace_limit = 0
+ log_level = org.apache.log4j.Level::ERROR
+ else
+ @shell_debug = true
+ conf.back_trace_limit = 100
+ log_level = org.apache.log4j.Level::DEBUG
+ end
+ org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+ org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+ debug?
+end
+
+def debug?
+ puts "Debug mode is #{@shell_debug ? 'ON' : 'OFF'}\n\n"
+ nil
+end
+
+# Include hbase constants
+include HBaseConstants
+
+# If script2run, try running it. If we're in interactive mode, will go on to run the shell unless
+# script calls 'exit' or 'exit 0' or 'exit errcode'.
+load(script2run) if script2run
+
+if interactive
+ # Output a banner message that tells users where to go for help
+ @shell.print_banner
+
+ require 'irb'
+ require 'irb/hirb'
+
+ module IRB
+ def self.start(ap_path = nil)
+ $0 = File.basename(ap_path, '.rb') if ap_path
+
+ IRB.setup(ap_path)
+ @CONF[:IRB_NAME] = 'hbase'
+ @CONF[:AP_NAME] = 'hbase'
+ @CONF[:BACK_TRACE_LIMIT] = 0 unless $fullBackTrace
+
+ hirb = if @CONF[:SCRIPT]
+ HIRB.new(nil, @CONF[:SCRIPT])
+ else
+ HIRB.new
+ end
+
+ @CONF[:IRB_RC].call(hirb.context) if @CONF[:IRB_RC]
+ @CONF[:MAIN_CONTEXT] = hirb.context
+
+ catch(:IRB_EXIT) do
+ hirb.eval_input
+ end
+ end
+ end
+
+ IRB.start
+else
+ begin
+ # Noninteractive mode: if there is input on stdin, do a simple REPL.
+ # XXX Note that this purposefully uses STDIN and not Kernel.gets
+ # in order to maintain compatibility with previous behavior where
+ # a user could pass in script2run and then still pipe commands on
+ # stdin.
+ require 'irb/ruby-lex'
+ require 'irb/workspace'
+ workspace = IRB::WorkSpace.new(binding)
+ scanner = RubyLex.new
+
+ # RubyLex claims to take an IO but really wants an InputMethod
+ module IOExtensions
+ def encoding
+ external_encoding
+ end
+ end
+ IO.include IOExtensions
+
+ scanner.set_input(STDIN)
+ scanner.each_top_level_statement do |statement, linenum|
+ puts(workspace.evaluate(nil, statement, 'stdin', linenum))
+ end
+ # XXX We're catching Exception on purpose, because we want to include
+ # unwrapped java exceptions, syntax errors, eval failures, etc.
+ rescue Exception => exception
+ message = exception.to_s
+ # exception unwrapping in shell means we'll have to handle Java exceptions
+ # as a special case in order to format them properly.
+ if exception.is_a? java.lang.Exception
+ $stderr.puts 'java exception'
+ message = exception.get_message
+ end
+ # Include the 'ERROR' string to try to make transition easier for scripts that
+ # may have already been relying on grepping output.
+ puts "ERROR #{exception.class}: #{message}"
+    if $fullBackTrace
+      # re-raising will include a backtrace and exit.
+ raise exception
+ else
+ exit 1
+ end
+ end
+end
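
For reference, the flags parsed above surface through the `hbase shell` wrapper; a couple of illustrative invocations (the quorum host and script name below are hypothetical):

    # interactive shell against an alternate zookeeper ensemble
    bin/hbase shell -Dhbase.zookeeper.quorum=zk1.example.org

    # run a script non-interactively, exiting non-zero on the first error
    bin/hbase shell -n my_script.rb arg1 arg2
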
diff --git a/PCAP-PIC/hbase/bin/local-master-backup.sh b/PCAP-PIC/hbase/bin/local-master-backup.sh
new file mode 100644
index 0000000..b0aa2f7
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/local-master-backup.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+# This is used for starting multiple masters on the same machine.
+# run it from hbase-dir/ just like 'bin/hbase'
+# Supports up to 10 masters (limitation = overlapping ports)
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin" >/dev/null && pwd`
+
+if [ $# -lt 2 ]; then
+ S=`basename "${BASH_SOURCE-$0}"`
+ echo "Usage: $S [--config <conf-dir>] [--autostart-window-size <window size in hours>]"
+ echo " [--autostart-window-retry-limit <retry count limit for autostart>] [autostart|start|stop] offset(s)"
+ echo " e.g. $S start 1"
+ exit
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# sanity check: make sure your master opts don't use ports [i.e. JMX/DBG]
+export HBASE_MASTER_OPTS=" "
+
+run_master () {
+ DN=$2
+ export HBASE_IDENT_STRING="$USER-$DN"
+ HBASE_MASTER_ARGS="\
+ -D hbase.master.port=`expr 16000 + $DN` \
+ -D hbase.master.info.port=`expr 16010 + $DN` \
+ -D hbase.regionserver.port=`expr 16020 + $DN` \
+ -D hbase.regionserver.info.port=`expr 16030 + $DN` \
+ --backup"
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" --autostart-window-size "${AUTOSTART_WINDOW_SIZE}" --autostart-window-retry-limit "${AUTOSTART_WINDOW_RETRY_LIMIT}" $1 master $HBASE_MASTER_ARGS
+}
+
+cmd=$1
+shift;
+
+for i in "$@"
+do
+ if [[ "$i" =~ ^[0-9]+$ ]]; then
+ run_master $cmd $i
+ else
+ echo "Invalid argument"
+ fi
+done
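
To illustrate the offset arithmetic in run_master above (ports are 16000/16010/16020/16030 plus the offset), a hypothetical session:

    bin/local-master-backup.sh start 1    # backup master on ports 16001/16011
    bin/local-master-backup.sh stop 1     # stop the same instance
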
diff --git a/PCAP-PIC/hbase/bin/local-regionservers.sh b/PCAP-PIC/hbase/bin/local-regionservers.sh
new file mode 100644
index 0000000..97e5eed
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/local-regionservers.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+# This is used for starting multiple regionservers on the same machine.
+# run it from hbase-dir/ just like 'bin/hbase'
+# Supports up to 10 regionservers (limitation = overlapping ports)
+# For supporting more instances select different values (e.g. 16200, 16300)
+# for HBASE_RS_BASE_PORT and HBASE_RS_INFO_BASE_PORT below
+if [ -z "$HBASE_RS_BASE_PORT" ]; then
+ HBASE_RS_BASE_PORT=16020
+fi
+if [ -z "$HBASE_RS_INFO_BASE_PORT" ]; then
+ HBASE_RS_INFO_BASE_PORT=16030
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin" >/dev/null && pwd`
+
+if [ $# -lt 2 ]; then
+ S=`basename "${BASH_SOURCE-$0}"`
+ echo "Usage: $S [--config <conf-dir>] [--autostart-window-size <window size in hours>]"
+ echo " [--autostart-window-retry-limit <retry count limit for autostart>] [autostart|start|stop] offset(s)"
+ echo " e.g. $S start 1 2"
+ exit
+fi
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# sanity check: make sure your regionserver opts don't use ports [i.e. JMX/DBG]
+export HBASE_REGIONSERVER_OPTS=" "
+
+run_regionserver () {
+ DN=$2
+ export HBASE_IDENT_STRING="$USER-$DN"
+ HBASE_REGIONSERVER_ARGS="\
+ -Dhbase.regionserver.port=`expr "$HBASE_RS_BASE_PORT" + "$DN"` \
+ -Dhbase.regionserver.info.port=`expr "$HBASE_RS_INFO_BASE_PORT" + "$DN"`"
+
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" \
+ --autostart-window-size "${AUTOSTART_WINDOW_SIZE}" \
+ --autostart-window-retry-limit "${AUTOSTART_WINDOW_RETRY_LIMIT}" \
+ "$1" regionserver "$HBASE_REGIONSERVER_ARGS"
+}
+
+cmd=$1
+shift;
+
+for i in "$@"
+do
+ if [[ "$i" =~ ^[0-9]+$ ]]; then
+ run_regionserver "$cmd" "$i"
+ else
+ echo "Invalid argument"
+ fi
+done
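
The same pattern applies here with configurable base ports; for example (offsets arbitrary):

    bin/local-regionservers.sh start 1 2              # RS on 16021/16031 and 16022/16032
    HBASE_RS_BASE_PORT=16200 HBASE_RS_INFO_BASE_PORT=16300 \
      bin/local-regionservers.sh start 1              # alternate ranges, per the header comment
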
diff --git a/PCAP-PIC/hbase/bin/master-backup.sh b/PCAP-PIC/hbase/bin/master-backup.sh
new file mode 100644
index 0000000..feca4ab
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/master-backup.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all backup master hosts.
+#
+# Environment Variables
+#
+# HBASE_BACKUP_MASTERS File naming remote hosts.
+# Default is ${HBASE_CONF_DIR}/backup-masters
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: $0 [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+# If the master backup file is specified in the command line,
+# then it takes precedence over the definition in
+# hbase-env.sh. Save it here.
+HOSTLIST=$HBASE_BACKUP_MASTERS
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HBASE_BACKUP_MASTERS" = "" ]; then
+ export HOSTLIST="${HBASE_CONF_DIR}/backup-masters"
+ else
+ export HOSTLIST="${HBASE_BACKUP_MASTERS}"
+ fi
+fi
+
+
+args=${@// /\\ }
+args=${args/master-backup/master}
+
+if [ -f $HOSTLIST ]; then
+ for hmaster in `cat "$HOSTLIST"`; do
+ ssh $HBASE_SSH_OPTS $hmaster $"$args --backup" \
+ 2>&1 | sed "s/^/$hmaster: /" &
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
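
A sketch of the argument rewrite this script performs (the command below is hypothetical): an invocation such as

    bin/master-backup.sh "$HBASE_HOME/bin/hbase-daemon.sh" start master-backup

is re-sent over ssh to every host in backup-masters with "master-backup" rewritten to "master" and "--backup" appended, so each backup master runs the same daemon command in backup mode.
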
diff --git a/PCAP-PIC/hbase/bin/region_mover.rb b/PCAP-PIC/hbase/bin/region_mover.rb
new file mode 100644
index 0000000..6756145
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/region_mover.rb
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Moves regions. Will confirm region access in current location and will
+# not move a new region until successful confirm of region loading in new
+# location. Presumes balancer is disabled when we run (not harmful if it's
+# on, but this script and the balancer will end up fighting each other).
+$BIN = File.dirname(__FILE__)
+exec "#{$BIN}/hbase org.apache.hadoop.hbase.util.RegionMover #{ARGV.join(' ')}"
diff --git a/PCAP-PIC/hbase/bin/region_status.rb b/PCAP-PIC/hbase/bin/region_status.rb
new file mode 100644
index 0000000..abd19dd
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/region_status.rb
@@ -0,0 +1,150 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# View the current status of all regions on an HBase cluster. This is
+# predominantly used to determine if all the regions in META have been
+# onlined yet on startup.
+#
+# To use this script, run:
+#
+# ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb [wait] [--table <table_name>]
+
+require 'optparse'
+
+usage = "Usage: ./hbase org.jruby.Main region_status.rb [wait] " \
+        "[--table <table_name>]\n"
+OptionParser.new do |o|
+ o.banner = usage
+ o.on('-t', '--table TABLENAME', 'Only process TABLENAME') do |tablename|
+ $tablename = tablename
+ end
+ o.on('-h', '--help', 'Display help message') { puts o; exit }
+ o.parse!
+end
+
+SHOULD_WAIT = ARGV[0] == 'wait'
+if ARGV[0] && !SHOULD_WAIT
+ print usage
+ exit 1
+end
+
+require 'java'
+
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.MasterNotRunningException
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.client.Table
+java_import org.apache.hadoop.hbase.client.Scan
+java_import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
+java_import org.apache.hadoop.hbase.util.Bytes
+java_import org.apache.hadoop.hbase.HRegionInfo
+java_import org.apache.hadoop.hbase.MetaTableAccessor
+java_import org.apache.hadoop.hbase.HTableDescriptor
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import java.io.IOException
+
+# disable debug logging on this script for clarity
+log_level = org.apache.log4j.Level::ERROR
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
+
+config = HBaseConfiguration.create
+config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
+connection = ConnectionFactory.createConnection(config)
+# wait until the master is running
+admin = nil
+loop do
+ begin
+ admin = connection.getAdmin
+ break
+ rescue MasterNotRunningException => e
+    print "Waiting for master to start...\n"
+ sleep 1
+ end
+end
+
+meta_count = 0
+server_count = 0
+
+# scan META to see how many regions we should have
+if $tablename.nil?
+ scan = Scan.new
+else
+ tableNameMetaPrefix = $tablename + HConstants::META_ROW_DELIMITER.chr
+ scan = Scan.new(
+ (tableNameMetaPrefix + HConstants::META_ROW_DELIMITER.chr).to_java_bytes
+ )
+end
+scan.setCacheBlocks(false)
+scan.setCaching(10)
+scan.setFilter(FirstKeyOnlyFilter.new)
+INFO = 'info'.to_java_bytes
+REGION_INFO = 'regioninfo'.to_java_bytes
+scan.addColumn INFO, REGION_INFO
+table = nil
+scanner = nil
+iter = nil
+loop do
+ begin
+ table = connection.getTable(TableName.valueOf('hbase:meta'))
+ scanner = table.getScanner(scan)
+ iter = scanner.iterator
+ break
+ rescue IOException => ioe
+ print "Exception trying to scan META: #{ioe}"
+ sleep 1
+ end
+end
+while iter.hasNext
+ result = iter.next
+ rowid = Bytes.toString(result.getRow)
+ rowidStr = java.lang.String.new(rowid)
+ if !$tablename.nil? && !rowidStr.startsWith(tableNameMetaPrefix)
+ # Gone too far, break
+ break
+ end
+ region = MetaTableAccessor.getHRegionInfo(result)
+ unless region.isOffline
+ # only include regions that should be online
+ meta_count += 1
+ end
+end
+scanner.close
+# If we're trying to see the status of all HBase tables, we need to include the
+# hbase:meta table, which is not included in our scan
+meta_count += 1 if $tablename.nil?
+
+# query the master to see how many regions are on region servers
+$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
+loop do
+ if $tablename.nil?
+ server_count = admin.getClusterStatus.getRegionsCount
+ else
+ connection = ConnectionFactory.createConnection(config)
+ server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
+ end
+ print "Region Status: #{server_count} / #{meta_count}\n"
+ if SHOULD_WAIT && server_count < meta_count
+ # continue this loop until server & meta count match
+ sleep 10
+ else
+ break
+ end
+end
+admin.close
+connection.close
+
+exit server_count == meta_count ? 0 : 1
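
Typical invocations, following the usage string above (the table name is hypothetical):

    # one-shot report; exits 0 once server and META region counts match
    bin/hbase org.jruby.Main bin/region_status.rb

    # poll every 10 seconds until every region of one table is assigned
    bin/hbase org.jruby.Main bin/region_status.rb wait --table my_table
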
diff --git a/PCAP-PIC/hbase/bin/regionservers.sh b/PCAP-PIC/hbase/bin/regionservers.sh
new file mode 100644
index 0000000..b83c1f3
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/regionservers.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all regionserver hosts.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+#     Default is ${HBASE_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: regionservers [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+# If the regionservers file is specified in the command line,
+# then it takes precedence over the definition in
+# hbase-env.sh. Save it here.
+HOSTLIST=$HBASE_REGIONSERVERS
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HBASE_REGIONSERVERS" = "" ]; then
+ export HOSTLIST="${HBASE_CONF_DIR}/regionservers"
+ else
+ export HOSTLIST="${HBASE_REGIONSERVERS}"
+ fi
+fi
+
+regionservers=`cat "$HOSTLIST"`
+if [ "$regionservers" = "localhost" ]; then
+ HBASE_REGIONSERVER_ARGS="\
+ -Dhbase.regionserver.port=16020 \
+ -Dhbase.regionserver.info.port=16030"
+
+ $"${@// /\\ }" ${HBASE_REGIONSERVER_ARGS} \
+        2>&1 | sed "s/^/$regionservers: /" &
+else
+ for regionserver in `cat "$HOSTLIST"`; do
+ if ${HBASE_SLAVE_PARALLEL:-true}; then
+ ssh $HBASE_SSH_OPTS $regionserver $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$regionserver: /" &
+ else # run each command serially
+ ssh $HBASE_SSH_OPTS $regionserver $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$regionserver: /"
+ fi
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
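
Fan-out examples (the commands are arbitrary; each output line is prefixed with its host by the sed above):

    bin/regionservers.sh uptime
    HBASE_SLAVE_PARALLEL=false bin/regionservers.sh "df -h /data"   # one host at a time
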
diff --git a/PCAP-PIC/hbase/bin/replication/copy_tables_desc.rb b/PCAP-PIC/hbase/bin/replication/copy_tables_desc.rb
new file mode 100644
index 0000000..44a24f9
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/replication/copy_tables_desc.rb
@@ -0,0 +1,104 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Script to recreate all tables from one cluster to another
+# To see usage for this script, run:
+#
+# ${HBASE_HOME}/bin/hbase org.jruby.Main copy_tables_desc.rb
+#
+
+include Java
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.HTableDescriptor
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.slf4j.LoggerFactory
+
+# Name of this script
+NAME = 'copy_tables_desc'.freeze
+
+# Print usage for this script
+def usage
+ puts format('Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]', NAME)
+ exit!
+end
+
+def copy(src, dst, table)
+ # verify if table exists in source cluster
+ begin
+ t = src.getTableDescriptor(TableName.valueOf(table))
+ rescue org.apache.hadoop.hbase.TableNotFoundException
+ puts format("Source table \"%s\" doesn't exist, skipping.", table)
+ return
+ end
+
+  # verify if table *doesn't* exist in the target cluster
+ begin
+ dst.createTable(t)
+ rescue org.apache.hadoop.hbase.TableExistsException
+ puts format('Destination table "%s" exists in remote cluster, skipping.', table)
+ return
+ end
+
+  puts format('Schema for table "%s" was successfully copied to remote cluster.', table)
+end
+
+usage if ARGV.size < 2 || ARGV.size > 3
+
+LOG = LoggerFactory.getLogger(NAME)
+
+parts1 = ARGV[0].split(':')
+
+parts2 = ARGV[1].split(':')
+
+parts3 = ARGV[2].split(',') unless ARGV[2].nil?
+
+c1 = HBaseConfiguration.create
+c1.set(HConstants::ZOOKEEPER_QUORUM, parts1[0])
+c1.set('hbase.zookeeper.property.clientPort', parts1[1])
+c1.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts1[2])
+
+connection1 = ConnectionFactory.createConnection(c1)
+admin1 = connection1.getAdmin
+
+c2 = HBaseConfiguration.create
+c2.set(HConstants::ZOOKEEPER_QUORUM, parts2[0])
+c2.set('hbase.zookeeper.property.clientPort', parts2[1])
+c2.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts2[2])
+
+connection2 = ConnectionFactory.createConnection(c2)
+admin2 = connection2.getAdmin
+
+if parts3.nil?
+ admin1.listTableNames.each do |t|
+ copy(admin1, admin2, t.nameAsString)
+ end
+else
+ parts3.each do |t|
+ copy(admin1, admin2, t)
+ end
+end
+
+admin1.close
+admin2.close
+connection1.close
+connection2.close
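
Filling in the usage string with hypothetical quorum triplets (host:clientport:znode_parent), copying two table schemas from a source to a target cluster would look like:

    bin/hbase org.jruby.Main bin/replication/copy_tables_desc.rb \
      src-zk1:2181:/hbase dst-zk1:2181:/hbase table1,table2

Omitting the trailing table list copies the schema of every table on the source.
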
diff --git a/PCAP-PIC/hbase/bin/rolling-restart.sh b/PCAP-PIC/hbase/bin/rolling-restart.sh
new file mode 100644
index 0000000..11c091d
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/rolling-restart.sh
@@ -0,0 +1,227 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all regionserver hosts.
+#
+# Environment Variables
+#
+# HBASE_REGIONSERVERS File naming remote hosts.
+#     Default is ${HBASE_CONF_DIR}/regionservers
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage_str="Usage: `basename $0` [--config <hbase-confdir>] [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>] [--autostart] [--rs-only] [--master-only] \
+  [--graceful] [--maxthreads xx] [--noack] [--movetimeout <timeout in ms>]"
+
+function usage() {
+ echo "${usage_str}"
+}
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# start hbase daemons
+errCode=$?
+if [ $errCode -ne 0 ]
+then
+ exit $errCode
+fi
+
+RR_RS=1
+RR_MASTER=1
+RR_GRACEFUL=0
+RR_MAXTHREADS=1
+START_CMD_NON_DIST_MODE=restart
+START_CMD_DIST_MODE=start
+RESTART_CMD_REGIONSERVER=restart
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --rs-only|-r)
+ RR_RS=1
+ RR_MASTER=0
+ RR_GRACEFUL=0
+ shift
+ ;;
+ --autostart)
+ START_CMD_NON_DIST_MODE="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autorestart"
+ START_CMD_DIST_MODE="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autostart"
+ RESTART_CMD_REGIONSERVER="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autorestart"
+ shift
+ ;;
+ --master-only)
+ RR_RS=0
+ RR_MASTER=1
+ RR_GRACEFUL=0
+ shift
+ ;;
+ --graceful)
+ RR_RS=0
+ RR_MASTER=0
+ RR_GRACEFUL=1
+ shift
+ ;;
+ --maxthreads)
+ shift
+ RR_MAXTHREADS=$1
+ shift
+ ;;
+ --noack)
+ RR_NOACK="--noack"
+ shift
+ ;;
+ --movetimeout)
+ shift
+ RR_MOVE_TIMEOUT=$1
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo Bad argument: $1
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# quick function to get a value from the HBase config file
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`HBASE_CONF_DIR=${HBASE_CONF_DIR} $bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+if [ "$distMode" == 'false' ]; then
+ if [ $RR_RS -ne 1 ] || [ $RR_MASTER -ne 1 ]; then
+    echo "Can't do selective rolling restart if not running distributed"
+ exit 1
+ fi
+ "$bin"/hbase-daemon.sh ${START_CMD_NON_DIST_MODE} master
+else
+ zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+ if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+ if [ $RR_MASTER -eq 1 ]; then
+ # stop all masters before re-start to avoid races for master znode
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" stop master-backup
+
+ # make sure the master znode has been deleted before continuing
+ zmaster=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master`
+ if [ "$zmaster" == "null" ]; then zmaster="master"; fi
+ zmaster=$zparent/$zmaster
+ echo -n "Waiting for Master ZNode ${zmaster} to expire"
+ echo
+ while ! "$bin"/hbase zkcli stat $zmaster 2>&1 | grep "Node does not exist"; do
+ echo -n "."
+ sleep 1
+ done
+ echo #force a newline
+
+ # all masters are down, now restart
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" ${START_CMD_DIST_MODE} master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" ${START_CMD_DIST_MODE} master-backup
+
+ echo "Wait a minute for master to come up join cluster"
+ sleep 60
+
+    # A master joining the cluster will start by cleaning out regions in transition.
+ # Wait until the master has cleaned out regions in transition before
+ # giving it a bunch of work to do; master is vulnerable during startup
+ zunassigned=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.unassigned`
+ if [ "$zunassigned" == "null" ]; then zunassigned="region-in-transition"; fi
+ zunassigned="$zparent/$zunassigned"
+    # Checking if /hbase/region-in-transition exists
+ ritZnodeCheck=`$bin/hbase zkcli stat ${zunassigned} 2>&1 | tail -1 \
+ | grep "Node does not exist:" >/dev/null`
+ ret=$?
+ if test 0 -eq ${ret}
+ then
+ echo "Znode ${zunassigned} does not exist"
+ else
+ echo -n "Waiting for ${zunassigned} to empty"
+ while true ; do
+ unassigned=`$bin/hbase zkcli stat ${zunassigned} 2>&1 \
+ | grep -e 'numChildren = '|sed -e 's,numChildren = ,,'`
+ if test 0 -eq ${unassigned}
+ then
+ echo
+ break
+ else
+ echo -n " ${unassigned}"
+ fi
+ sleep 1
+ done
+ fi
+ fi
+
+ if [ $RR_RS -eq 1 ]; then
+ # unlike the masters, roll all regionservers one-at-a-time
+ export HBASE_SLAVE_PARALLEL=false
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_REGIONSERVERS}" ${RESTART_CMD_REGIONSERVER} regionserver
+ fi
+
+ if [ $RR_GRACEFUL -eq 1 ]; then
+ # gracefully restart all online regionservers
+ masterport=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.master.port`
+ if [ "$masterport" == "null" ]; then masterport="16000"; fi
+ zkrs=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+ if [ "$zkrs" == "null" ]; then zkrs="rs"; fi
+ zkrs="$zparent/$zkrs"
+ online_regionservers=`$bin/hbase zkcli ls $zkrs 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+ echo "Disabling load balancer"
+ HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | tail -1)
+ echo "Previous balancer state was $HBASE_BALANCER_STATE"
+
+ for rs in $online_regionservers
+ do
+ rs_parts=(${rs//,/ })
+ hostname=${rs_parts[0]}
+ port=${rs_parts[1]}
+ if [ "$port" -eq "$masterport" ]; then
+ echo "Skipping regionserver on master machine $hostname:$port"
+ continue
+ else
+ echo "Gracefully restarting: $hostname"
+ "$bin"/graceful_stop.sh --config ${HBASE_CONF_DIR} --restart --reload -nob --maxthreads \
+ ${RR_MAXTHREADS} ${RR_NOACK} --movetimeout ${RR_MOVE_TIMEOUT} $hostname
+ sleep 1
+ fi
+ done
+ if [ "$HBASE_BALANCER_STATE" != "false" ]; then
+ echo "Restoring balancer state to $HBASE_BALANCER_STATE"
+ echo "balance_switch $HBASE_BALANCER_STATE" | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell &> /dev/null
+ fi
+ fi
+fi
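
Putting the options together (values hypothetical):

    # restart masters, then roll region servers one at a time
    bin/rolling-restart.sh

    # gracefully drain and restart each online region server, moving
    # regions with 4 threads and a 3-minute move timeout
    bin/rolling-restart.sh --graceful --maxthreads 4 --movetimeout 180000
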
diff --git a/PCAP-PIC/hbase/bin/rsgroup.sh b/PCAP-PIC/hbase/bin/rsgroup.sh
new file mode 100644
index 0000000..b1f8496
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/rsgroup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+source /etc/profile
+
+hbase shell <<EOF
+
+add_rsgroup 'important'
+
+move_servers_rsgroup 'important',['pcap-dellr740-dt001:16020']
+
+move_servers_rsgroup 'important',['pcap-dellr740-dt002:16020']
+
+flush 'tsg:report_result'
+
+move_tables_rsgroup 'important',['tsg:report_result']
+
+flush 'tsg_galaxy:job_result'
+
+move_tables_rsgroup 'important',['tsg_galaxy:job_result']
+
+
+EOF
+
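
A hypothetical follow-up check that the group took effect, assuming the rsgroup shell commands are enabled on this cluster:

    echo "list_rsgroups" | hbase shell -n
    echo "get_rsgroup 'important'" | hbase shell -n
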
diff --git a/PCAP-PIC/hbase/bin/set_hbase_env.sh b/PCAP-PIC/hbase/bin/set_hbase_env.sh
new file mode 100644
index 0000000..60612e8
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/set_hbase_env.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+echo -e "\n#hbase\nexport HBASE_HOME=/home/tsg/olap/hbase-2.2.3\nexport PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile.d/hbase.sh
+chmod +x /etc/profile.d/hbase.sh
+
+source /etc/profile
+
+keeppath='/etc/init.d/keephbasemaster'
+if [ -x $keeppath ];then
+ chkconfig --add keephbasemaster
+ chkconfig keephbasemaster on
+ service keephbasemaster start && sleep 5
+ master_dae=`ps -ef | grep dae-hmaster.sh | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+
+keeppath='/etc/init.d/keephbaseregion'
+if [ -x $keeppath ];then
+ chkconfig --add keephbaseregion
+ chkconfig keephbaseregion on
+ service keephbaseregion start && sleep 5
+ region_dae=`ps -ef | grep dae-hregion.sh | grep -v grep | wc -l`
+ if [ $region_dae -lt 1 ];then
+ nohup /home/tsg/olap/hbase-2.2.3/bin/dae-hregion.sh > /dev/null 2>&1 &
+ fi
+fi
+
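
A quick sanity check after running the script, mirroring the ps pipelines it uses itself (output varies per host):

    source /etc/profile && echo "$HBASE_HOME"   # expect /home/tsg/olap/hbase-2.2.3
    ps -ef | grep -E "dae-hmaster|dae-hregion" | grep -v grep
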
diff --git a/PCAP-PIC/hbase/bin/shutdown_regionserver.rb b/PCAP-PIC/hbase/bin/shutdown_regionserver.rb
new file mode 100644
index 0000000..fd1af30
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/shutdown_regionserver.rb
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This script is used to issue a stop command to a regionserver via RPC.
+# Intended for use in environments where sshing around is inappropriate
+# Run it like this by passing it to a jruby interpreter:
+#
+# ./bin/hbase org.jruby.Main bin/shutdown_regionserver.rb c2021:16020
+
+include Java
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.client.HBaseAdmin
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+
+def usage(msg = nil)
+ $stderr.puts 'Usage: shutdown_regionserver.rb <host:port>..'
+ $stderr.puts
+ $stderr.puts 'Stops the specified regionservers via RPC'
+ $stderr.puts format('Error: %s', msg) if msg
+ abort
+end
+
+usage if ARGV.empty?
+
+ARGV.each do |x|
+ usage format('Invalid host:port: %s', x) unless x.include? ':'
+end
+
+config = HBaseConfiguration.create
+connection = ConnectionFactory.createConnection(config)
+begin
+ admin = connection.getAdmin
+rescue
+ abort "Error: Couldn't instantiate HBaseAdmin"
+end
+
+ARGV.each do |hostport|
+ admin.stopRegionServer(hostport)
+end
+admin.close
+connection.close
diff --git a/PCAP-PIC/hbase/bin/start-hbase.cmd b/PCAP-PIC/hbase/bin/start-hbase.cmd
new file mode 100644
index 0000000..676a11e
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/start-hbase.cmd
@@ -0,0 +1,61 @@
+@rem/**
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem Modelled after $HADOOP_HOME/bin/start-hbase.sh.
+
+@rem Start hadoop hbase daemons.
+@rem Run this on master node.
+@echo off
+set usage="Usage: start-hbase.cmd"
+
+setlocal
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call %hbase-config-script%
+
+set distModeCommand=call %HBASE_BIN_PATH%\hbase.cmd org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed
+for /f %%i in ('%distModeCommand%') do set distMode=%%i
+
+if "%distMode%"=="false" (
+ start "HBase Distribution" %HBASE_BIN_PATH%\hbase.cmd master start
+) else (
+ if "%distMode%"=="true" (
+ @echo This is not implemented yet. Stay tuned.
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "${HBASE_CONF_DIR}" start zookeeper
+ @rem call %HBASE_BIN_PATH%\hbase-daemon.cmd --config "${HBASE_CONF_DIR}" start master
+
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "%HBASE_CONF_DIR%" --hosts "%HBASE_REGIONSERVERS%" start regionserver
+ @rem call %HBASE_BIN_PATH%\hbase-daemons.cmd --config "%HBASE_CONF_DIR%" --hosts "%HBASE_BACKUP_MASTERS%" start master-backup
+ ) else (
+ echo ERROR: Could not determine the startup mode.
+ )
+)
+
+@rem -------------- End of main script --------------
+endlocal
+goto :eof
\ No newline at end of file
diff --git a/PCAP-PIC/hbase/bin/start-hbase.sh b/PCAP-PIC/hbase/bin/start-hbase.sh
new file mode 100644
index 0000000..f053526
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/start-hbase.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Modelled after $HADOOP_HOME/bin/start-hbase.sh.
+
+# Start hadoop hbase daemons.
+# Run this on master node.
+usage="Usage: start-hbase.sh [--autostart-window-size <window size in hours>]\
+ [--autostart-window-retry-limit <retry count limit for autostart>]\
+ [autostart|start]"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# default autostart args value indicating infinite window size and no retry limit
+AUTOSTART_WINDOW_SIZE=0
+AUTOSTART_WINDOW_RETRY_LIMIT=0
+
+. "$bin"/hbase-config.sh
+
+# start hbase daemons
+errCode=$?
+if [ $errCode -ne 0 ]
+then
+ exit $errCode
+fi
+
+if [ "$1" = "autostart" ]
+then
+ commandToRun="--autostart-window-size ${AUTOSTART_WINDOW_SIZE} --autostart-window-retry-limit ${AUTOSTART_WINDOW_RETRY_LIMIT} autostart"
+else
+ commandToRun="start"
+fi
+
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+
+if [ "$distMode" == 'false' ]
+then
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master
+else
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" $commandToRun zookeeper
+ "$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" $commandToRun master
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_REGIONSERVERS}" $commandToRun regionserver
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
+ --hosts "${HBASE_BACKUP_MASTERS}" $commandToRun master-backup
+fi
diff --git a/PCAP-PIC/hbase/bin/stop-hbase.cmd b/PCAP-PIC/hbase/bin/stop-hbase.cmd
new file mode 100644
index 0000000..9718055
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/stop-hbase.cmd
@@ -0,0 +1,54 @@
+@echo off
+@rem/**
+@rem * Licensed to the Apache Software Foundation (ASF) under one
+@rem * or more contributor license agreements. See the NOTICE file
+@rem * distributed with this work for additional information
+@rem * regarding copyright ownership. The ASF licenses this file
+@rem * to you under the Apache License, Version 2.0 (the
+@rem * "License"); you may not use this file except in compliance
+@rem * with the License. You may obtain a copy of the License at
+@rem *
+@rem * http://www.apache.org/licenses/LICENSE-2.0
+@rem *
+@rem * Unless required by applicable law or agreed to in writing, software
+@rem * distributed under the License is distributed on an "AS IS" BASIS,
+@rem * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem * See the License for the specific language governing permissions and
+@rem * limitations under the License.
+@rem */
+
+@rem Modelled after $HADOOP_HOME/bin/stop-hbase.sh.
+
+@rem Stop hadoop hbase daemons. Run this on master node.
+
+setlocal
+
+for %%i in (%0) do (
+ if not defined HBASE_BIN_PATH (
+ set HBASE_BIN_PATH=%%~dpi
+ )
+)
+
+if "%HBASE_BIN_PATH:~-1%" == "\" (
+ set HBASE_BIN_PATH=%HBASE_BIN_PATH:~0,-1%
+)
+set hbase-config-script=%HBASE_BIN_PATH%\hbase-config.cmd
+call %hbase-config-script%
+
+set distModeCommand=call %HBASE_BIN_PATH%\hbase.cmd org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed
+for /f %%i in ('%distModeCommand%') do set distMode=%%i
+
+if "%distMode%"=="false" (
+ call %HBASE_BIN_PATH%\hbase.cmd master stop
+
+) else (
+ if "%distMode%"=="true" (
+ @echo This is not implemented yet. Stay tuned.
+ ) else (
+ echo ERROR: Could not determine the startup mode.
+ )
+)
+
+@rem -------------- End of main script --------------
+endlocal
+goto :eof
\ No newline at end of file
diff --git a/PCAP-PIC/hbase/bin/stop-hbase.sh b/PCAP-PIC/hbase/bin/stop-hbase.sh
new file mode 100644
index 0000000..4a19681
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/stop-hbase.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Modelled after $HADOOP_HOME/bin/stop-hbase.sh.
+
+# Stop hadoop hbase daemons. Run this on master node.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+. "$bin"/hbase-common.sh
+
+# variables needed for stop command
+if [ "$HBASE_LOG_DIR" = "" ]; then
+ export HBASE_LOG_DIR="$HBASE_HOME/logs"
+fi
+mkdir -p "$HBASE_LOG_DIR"
+
+if [ "$HBASE_IDENT_STRING" = "" ]; then
+ export HBASE_IDENT_STRING="$USER"
+fi
+
+export HBASE_LOG_PREFIX=hbase-$HBASE_IDENT_STRING-master-$HOSTNAME
+export HBASE_LOGFILE=$HBASE_LOG_PREFIX.log
+logout=$HBASE_LOG_DIR/$HBASE_LOG_PREFIX.out
+loglog="${HBASE_LOG_DIR}/${HBASE_LOGFILE}"
+pid=${HBASE_PID_DIR:-/tmp}/hbase-$HBASE_IDENT_STRING-master.pid
+
+if [[ -e $pid ]]; then
+ echo -n stopping hbase
+ echo "`date` Stopping hbase (via master)" >> $loglog
+
+ nohup nice -n ${HBASE_NICENESS:-0} "$HBASE_HOME"/bin/hbase \
+ --config "${HBASE_CONF_DIR}" \
+ master stop "$@" > "$logout" 2>&1 < /dev/null &
+
+ waitForProcessEnd `cat $pid` 'stop-master-command'
+
+ rm -f $pid
+else
+ echo no hbase master found
+fi
+
+# distributed == false means that the HMaster will kill ZK when it exits
+# HBASE-6504 - only take the first line of the output in case verbose gc is on
+distMode=`$bin/hbase --config "$HBASE_CONF_DIR" org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+if [ "$distMode" == 'true' ]
+then
+ "$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" stop zookeeper
+fi
diff --git a/PCAP-PIC/hbase/bin/test/process_based_cluster.sh b/PCAP-PIC/hbase/bin/test/process_based_cluster.sh
new file mode 100644
index 0000000..eb8633f
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/test/process_based_cluster.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+#/**
+# * Copyright The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+set -e -u -o pipefail
+
+SCRIPT_NAME=${0##*/}
+SCRIPT_DIR=$(cd `dirname $0` && pwd )
+
+print_usage() {
+ cat >&2 <<EOT
+Usage: $SCRIPT_NAME <options>
+Options:
+ --kill
+ Kill local process-based HBase cluster using pid files.
+ --show
+ Show HBase processes running on this machine
+EOT
+ exit 1
+}
+
+show_processes() {
+ ps -ef | grep -P "(HRegionServer|HMaster|HQuorumPeer) start" | grep -v grep
+}
+
+cmd_specified() {
+ if [ "$CMD_SPECIFIED" ]; then
+ echo "Only one command can be specified" >&2
+ exit 1
+ fi
+ CMD_SPECIFIED=1
+}
+
+list_pid_files() {
+ LOCAL_CLUSTER_DIR=$SCRIPT_DIR/../../target/local_cluster
+ LOCAL_CLUSTER_DIR=$( cd $LOCAL_CLUSTER_DIR && pwd )
+ find $LOCAL_CLUSTER_DIR -name "*.pid"
+}
+
+if [ $# -eq 0 ]; then
+ print_usage
+fi
+
+IS_KILL=""
+IS_SHOW=""
+CMD_SPECIFIED=""
+
+while [ $# -ne 0 ]; do
+ case "$1" in
+ -h|--help)
+ print_usage ;;
+ --kill)
+ IS_KILL=1
+ cmd_specified ;;
+ --show)
+ IS_SHOW=1
+ cmd_specified ;;
+ *)
+ echo "Invalid option: $1" >&2
+ exit 1
+ esac
+ shift
+done
+
+if [ "$IS_KILL" ]; then
+ list_pid_files | \
+ while read F; do
+ PID=`cat $F`
+ echo "Killing pid $PID from file $F"
+ # Kill may fail but that's OK, so turn off error handling for a moment.
+ set +e
+ kill -9 $PID
+ set -e
+ done
+elif [ "$IS_SHOW" ]; then
+ PIDS=""
+ for F in `list_pid_files`; do
+ PID=`cat $F`
+ if [ -n "$PID" ]; then
+ if [ -n "$PIDS" ]; then
+ PIDS="$PIDS,"
+ fi
+ PIDS="$PIDS$PID"
+ fi
+ done
+ ps -p $PIDS
+else
+ echo "No command specified" >&2
+ exit 1
+fi
+
+
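
Example session (pids are whatever is recorded under target/local_cluster):

    bin/test/process_based_cluster.sh --show    # list HMaster/HRegionServer/HQuorumPeer processes
    bin/test/process_based_cluster.sh --kill    # kill -9 every pid found in *.pid files
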
diff --git a/PCAP-PIC/hbase/bin/zookeepers.sh b/PCAP-PIC/hbase/bin/zookeepers.sh
new file mode 100644
index 0000000..97bf41b
--- /dev/null
+++ b/PCAP-PIC/hbase/bin/zookeepers.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Run a shell command on all zookeeper hosts.
+#
+# Environment Variables
+#
+# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
+#
+# Modelled after $HADOOP_HOME/bin/slaves.sh.
+
+usage="Usage: zookeepers [--config <hbase-confdir>] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin">/dev/null; pwd`
+
+. "$bin"/hbase-config.sh
+
+if [ "$HBASE_MANAGES_ZK" = "" ]; then
+ HBASE_MANAGES_ZK=true
+fi
+
+if [ "$HBASE_MANAGES_ZK" = "true" ]; then
+ hosts=`"$bin"/hbase org.apache.hadoop.hbase.zookeeper.ZKServerTool | grep '^ZK host:' | sed 's,^ZK host:,,'`
+ cmd=$"${@// /\\ }"
+ for zookeeper in $hosts; do
+ ssh $HBASE_SSH_OPTS $zookeeper $cmd 2>&1 | sed "s/^/$zookeeper: /" &
+ if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
+ sleep $HBASE_SLAVE_SLEEP
+ fi
+ done
+fi
+
+wait
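
For example, to probe each quorum member that HBase manages (commands arbitrary):

    bin/zookeepers.sh hostname
    bin/zookeepers.sh "jps | grep HQuorumPeer"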