Diffstat (limited to 'PCAP-PIC/hadoop')
-rw-r--r--  PCAP-PIC/hadoop/bin/container-executor | bin 0 -> 160127 bytes
-rw-r--r--  PCAP-PIC/hadoop/bin/hadoop | 169
-rw-r--r--  PCAP-PIC/hadoop/bin/hadoop.cmd | 272
-rw-r--r--  PCAP-PIC/hadoop/bin/hdfs | 308
-rw-r--r--  PCAP-PIC/hadoop/bin/hdfs.cmd | 234
-rw-r--r--  PCAP-PIC/hadoop/bin/ini_hdfs.sh | 46
-rw-r--r--  PCAP-PIC/hadoop/bin/mapred | 172
-rw-r--r--  PCAP-PIC/hadoop/bin/mapred.cmd | 216
-rw-r--r--  PCAP-PIC/hadoop/bin/rcc | 61
-rw-r--r--  PCAP-PIC/hadoop/bin/set_hdfs_env.sh | 71
-rw-r--r--  PCAP-PIC/hadoop/bin/set_yarn_env.sh | 58
-rw-r--r--  PCAP-PIC/hadoop/bin/test-container-executor | bin 0 -> 204075 bytes
-rw-r--r--  PCAP-PIC/hadoop/bin/yarn | 330
-rw-r--r--  PCAP-PIC/hadoop/bin/yarn.cmd | 332
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/capacity-scheduler.xml | 134
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/configuration.xsl | 40
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/container-executor.cfg | 4
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/core-site.xml | 58
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hadoop-env.cmd | 81
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hadoop-env.sh | 105
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics.properties | 75
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties | 68
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hadoop-policy.xml | 226
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/hdfs-site.xml | 142
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/httpfs-env.sh | 53
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/httpfs-log4j.properties | 35
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/httpfs-signature.secret | 1
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/httpfs-site.xml | 17
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/kms-acls.xml | 135
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/kms-env.sh | 55
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/kms-log4j.properties | 38
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/kms-site.xml | 173
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/log4j.properties | 268
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/log4j.properties_bak | 268
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/mapred-env.cmd | 20
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/mapred-env.sh | 27
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/mapred-queues.xml.template | 92
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/mapred-site.xml | 33
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/slaves | 6
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/ssl-client.xml.example | 80
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/ssl-server.xml.example | 78
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/yarn-env.cmd | 60
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/yarn-env.sh | 127
-rw-r--r--  PCAP-PIC/hadoop/etc/hadoop/yarn-site.xml | 224
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-hdfsjournal.sh | 42
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-hdfsmaster.sh | 53
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-hdfsworker.sh | 47
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-yarnhistory.sh | 41
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-yarnmaster.sh | 41
-rw-r--r--  PCAP-PIC/hadoop/sbin/dae-yarnworker.sh | 41
-rw-r--r--  PCAP-PIC/hadoop/sbin/distribute-exclude.sh | 81
-rw-r--r--  PCAP-PIC/hadoop/sbin/hadoop-daemon.sh | 214
-rw-r--r--  PCAP-PIC/hadoop/sbin/hadoop-daemons.sh | 36
-rw-r--r--  PCAP-PIC/hadoop/sbin/hdfs-config.cmd | 43
-rw-r--r--  PCAP-PIC/hadoop/sbin/hdfs-config.sh | 36
-rw-r--r--  PCAP-PIC/hadoop/sbin/httpfs.sh | 65
-rw-r--r--  PCAP-PIC/hadoop/sbin/kms.sh | 83
-rw-r--r--  PCAP-PIC/hadoop/sbin/mr-jobhistory-daemon.sh | 147
-rw-r--r--  PCAP-PIC/hadoop/sbin/refresh-namenodes.sh | 48
-rw-r--r--  PCAP-PIC/hadoop/sbin/slaves.sh | 67
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-all.cmd | 52
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-all.sh | 38
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-balancer.sh | 27
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-dfs.cmd | 41
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-dfs.sh | 118
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-secure-dns.sh | 33
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-yarn.cmd | 47
-rw-r--r--  PCAP-PIC/hadoop/sbin/start-yarn.sh | 35
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-all.cmd | 52
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-all.sh | 38
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-balancer.sh | 28
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-dfs.cmd | 41
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-dfs.sh | 89
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-secure-dns.sh | 33
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-yarn.cmd | 47
-rw-r--r--  PCAP-PIC/hadoop/sbin/stop-yarn.sh | 35
-rw-r--r--  PCAP-PIC/hadoop/sbin/yarn-daemon.sh | 161
-rw-r--r--  PCAP-PIC/hadoop/sbin/yarn-daemons.sh | 38
78 files changed, 7030 insertions(+), 0 deletions(-)
diff --git a/PCAP-PIC/hadoop/bin/container-executor b/PCAP-PIC/hadoop/bin/container-executor
new file mode 100644
index 0000000..5e228bc
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/container-executor
Binary files differ
diff --git a/PCAP-PIC/hadoop/bin/hadoop b/PCAP-PIC/hadoop/bin/hadoop
new file mode 100644
index 0000000..a5e8885
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/hadoop
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs the hadoop core commands.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+function print_usage(){
+ echo "Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo " or"
+ echo " where COMMAND is one of:"
+ echo " fs run a generic filesystem user client"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " note: please use \"yarn jar\" to launch"
+ echo " YARN applications, not this command."
+ echo " checknative [-a|-h] check native hadoop and compression libraries availability"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+ echo " classpath prints the class path needed to get the"
+ echo " credential interact with credential providers"
+ echo " Hadoop jar and the required libraries"
+ echo " daemonlog get/set the log level for each daemon"
+ echo " trace view and modify Hadoop tracing settings"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+
+ #hdfs commands
+ namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups|portmap|nfs3)
+ echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
+ echo "Instead use the hdfs command for it." 1>&2
+ echo "" 1>&2
+ #try to locate hdfs and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
+ exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
+ exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ else
+ echo "HADOOP_HDFS_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #mapred commands for backwards compatibility
+ pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
+ echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
+ echo "Instead use the mapred command for it." 1>&2
+ echo "" 1>&2
+ #try to locate mapred and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
+ exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
+ exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ else
+ echo "HADOOP_MAPRED_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #core commands
+ *)
+ # the core commands
+ if [ "$COMMAND" = "fs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+ echo "WARNING: Use \"yarn jar\" to launch YARN applications." 1>&2
+ fi
+ elif [ "$COMMAND" = "key" ] ; then
+ CLASS=org.apache.hadoop.crypto.key.KeyShell
+ elif [ "$COMMAND" = "checknative" ] ; then
+ CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "credential" ] ; then
+ CLASS=org.apache.hadoop.security.alias.CredentialShell
+ elif [ "$COMMAND" = "trace" ] ; then
+ CLASS=org.apache.hadoop.tracing.TraceAdmin
+ elif [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 1 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit
+ fi
+ elif [[ "$COMMAND" = -* ]] ; then
+ # class and package names cannot begin with a -
+ echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
+ exit 1
+ else
+ CLASS=$COMMAND
+ fi
+
+ # cygwin path translation
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+ fi
+
+ shift
+
+ # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+
+ #make sure security appender is turned off
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+ export CLASSPATH=$CLASSPATH
+ exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+ ;;
+
+esac
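(For reference, the delegation and classpath short-circuit above behave as in this sketch; the session is illustrative and the output path is an assumption, not taken from this tree.)

    # deprecated HDFS commands are forwarded to bin/hdfs with the same arguments
    $ hadoop fsck /
    DEPRECATED: Use of this script to execute hdfs command is deprecated.
    Instead use the hdfs command for it.
    # "hadoop classpath" with no extra argument echoes $CLASSPATH without starting a JVM
    $ hadoop classpath
    /home/tsg/olap/hadoop-2.7.1/etc/hadoop:...   # illustrative output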
diff --git a/PCAP-PIC/hadoop/bin/hadoop.cmd b/PCAP-PIC/hadoop/bin/hadoop.cmd
new file mode 100644
index 0000000..ccf2fff
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/hadoop.cmd
@@ -0,0 +1,272 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+
+@rem This script runs the hadoop core commands.
+
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem HADOOP_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
+@rem added in the beginning of the global
+@rem classpath. Can be defined, for example,
+@rem by doing
+@rem export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem HADOOP_OPTS Extra Java runtime options.
+@rem
+@rem HADOOP_CLIENT_OPTS when the respective command is run.
+@rem HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
+@rem for e.g. HADOOP_CLIENT_OPTS applies to
+@rem more than one command (fs, dfs, fsck,
+@rem dfsadmin etc)
+@rem
+@rem HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+@rem
+@rem HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+call :updatepath %HADOOP_BIN_PATH%
+
+:main
+ setlocal enabledelayedexpansion
+
+ set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+ if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+ )
+
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+
+ set hadoop-command=%1
+ if not defined hadoop-command (
+ goto print_usage
+ )
+
+ call :make_command_arguments %*
+
+ set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups
+ for %%i in ( %hdfscommands% ) do (
+ if %hadoop-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ @echo DEPRECATED: Use of this script to execute hdfs command is deprecated. 1>&2
+ @echo Instead use the hdfs command for it. 1>&2
+ if exist %HADOOP_HDFS_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HDFS_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_HDFS_HOME not found!
+ goto :eof
+ )
+ )
+
+ set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker
+ for %%i in ( %mapredcommands% ) do (
+ if %hadoop-command% == %%i set mapredcommand=true
+ )
+ if defined mapredcommand (
+ @echo DEPRECATED: Use of this script to execute mapred command is deprecated. 1>&2
+ @echo Instead use the mapred command for it. 1>&2
+ if exist %HADOOP_MAPRED_HOME%\bin\mapred.cmd (
+ call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\mapred.cmd (
+ call %HADOOP_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_MAPRED_HOME not found!
+ goto :eof
+ )
+ )
+
+ if %hadoop-command% == classpath (
+ if not defined hadoop-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key
+ for %%i in ( %corecommands% ) do (
+ if %hadoop-command% == %%i set corecommand=true
+ )
+ if defined corecommand (
+ call :%hadoop-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hadoop-command%
+ )
+
+ set path=%PATH%;%HADOOP_BIN_PATH%
+
+ @rem Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+
+ @rem make sure security appender is turned off
+ if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,NullAppender
+ )
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER%
+
+ call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments%
+
+ exit /b %ERRORLEVEL%
+
+:fs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ goto :eof
+
+:jar
+ if defined YARN_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ ) else if defined YARN_CLIENT_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ )
+ set CLASS=org.apache.hadoop.util.RunJar
+ goto :eof
+
+:checknative
+ set CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:credential
+ set CLASS=org.apache.hadoop.security.alias.CredentialShell
+ goto :eof
+
+:key
+ set CLASS=org.apache.hadoop.crypto.key.KeyShell
+ goto :eof
+
+:updatepath
+ set path_to_add=%*
+ set current_path_comparable=%path%
+ set current_path_comparable=%current_path_comparable: =_%
+ set current_path_comparable=%current_path_comparable:(=_%
+ set current_path_comparable=%current_path_comparable:)=_%
+ set path_to_add_comparable=%path_to_add%
+ set path_to_add_comparable=%path_to_add_comparable: =_%
+ set path_to_add_comparable=%path_to_add_comparable:(=_%
+ set path_to_add_comparable=%path_to_add_comparable:)=_%
+
+ for %%i in ( %current_path_comparable% ) do (
+ if /i "%%i" == "%path_to_add_comparable%" (
+ set path_to_add_exist=true
+ )
+ )
+ set system_path_comparable=
+ set path_to_add_comparable=
+ if not defined path_to_add_exist path=%path_to_add%;%path%
+ set path_to_add=
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _arguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _arguments (
+ set _arguments=%1
+ ) else (
+ set _arguments=!_arguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hadoop-command-arguments=%_arguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo fs run a generic filesystem user client
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo note: please use "yarn jar" to launch
+ @echo YARN applications, not this command.
+ @echo checknative [-a^|-h] check native hadoop and compression libraries availability
+ @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+ @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo credential interact with credential providers
+ @echo key manage keys via the KeyProvider
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/PCAP-PIC/hadoop/bin/hdfs b/PCAP-PIC/hadoop/bin/hdfs
new file mode 100644
index 0000000..7f93738
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/hdfs
@@ -0,0 +1,308 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Environment Variables
+#
+# JSVC_HOME home directory of jsvc binary. Required for starting secure
+# datanode.
+#
+# JSVC_OUTFILE path to jsvc output file. Defaults to
+# $HADOOP_LOG_DIR/jsvc.out.
+#
+# JSVC_ERRFILE path to jsvc error file. Defaults to $HADOOP_LOG_DIR/jsvc.err.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin" > /dev/null; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+function print_usage(){
+ echo "Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND"
+ echo " where COMMAND is one of:"
+ echo " dfs run a filesystem command on the file systems supported in Hadoop."
+ echo " classpath prints the classpath"
+ echo " namenode -format format the DFS filesystem"
+ echo " secondarynamenode run the DFS secondary namenode"
+ echo " namenode run the DFS namenode"
+ echo " journalnode run the DFS journalnode"
+ echo " zkfc run the ZK Failover Controller daemon"
+ echo " datanode run a DFS datanode"
+ echo " dfsadmin run a DFS admin client"
+ echo " haadmin run a DFS HA admin client"
+ echo " fsck run a DFS filesystem checking utility"
+ echo " balancer run a cluster balancing utility"
+ echo " jmxget get JMX exported values from NameNode or DataNode."
+ echo " mover run a utility to move block replicas across"
+ echo " storage types"
+ echo " oiv apply the offline fsimage viewer to an fsimage"
+ echo " oiv_legacy apply the offline fsimage viewer to an legacy fsimage"
+ echo " oev apply the offline edits viewer to an edits file"
+ echo " fetchdt fetch a delegation token from the NameNode"
+ echo " getconf get config values from configuration"
+ echo " groups get the groups which users belong to"
+ echo " snapshotDiff diff two snapshots of a directory or diff the"
+ echo " current directory contents with a snapshot"
+ echo " lsSnapshottableDir list all snapshottable dirs owned by the current user"
+ echo " Use -help to see options"
+ echo " portmap run a portmap service"
+ echo " nfs3 run an NFS version 3 gateway"
+ echo " cacheadmin configure the HDFS cache"
+ echo " crypto configure HDFS encryption zones"
+ echo " storagepolicies list/get/set block storage policies"
+ echo " version print the version"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+ # There are also debug commands, but they don't show up in this listing.
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_secure_dn="true"
+ else
+ echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+ "isn't set. Falling back to starting insecure DN."
+ fi
+fi
+
+# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
+if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_privileged_nfs="true"
+ else
+ echo "It looks like you're trying to start a privileged NFS server, but"\
+ "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
+ fi
+fi
+
+if [ "$COMMAND" = "namenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+# HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_JMX_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+ CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_JMX_OPTS"
+ if [ "$starting_secure_dn" = "true" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
+ else
+ HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+ fi
+elif [ "$COMMAND" = "journalnode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "haadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "mover" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+elif [ "$COMMAND" = "jmxget" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+elif [ "$COMMAND" = "oiv" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+elif [ "$COMMAND" = "oiv_legacy" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+elif [ "$COMMAND" = "oev" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+elif [ "$COMMAND" = "fetchdt" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetConf
+elif [ "$COMMAND" = "groups" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+elif [ "$COMMAND" = "snapshotDiff" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+elif [ "$COMMAND" = "portmap" ] ; then
+ CLASS=org.apache.hadoop.portmap.Portmap
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
+elif [ "$COMMAND" = "nfs3" ] ; then
+ CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
+elif [ "$COMMAND" = "cacheadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+elif [ "$COMMAND" = "crypto" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+elif [ "$COMMAND" = "debug" ]; then
+ CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+elif [ "$COMMAND" = "classpath" ]; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+else
+ CLASS="$COMMAND"
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+export CLASSPATH=$CLASSPATH
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+# Check to see if we should start a secure datanode
+if [ "$starting_secure_dn" = "true" ]; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+ else
+ HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_SECURE_DN_PID" \
+ -nodetach \
+ -user "$HADOOP_SECURE_DN_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
+elif [ "$starting_privileged_nfs" = "true" ] ; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+ else
+ HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
+ -nodetach \
+ -user "$HADOOP_PRIVILEGED_NFS_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+else
+ # run it
+ exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+fi
+
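The secure-datanode branch above swaps java for jsvc when the script runs as root. A minimal sketch of the environment it expects (the account name and directories are assumptions):

    # run as root; jsvc binds the privileged ports, then drops to $HADOOP_SECURE_DN_USER
    export HADOOP_SECURE_DN_USER=hdfs                # assumed service account
    export JSVC_HOME=/opt/commons-daemon             # directory containing the jsvc binary
    export HADOOP_SECURE_DN_PID_DIR=/var/run/hadoop  # optional; overrides HADOOP_PID_DIR
    export HADOOP_SECURE_DN_LOG_DIR=/var/log/hadoop  # optional; overrides HADOOP_LOG_DIR
    hdfs datanode   # exec's $JSVC_HOME/jsvc ... SecureDataNodeStarter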
diff --git a/PCAP-PIC/hadoop/bin/hdfs.cmd b/PCAP-PIC/hadoop/bin/hdfs.cmd
new file mode 100644
index 0000000..d52f52e
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/hdfs.cmd
@@ -0,0 +1,234 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
+ call %HADOOP_CONF_DIR%\hadoop-env.cmd
+ )
+
+ set hdfs-command=%1
+ call :make_command_arguments %*
+
+ if not defined hdfs-command (
+ goto print_usage
+ )
+
+ if %hdfs-command% == classpath (
+ if not defined hdfs-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+ set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath
+ for %%i in ( %hdfscommands% ) do (
+ if %hdfs-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ call :%hdfs-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hdfs-command%
+ )
+
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:namenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS%
+ goto :eof
+
+:journalnode
+ set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS%
+ goto :eof
+
+:zkfc
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS%
+ goto :eof
+
+:secondarynamenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS%
+ goto :eof
+
+:datanode
+ set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode
+ set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS%
+ goto :eof
+
+:dfs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:dfsadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:haadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:fsck
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:balancer
+ set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS%
+ goto :eof
+
+:jmxget
+ set CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:oiv
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+ goto :eof
+
+:oev
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+ goto :eof
+
+:fetchdt
+ set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+ goto :eof
+
+:getconf
+ set CLASS=org.apache.hadoop.hdfs.tools.GetConf
+ goto :eof
+
+:groups
+ set CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+ goto :eof
+
+:snapshotDiff
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+ goto :eof
+
+:lsSnapshottableDir
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+ goto :eof
+
+:cacheadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+ goto :eof
+
+:mover
+ set CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
+ goto :eof
+
+:storagepolicies
+ set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _hdfsarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hdfsarguments (
+ set _hdfsarguments=%1
+ ) else (
+ set _hdfsarguments=!_hdfsarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hdfs-command-arguments=%_hdfsarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo dfs run a filesystem command on the file systems supported in Hadoop.
+ @echo namenode -format format the DFS filesystem
+ @echo secondarynamenode run the DFS secondary namenode
+ @echo namenode run the DFS namenode
+ @echo journalnode run the DFS journalnode
+ @echo zkfc run the ZK Failover Controller daemon
+ @echo datanode run a DFS datanode
+ @echo dfsadmin run a DFS admin client
+ @echo haadmin run a DFS HA admin client
+ @echo fsck run a DFS filesystem checking utility
+ @echo balancer run a cluster balancing utility
+ @echo jmxget get JMX exported values from NameNode or DataNode.
+ @echo oiv apply the offline fsimage viewer to an fsimage
+ @echo oev apply the offline edits viewer to an edits file
+ @echo fetchdt fetch a delegation token from the NameNode
+ @echo getconf get config values from configuration
+ @echo groups get the groups which users belong to
+ @echo snapshotDiff diff two snapshots of a directory or diff the
+ @echo current directory contents with a snapshot
+ @echo lsSnapshottableDir list all snapshottable dirs owned by the current user
+ @echo Use -help to see options
+ @echo cacheadmin configure the HDFS cache
+ @echo mover run a utility to move block replicas across storage types
+ @echo storagepolicies list/get/set block storage policies
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/PCAP-PIC/hadoop/bin/ini_hdfs.sh b/PCAP-PIC/hadoop/bin/ini_hdfs.sh
new file mode 100644
index 0000000..96f1f31
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/ini_hdfs.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+
+MASTER_IP=192.168.10.193
+SLAVE1_IP=192.168.10.194
+
+BASE_DIR=/home/tsg/olap
+VERSION=hadoop-2.7.1
+
+function ini_namenode() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hadoop namenode -format
+
+if [ $? -eq "0" ];then
+# scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
+ echo yes
+else
+ echo no
+fi
+}
+
+function ini_zk() {
+
+cd $BASE_DIR/$VERSION/bin
+yes | ./hdfs zkfc -formatZK
+
+if [ $? -eq "0" ];then
+ echo yes
+else
+ echo no
+fi
+}
+
+case $1 in
+namenode)
+ini_namenode
+;;
+zkfc)
+ini_zk
+;;
+* )
+echo "Please enter a valid command."
+;;
+esac
+
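ini_hdfs.sh wraps the two one-time formatting steps of an HA HDFS bootstrap. A sketch of the intended first-run sequence on the master (192.168.10.193), assuming the JournalNodes and ZooKeeper are already running:

    cd /home/tsg/olap/hadoop-2.7.1/bin
    ./ini_hdfs.sh namenode   # runs "hadoop namenode -format"; prompts auto-answered by yes
    ./ini_hdfs.sh zkfc       # runs "hdfs zkfc -formatZK" to create the failover znode
    # each step prints "yes" on success, "no" on failure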
diff --git a/PCAP-PIC/hadoop/bin/mapred b/PCAP-PIC/hadoop/bin/mapred
new file mode 100644
index 0000000..fe16e07
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/mapred
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+ . ${HADOOP_LIBEXEC_DIR}/mapred-config.sh
+else
+ . "$bin/mapred-config.sh"
+fi
+
+function print_usage(){
+ echo "Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND"
+ echo " where COMMAND is one of:"
+ echo " pipes run a Pipes job"
+ echo " job manipulate MapReduce jobs"
+ echo " queue get information regarding JobQueues"
+ echo " classpath prints the class path needed for running"
+ echo " mapreduce subcommands"
+ echo " historyserver run job history servers as a standalone daemon"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+ echo " hsadmin job history server admin interface"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+if [ "$COMMAND" = "job" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobClient
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobQueueClient
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "pipes" ] ; then
+ CLASS=org.apache.hadoop.mapred.pipes.Submitter
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "sampler" ] ; then
+ CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "classpath" ] ; then
+ echo -n
+elif [ "$COMMAND" = "historyserver" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ HADOOP_OPTS="$HADOOP_OPTS -Dmapred.jobsummary.logger=${HADOOP_JHS_LOGGER:-INFO,console} $HADOOP_JOB_HISTORYSERVER_OPTS"
+ if [ "$HADOOP_JOB_HISTORYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$HADOOP_JOB_HISTORYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "mradmin" ] \
+ || [ "$COMMAND" = "jobtracker" ] \
+ || [ "$COMMAND" = "tasktracker" ] \
+ || [ "$COMMAND" = "groups" ] ; then
+ echo "Sorry, the $COMMAND command is no longer supported."
+ echo "You may find similar functionality with the \"yarn\" shell command."
+ print_usage
+ exit 1
+elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "hsadmin" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+else
+ echo $COMMAND - invalid command
+ print_usage
+ exit 1
+fi
+
+# for developers, add mapred classes to CLASSPATH
+if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools
+fi
+
+# for releases, add core mapred jar & webapps to CLASSPATH
+if [ -d "$HADOOP_PREFIX/${MAPRED_DIR}/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/${MAPRED_DIR}
+fi
+for f in $HADOOP_MAPRED_HOME/${MAPRED_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# Need YARN jars also
+for f in $HADOOP_YARN_HOME/${YARN_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/${MAPRED_LIB_JARS_DIR}/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add modules to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/modules/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+export CLASSPATH
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
diff --git a/PCAP-PIC/hadoop/bin/mapred.cmd b/PCAP-PIC/hadoop/bin/mapred.cmd
new file mode 100644
index 0000000..550b1ed
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/mapred.cmd
@@ -0,0 +1,216 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop mapred command script
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~`%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\mapred-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %MAPRED_CONF_DIR%\mapred-env.cmd (
+ call %MAPRED_CONF_DIR%\mapred-env.cmd
+ )
+ set mapred-command=%1
+ call :make_command_arguments %*
+
+ if not defined mapred-command (
+ goto print_usage
+ )
+
+ @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+ if defined MAPRED_HEAPSIZE (
+ @rem echo run with Java heapsize %MAPRED_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%MAPRED_HEAPSIZE%m
+ )
+
+ @rem CLASSPATH initially contains HADOOP_CONF_DIR and MAPRED_CONF_DIR
+ if not defined HADOOP_CONF_DIR (
+ echo NO HADOOP_CONF_DIR set.
+ echo Please specify it either in mapred-env.cmd or in the environment.
+ goto :eof
+ )
+
+ set CLASSPATH=%HADOOP_CONF_DIR%;%MAPRED_CONF_DIR%;%CLASSPATH%
+
+ @rem for developers, add Hadoop classes to CLASSPATH
+ if exist %HADOOP_MAPRED_HOME%\build\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\classes
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\webapps (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\test\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\test\classes
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\build\tools (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\tools
+ )
+
+ @rem Need YARN jars also
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+
+ @rem add libs to CLASSPATH
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
+
+ @rem add modules to CLASSPATH
+ set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\*
+
+ if %mapred-command% == classpath (
+ if not defined mapred-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ call :%mapred-command% %mapred-command-arguments%
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %mapred-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:job
+ set CLASS=org.apache.hadoop.mapred.JobClient
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:queue
+ set CLASS=org.apache.hadoop.mapred.JobQueueClient
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:sampler
+ set CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:historyserver
+ set CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dmapred.jobsummary.logger=%HADOOP_JHS_LOGGER% %HADOOP_JOB_HISTORYSERVER_OPTS%
+ if defined HADOOP_JOB_HISTORYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%HADOOP_JOB_HISTORYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:hsadmin
+ set CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:pipes
+ goto not_supported
+
+:mradmin
+ goto not_supported
+
+:jobtracker
+ goto not_supported
+
+:tasktracker
+ goto not_supported
+
+:groups
+ goto not_supported
+
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if [%2] == [] goto :eof
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ shift
+ set _mapredarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _mapredarguments (
+ set _mapredarguments=%1
+ ) else (
+ set _mapredarguments=!_mapredarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set mapred-command-arguments=%_mapredarguments%
+ goto :eof
+
+:not_supported
+ @echo Sorry, the %COMMAND% command is no longer supported.
+ @echo You may find similar functionality with the "yarn" shell command.
+ goto print_usage
+
+:print_usage
+ @echo Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo job manipulate MapReduce jobs
+ @echo queue get information regarding JobQueues
+ @echo classpath prints the class path needed for running
+ @echo mapreduce subcommands
+ @echo historyserver run job history servers as a standalone daemon
+ @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+ @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo hsadmin job history server admin interface
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/PCAP-PIC/hadoop/bin/rcc b/PCAP-PIC/hadoop/bin/rcc
new file mode 100644
index 0000000..22bffff
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/rcc
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop record compiler
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HADOOP_OPTS Extra Java runtime options.
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+#
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# restore ordinary behaviour
+unset IFS
+
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+
+# run it
+exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/PCAP-PIC/hadoop/bin/set_hdfs_env.sh b/PCAP-PIC/hadoop/bin/set_hdfs_env.sh
new file mode 100644
index 0000000..146be84
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/set_hdfs_env.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+source /etc/profile
+
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME=/home/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ chkconfig --add keephdfsmaster
+ chkconfig keephdfsmaster on
+fi
+
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ chkconfig --add keephdfsslave
+ chkconfig keephdfsslave on
+fi
+
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ chkconfig --add keephdfsworker
+ chkconfig keephdfsworker on
+fi
+
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ chkconfig --add keephdfsjournal
+ chkconfig keephdfsjournal on
+fi
+}
+
+case $1 in
+journal)
+if [ -x '/etc/init.d/keephdfsjournal' ];then
+ service keephdfsjournal start && sleep 5
+ journal_dae=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
+ if [ $journal_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keephdfsmaster' ];then
+ service keephdfsmaster start && sleep 5
+ master_dae=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+slave)
+if [ -x '/etc/init.d/keephdfsslave' ];then
+ service keephdfsslave start && sleep 5
+ slave_dae=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
+ if [ $slave_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keephdfsworker' ];then
+ service keephdfsworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
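Each branch above pairs a SysV service start with a nohup'ed dae-*.sh watchdog, using a ps/grep count as the liveness test. The same guard in isolation, with the bracket trick standing in for the grep -v grep filter (an equivalent sketch, not this script's exact text):

    if [ "$(ps -ef | grep -c '[d]ae-hdfsmaster.sh')" -lt 1 ]; then
      nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
    fi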
diff --git a/PCAP-PIC/hadoop/bin/set_yarn_env.sh b/PCAP-PIC/hadoop/bin/set_yarn_env.sh
new file mode 100644
index 0000000..8ee107f
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/set_yarn_env.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+source /etc/profile
+
+function setChkconfig(){
+echo -e "\n#hadoop\nexport HADOOP_HOME=/home/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ chkconfig --add keepyarnhistory
+ chkconfig keepyarnhistory on
+fi
+
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ chkconfig --add keepyarnmaster
+ chkconfig keepyarnmaster on
+fi
+
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ chkconfig --add keepyarnworker
+ chkconfig keepyarnworker on
+fi
+}
+
+case $1 in
+history)
+if [ -x '/etc/init.d/keepyarnhistory' ];then
+ service keepyarnhistory start && sleep 5
+ history_dae=`ps -ef | grep "dae-yarnhistory.sh" | grep -v grep | wc -l`
+ if [ $history_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnhistory.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+master)
+if [ -x '/etc/init.d/keepyarnmaster' ];then
+ service keepyarnmaster start && sleep 5
+ master_dae=`ps -ef | grep "dae-yarnmaster.sh" | grep -v grep | wc -l`
+ if [ $master_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnmaster.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+worker)
+if [ -x '/etc/init.d/keepyarnworker' ];then
+ service keepyarnworker start && sleep 5
+ worker_dae=`ps -ef | grep dae-yarnworker.sh | grep -v grep | wc -l`
+ if [ $worker_dae -lt 1 ];then
+ nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-yarnworker.sh > /dev/null 2>&1 &
+ fi
+fi
+;;
+chkconfig)
+ setChkconfig;;
+* )
+;;
+esac
+
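The chkconfig branch is meant to run once per node, as root, before the per-role branches are used. A sketch:

    ./set_yarn_env.sh chkconfig        # appends the HADOOP_HOME/PATH exports to /etc/profile.d/hadoop.sh
                                       # and registers the keepyarn{history,master,worker} init scripts
    source /etc/profile.d/hadoop.sh    # pick up the new environment in the current shell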
diff --git a/PCAP-PIC/hadoop/bin/test-container-executor b/PCAP-PIC/hadoop/bin/test-container-executor
new file mode 100644
index 0000000..df8c3db
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/test-container-executor
Binary files differ
diff --git a/PCAP-PIC/hadoop/bin/yarn b/PCAP-PIC/hadoop/bin/yarn
new file mode 100644
index 0000000..0e4c5a2
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/yarn
@@ -0,0 +1,330 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop command script
+#
+# Environment Variables
+#
+#   JAVA_HOME        The java implementation to use. Required.
+#
+# YARN_USER_CLASSPATH Additional user CLASSPATH entries.
+#
+#   YARN_USER_CLASSPATH_FIRST      If set to a non-empty value, the user classpath
+#                                  specified in YARN_USER_CLASSPATH is
+#                                  prepended to YARN's final classpath
+#                                  instead of appended at the end.
+#
+# YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+# eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+# size for the NodeManager. If you set the
+# heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+# they take precedence.
+#
+# YARN_OPTS Extra Java runtime options.
+#
+#   YARN_CLIENT_OPTS         Extra options applied when a client command
+#                            is run; covers commands that span more than
+#                            one operation (fs, dfs, fsck, dfsadmin etc).
+#   YARN_{COMMAND}_OPTS      Extra options for a single command, e.g.
+#                            YARN_NODEMANAGER_OPTS applies to the NodeManager.
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+#
+# YARN_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+function print_usage(){
+ echo "Usage: yarn [--config confdir] [COMMAND | CLASSNAME]"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo " or"
+ echo " where COMMAND is one of:"
+ echo " resourcemanager -format-state-store deletes the RMStateStore"
+ echo " resourcemanager run the ResourceManager"
+ echo " nodemanager run a nodemanager on each slave"
+ echo " timelineserver run the timeline server"
+ echo " rmadmin admin tools"
+ echo " sharedcachemanager run the SharedCacheManager daemon"
+ echo " scmadmin SharedCacheManager admin tools"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " application prints application(s)"
+ echo " report/kill application"
+ echo " applicationattempt prints applicationattempt(s)"
+ echo " report"
+ echo " container prints container(s) report"
+ echo " node prints node report(s)"
+ echo " queue prints queue information"
+ echo " logs dump container logs"
+ echo " classpath prints the class path needed to"
+ echo " get the Hadoop jar and the"
+ echo " required libraries"
+ echo " cluster prints cluster information"
+ echo " daemonlog get/set the log level for each"
+ echo " daemon"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ print_usage
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $YARN_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR & $YARN_CONF_DIR
+if [ ! -d "$HADOOP_CONF_DIR" ]; then
+ echo No HADOOP_CONF_DIR set.
+ echo Please specify it either in yarn-env.sh or in the environment.
+ exit 1
+fi
+
+CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$HADOOP_YARN_HOME/yarn-api/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-api/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-mapreduce/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-mapreduce/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-master-worker/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-master-worker/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/tools
+fi
+
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_DIR}/*
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_LIB_JARS_DIR}/*
+
+# Add user defined YARN_USER_CLASSPATH to the class path (if defined)
+if [ -n "$YARN_USER_CLASSPATH" ]; then
+ if [ -n "$YARN_USER_CLASSPATH_FIRST" ]; then
+ # User requested to add the custom entries at the beginning
+ CLASSPATH=${YARN_USER_CLASSPATH}:${CLASSPATH}
+ else
+ # By default we will just append the extra entries at the end
+ CLASSPATH=${CLASSPATH}:${YARN_USER_CLASSPATH}
+ fi
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+if [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+elif [ "$COMMAND" = "rmadmin" ] ; then
+ CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "scmadmin" ] ; then
+ CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "application" ] ||
+ [ "$COMMAND" = "applicationattempt" ] ||
+ [ "$COMMAND" = "container" ]; then
+ CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+ set -- $COMMAND $@
+elif [ "$COMMAND" = "node" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "resourcemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+ YARN_OPTS="$YARN_OPTS $YARN_RESOURCEMANAGER_OPTS $YARN_RESOURCEMANAGER_JMX_OPTS"
+ if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_RESOURCEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "historyserver" ] ; then
+ echo "DEPRECATED: Use of this command to start the timeline server is deprecated." 1>&2
+ echo "Instead use the timelineserver command for it." 1>&2
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+ YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
+ if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "timelineserver" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/timelineserver-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+ YARN_OPTS="$YARN_OPTS $YARN_TIMELINESERVER_OPTS"
+ if [ "$YARN_TIMELINESERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_TIMELINESERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "sharedcachemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/scm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
+ YARN_OPTS="$YARN_OPTS $YARN_SHAREDCACHEMANAGER_OPTS"
+ if [ "$YARN_SHAREDCACHEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_SHAREDCACHEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "nodemanager" ] ; then
+ CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/nm-config/log4j.properties
+ CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+ YARN_OPTS="$YARN_OPTS -server $YARN_NODEMANAGER_OPTS $YARN_NODEMANAGER_JMX_OPTS"
+ if [ "$YARN_NODEMANAGER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_NODEMANAGER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "proxyserver" ] ; then
+ CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+ YARN_OPTS="$YARN_OPTS $YARN_PROXYSERVER_OPTS"
+ if [ "$YARN_PROXYSERVER_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_PROXYSERVER_HEAPSIZE""m"
+ fi
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "logs" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "cluster" ] ; then
+ CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+ YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+else
+ CLASS=$COMMAND
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+ YARN_LOG_DIR=$(cygpath -w "$YARN_LOG_DIR" 2>/dev/null)
+fi
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+HADOOP_HOME=$HADOOP_PREFIX
+if $cygwin; then
+ HADOOP_HOME=$(cygpath -w "$HADOOP_HOME" 2>/dev/null)
+fi
+export HADOOP_HOME
+YARN_OPTS="$YARN_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ if $cygwin; then
+ JAVA_LIBRARY_PATH=$(cygpath -w "$JAVA_LIBRARY_PATH" 2>/dev/null)
+ fi
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
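The heap-size precedence documented in the header can be traced through the script: JAVA_HEAP_MAX starts at -Xmx1000m, YARN_HEAPSIZE overrides it, a per-command YARN_{COMMAND}_HEAPSIZE overrides that, and an -Xmx placed in YARN_OPTS wins because it appears after $JAVA_HEAP_MAX on the final exec line. Illustrative values:

    YARN_HEAPSIZE=2048 yarn nodemanager               # runs with -Xmx2048m
    YARN_NODEMANAGER_HEAPSIZE=4096 yarn nodemanager   # runs with -Xmx4096m
    YARN_NODEMANAGER_OPTS=-Xmx6144m yarn nodemanager  # -Xmx6144m comes last and wins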
diff --git a/PCAP-PIC/hadoop/bin/yarn.cmd b/PCAP-PIC/hadoop/bin/yarn.cmd
new file mode 100644
index 0000000..3cd57a7
--- /dev/null
+++ b/PCAP-PIC/hadoop/bin/yarn.cmd
@@ -0,0 +1,332 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop command script
+@rem
+@rem Environment Variables
+@rem
+@rem   JAVA_HOME        The java implementation to use. Required.
+@rem
+@rem YARN_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+@rem eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+@rem size for the NodeManager. If you set the
+@rem heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+@rem they take precedence.
+@rem
+@rem YARN_OPTS Extra Java runtime options.
+@rem
+@rem   YARN_CLIENT_OPTS         Extra options applied when a client command
+@rem                            is run; covers commands that span more than
+@rem                            one operation (fs, dfs, fsck, dfsadmin etc).
+@rem   YARN_{COMMAND}_OPTS      Extra options for a single command, e.g.
+@rem                            YARN_NODEMANAGER_OPTS applies to the NodeManager.
+@rem
+@rem YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+@rem
+@rem YARN_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %YARN_CONF_DIR%\yarn-env.cmd (
+ call %YARN_CONF_DIR%\yarn-env.cmd
+ )
+
+ set yarn-command=%1
+ call :make_command_arguments %*
+
+ if not defined yarn-command (
+ goto print_usage
+ )
+
+ @rem JAVA and JAVA_HEAP_MAX and set in hadoop-config.cmd
+
+ if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+ )
+
+ @rem CLASSPATH initially contains HADOOP_CONF_DIR & YARN_CONF_DIR
+ if not defined HADOOP_CONF_DIR (
+ echo No HADOOP_CONF_DIR set.
+ echo Please specify it either in yarn-env.cmd or in the environment.
+ goto :eof
+ )
+
+ set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH%
+
+ @rem for developers, add Hadoop classes to CLASSPATH
+ if exist %HADOOP_YARN_HOME%\yarn-api\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-api\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-mapreduce\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-mapreduce\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-master-worker\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-master-worker\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\test\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\tools (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
+ )
+
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
+
+ if %yarn-command% == classpath (
+ if not defined yarn-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
+ application applicationattempt cluster container node queue logs daemonlog historyserver ^
+ timelineserver classpath
+ for %%i in ( %yarncommands% ) do (
+ if %yarn-command% == %%i set yarncommand=true
+ )
+ if defined yarncommand (
+ call :%yarn-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%yarn-command%
+ )
+
+ if defined JAVA_LIBRARY_PATH (
+ set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+ )
+
+ set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:rmadmin
+ set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:application
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:applicationattempt
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:cluster
+ set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:container
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+ goto :eof
+
+:node
+ set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:queue
+ set CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:resourcemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
+ set YARN_OPTS=%YARN_OPTS% %YARN_RESOURCEMANAGER_OPTS%
+ if defined YARN_RESOURCEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:historyserver
+ @echo DEPRECATED: Use of this command to start the timeline server is deprecated. 1>&2
+ @echo Instead use the timelineserver command for it. 1>&2
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
+ if defined YARN_HISTORYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:timelineserver
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_TIMELINESERVER_OPTS%
+ if defined YARN_TIMELINESERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_TIMELINESERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:nodemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
+ set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS%
+ if defined YARN_NODEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:proxyserver
+ set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS%
+ if defined YARN_PROXYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:jar
+ set CLASS=org.apache.hadoop.util.RunJar
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:logs
+ set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _yarnarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _yarnarguments (
+ set _yarnarguments=%1
+ ) else (
+ set _yarnarguments=!_yarnarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set yarn-command-arguments=%_yarnarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo resourcemanager run the ResourceManager
+ @echo nodemanager run a nodemanager on each slave
+ @echo timelineserver run the timeline server
+ @echo rmadmin admin tools
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo application prints application(s) report/kill application
+ @echo applicationattempt prints applicationattempt(s) report
+ @echo cluster prints cluster information
+ @echo container prints container(s) report
+ @echo node prints node report(s)
+ @echo queue prints queue information
+ @echo logs dump container logs
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/PCAP-PIC/hadoop/etc/hadoop/capacity-scheduler.xml b/PCAP-PIC/hadoop/etc/hadoop/capacity-scheduler.xml
new file mode 100644
index 0000000..1e97505
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/capacity-scheduler.xml
@@ -0,0 +1,134 @@
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-applications</name>
+ <value>10000</value>
+ <description>
+ Maximum number of applications that can be pending and running.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+ <value>0.5</value>
+ <description>
+ Maximum percent of resources in the cluster which can be used to run
+ application masters, i.e. it controls the number of concurrently
+ running applications.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.resource-calculator</name>
+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+ <description>
+ The ResourceCalculator implementation to be used to compare
+ Resources in the scheduler.
+ The default i.e. DefaultResourceCalculator only uses Memory while
+ DominantResourceCalculator uses dominant-resource to compare
+ multi-dimensional resources such as Memory, CPU etc.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.queues</name>
+ <value>default</value>
+ <description>
+ The queues at this level (root is the root queue).
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.capacity</name>
+ <value>100</value>
+ <description>Default queue target capacity.</description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+ <value>1</value>
+ <description>
+ Default queue user limit, a percentage from 0.0 to 1.0.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+ <value>100</value>
+ <description>
+ The maximum capacity of the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.state</name>
+ <value>RUNNING</value>
+ <description>
+ The state of the default queue. State can be one of RUNNING or STOPPED.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+ <value>*</value>
+ <description>
+ The ACL of who can submit jobs to the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+ <value>*</value>
+ <description>
+ The ACL of who can administer jobs on the default queue.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.node-locality-delay</name>
+ <value>40</value>
+ <description>
+ Number of missed scheduling opportunities after which the CapacityScheduler
+ attempts to schedule rack-local containers.
+ Typically this should be set to the number of nodes in the cluster. By
+ default it is set to 40, approximately the number of nodes in one rack.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings</name>
+ <value></value>
+ <description>
+ A list of mappings that will be used to assign jobs to queues.
+ The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*.
+ Typically this list will be used to map users to queues,
+ for example, u:%user:%user maps all users to queues with the same name
+ as the user.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+ <value>false</value>
+ <description>
+ If a queue mapping is present, will it override the value specified
+ by the user? This can be used by administrators to place jobs in queues
+ that are different than the one specified by the user.
+ The default is false.
+ </description>
+ </property>
+
+</configuration>
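A hypothetical queue-mappings value following the [u|g]:[name]:[queue_name] syntax described above, mapping one user and one group to named queues and all remaining users to per-user queues; scheduler edits can then be applied without restarting the ResourceManager:

    # example value: u:alice:engineering,g:analysts:reports,u:%user:%user
    yarn rmadmin -refreshQueues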
diff --git a/PCAP-PIC/hadoop/etc/hadoop/configuration.xsl b/PCAP-PIC/hadoop/etc/hadoop/configuration.xsl
new file mode 100644
index 0000000..d50d80b
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/configuration.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/PCAP-PIC/hadoop/etc/hadoop/container-executor.cfg b/PCAP-PIC/hadoop/etc/hadoop/container-executor.cfg
new file mode 100644
index 0000000..d68cee8
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/container-executor.cfg
@@ -0,0 +1,4 @@
+yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
+banned.users=#comma separated list of users who can not run applications
+min.user.id=1000#Prevent other super-users
+allowed.system.users=##comma separated list of system users who CAN run applications
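The four keys above ship as placeholders; a hypothetical filled-in version (the group and user names are examples only, and the group must match both yarn.nodemanager.linux-container-executor.group in yarn-site.xml and the group of the setuid container-executor binary):

    cat > /etc/hadoop/container-executor.cfg <<'EOF'
    yarn.nodemanager.linux-container-executor.group=hadoop
    banned.users=bin
    min.user.id=1000
    allowed.system.users=nobody
    EOF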
diff --git a/PCAP-PIC/hadoop/etc/hadoop/core-site.xml b/PCAP-PIC/hadoop/etc/hadoop/core-site.xml
new file mode 100644
index 0000000..2e774d6
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/home/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.10.193:2181,192.168.10.194:2181,192.168.10.195:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
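Because fs.defaultFS names the ns1 nameservice rather than a single NameNode, clients resolve the active node through the HA settings in hdfs-site.xml (further below) and paths keep working across failover, e.g.:

    hdfs dfs -ls /                      # resolves hdfs://ns1/ via the HA client
    hdfs haadmin -getServiceState nn1   # nn1/nn2 are declared in hdfs-site.xml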
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.cmd b/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.cmd
new file mode 100644
index 0000000..5dbd635
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.cmd
@@ -0,0 +1,81 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME. All others are
+@rem optional. When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use. Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+ if not defined HADOOP_CLASSPATH (
+ set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ ) else (
+ set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options. Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+ set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored. %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by
+@rem the user that will run the hadoop daemons. Otherwise there is the
+@rem potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.sh b/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.sh
new file mode 100644
index 0000000..7b9e5b1
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hadoop-env.sh
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:/home/tsg/olap/hadoop-2.7.1/monitor/hdfs.yaml"
+export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:/home/tsg/olap/hadoop-2.7.1/monitor/hdfs.yaml"
+
+# The java implementation to use.
+#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol. Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+ if [ "$HADOOP_CLASSPATH" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+ else
+ export HADOOP_CLASSPATH=$f
+ fi
+done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx10240m -Xms10240m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hadoop-2.7.1/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hadoop-2.7.1/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"
+
+export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS -Xmx5120m -Xms5120m -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/home/tsg/olap/hadoop-2.7.1/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/tsg/olap/hadoop-2.7.1/logs/ -Dhadoop.security.logger=ERROR,RFAS"
+
+export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS -Xmx1024m -Xms1024m"
+
+export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS -Xmx1024m -Xms1024m"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol. This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+# the user that will run the hadoop daemons. Otherwise there is the
+# potential for a symlink attack.
+export HADOOP_PID_DIR=/home/tsg/olap/hadoop-2.7.1/pids
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
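The HADOOP_NAMENODE_JMX_OPTS and HADOOP_DATANODE_JMX_OPTS lines at the top attach the bundled jmx_prometheus_javaagent on ports 9905 and 9906; assuming the daemons were started with those options, each exporter can be spot-checked locally:

    curl -s http://localhost:9905/metrics | head   # NameNode exporter
    curl -s http://localhost:9906/metrics | head   # DataNode exporter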
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics.properties b/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics.properties
new file mode 100644
index 0000000..c1b2eb7
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics.properties
@@ -0,0 +1,75 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+#jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=localhost:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+#rpc.class=org.apache.hadoop.metrics.file.FileContext
+#rpc.period=10
+#rpc.fileName=/tmp/rpcmetrics.log
+
+# Configuration of the "rpc" context for ganglia
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=localhost:8649
+
+
+# Configuration of the "ugi" context for null
+ugi.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "ugi" context for file
+#ugi.class=org.apache.hadoop.metrics.file.FileContext
+#ugi.period=10
+#ugi.fileName=/tmp/ugimetrics.log
+
+# Configuration of the "ugi" context for ganglia
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# ugi.period=10
+# ugi.servers=localhost:8649
+
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties b/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties
new file mode 100644
index 0000000..0c09228
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hadoop-metrics2.properties
@@ -0,0 +1,68 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all contexts
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example splits metrics of different
+# contexts to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*', all tags are used. If specifying multiple tags, separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hadoop-policy.xml b/PCAP-PIC/hadoop/etc/hadoop/hadoop-policy.xml
new file mode 100644
index 0000000..2bf5c02
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hadoop-policy.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.client.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.inter.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.namenode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.admin.operations.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.user.mappings.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+ users mappings. The ACL is a comma-separated list of user and
+ group names. The user and group list is separated by a blank. For
+ e.g. "alice,bob users,wheel". A special value of "*" means all
+ users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.policy.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+ dfsadmin and mradmin commands to refresh the security policy in-effect.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.ha.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HAService protocol used by HAAdmin to manage the
+ active and stand-by states of namenode.</description>
+ </property>
+
+ <property>
+ <name>security.zkfc.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for access to the ZK Failover Controller
+ </description>
+ </property>
+
+ <property>
+ <name>security.qjournal.service.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for QJournalProtocol, used by the NN to communicate with
+ JNs when using the QuorumJournalManager for edit logs.</description>
+ </property>
+
+ <property>
+ <name>security.mrhs.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for HSClientProtocol, used by job clients to
+ communicate with the MR History Server to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <!-- YARN Protocols -->
+
+ <property>
+ <name>security.resourcetracker.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceTrackerProtocol, used by the
+ ResourceManager and NodeManager to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcemanager-administration.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationclient.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationClientProtocol, used by the ResourceManager
+ and applications submission clients to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationmaster.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.containermanagement.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
+ and ApplicationMasters to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.resourcelocalizer.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
+ and ResourceLocalizer to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.task.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for MRClientProtocol, used by job clients to
+ communicate with the MR ApplicationMaster to query job status etc.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.applicationhistory.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ApplicationHistoryProtocol, used by the timeline
+ server and the generic history service client to communicate with each other.
+ The ACL is a comma-separated list of user and group names. The user and
+ group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+</configuration>
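
These service-level ACLs are enforced only when hadoop.security.authorization
is set to true in core-site.xml, and the policy file is re-read on demand. A
minimal sketch of reloading it without restarting the daemons, assuming the
invoking user is a cluster administrator:

  hdfs dfsadmin -refreshServiceAcl    # NameNode re-reads hadoop-policy.xml
  yarn rmadmin -refreshServiceAcls    # ResourceManager does the same
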
diff --git a/PCAP-PIC/hadoop/etc/hadoop/hdfs-site.xml b/PCAP-PIC/hadoop/etc/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..31905bc
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/home/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1, i.e. the address of the node where nn1 runs -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.10.193:9000</value>
+ </property>
+ <!-- HTTP address of nn1, used for external (web UI) access -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.10.193:50070</value>
+ </property>
+ <!-- RPC address of nn2, i.e. the address of the node where nn2 runs -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.10.194:9000</value>
+ </property>
+ <!-- HTTP address of nn2, used for external (web UI) access -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.10.194:50070</value>
+ </property>
+ <!-- Where the NameNode stores its shared edit log on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.10.193:8485;192.168.10.194:8485;192.168.10.195:8485/ns1</value>
+ </property>
+ <!-- Local disk directory where each JournalNode keeps its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/home/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Java class HDFS clients use to reach the NameNode through a proxy when accessing the filesystem; it determines which NameNode is currently active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods used during automatic failover. Several variants exist (see the official docs); sshfence logs in to the old active node and kills the stale process, and shell(true) is a fallback that always reports success -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence
+ shell(true)</value>
+ </property>
+ <!-- Passwordless SSH private key, needed only when the sshfence method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence method; as above, it can be left unset when only a script-based fencing method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enables automatic failover; leave unset if automatic failover is not wanted -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode RPC handler threads; raising it costs little in resources -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode RPC handler threads; raising it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth the balancer may use, in bytes per second -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Disk space reserved per volume for non-HDFS use, in bytes -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode liveness timeout, in milliseconds: 2 * this recheck interval + 30000 -->
+ <property>
+ <name>dfs.namenode.heartbeat.recheck-interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
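
The file above defines a two-NameNode HA nameservice (ns1) backed by three
JournalNodes. A quick sanity check of the HA wiring once the daemons are up
(a sketch; nn1 and nn2 are the logical NameNode IDs configured above):

  hdfs getconf -confKey dfs.nameservices   # should print ns1
  hdfs haadmin -getServiceState nn1        # prints active or standby
  hdfs haadmin -getServiceState nn2
  hdfs dfsadmin -report                    # DataNode liveness and capacity
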
diff --git a/PCAP-PIC/hadoop/etc/hadoop/httpfs-env.sh b/PCAP-PIC/hadoop/etc/hadoop/httpfs-env.sh
new file mode 100644
index 0000000..a2701d4
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/httpfs-env.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# Set httpfs specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs HttpFS
+# Java System properties for HttpFS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# HttpFS logs directory
+#
+# export HTTPFS_LOG=${HTTPFS_HOME}/logs
+
+# HttpFS temporary directory
+#
+# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
+
+# The HTTP port used by HttpFS
+#
+# export HTTPFS_HTTP_PORT=14000
+
+# The Admin port used by HttpFS
+#
+# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
+
+# The hostname HttpFS server runs on
+#
+# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
+
+# Indicates if HttpFS is using SSL
+#
+# export HTTPFS_SSL_ENABLED=false
+
+# The location of the SSL keystore if using SSL
+#
+# export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export HTTPFS_SSL_KEYSTORE_PASS=password
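
With the defaults left commented out, HttpFS listens on port 14000 and exposes
the WebHDFS REST API through a single gateway process. A minimal smoke test,
assuming HttpFS runs on 192.168.10.193 with simple (pseudo) authentication and
an hdfs user:

  curl 'http://192.168.10.193:14000/webhdfs/v1/?op=LISTSTATUS&user.name=hdfs'
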
diff --git a/PCAP-PIC/hadoop/etc/hadoop/httpfs-log4j.properties b/PCAP-PIC/hadoop/etc/hadoop/httpfs-log4j.properties
new file mode 100644
index 0000000..a924a48
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/httpfs-log4j.properties
@@ -0,0 +1,35 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time
+# Setup sets its value to '${httpfs.home}/logs'
+
+log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log
+log4j.appender.httpfs.Append=true
+log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfs.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log
+log4j.appender.httpfsaudit.Append=true
+log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfsaudit.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.logger.httpfsaudit=INFO, httpfsaudit
+
+log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs
+log4j.logger.org.apache.hadoop.lib=INFO, httpfs
diff --git a/PCAP-PIC/hadoop/etc/hadoop/httpfs-signature.secret b/PCAP-PIC/hadoop/etc/hadoop/httpfs-signature.secret
new file mode 100644
index 0000000..56466e9
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/httpfs-signature.secret
@@ -0,0 +1 @@
+hadoop httpfs secret
diff --git a/PCAP-PIC/hadoop/etc/hadoop/httpfs-site.xml b/PCAP-PIC/hadoop/etc/hadoop/httpfs-site.xml
new file mode 100644
index 0000000..4a718e1
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/httpfs-site.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+</configuration>
diff --git a/PCAP-PIC/hadoop/etc/hadoop/kms-acls.xml b/PCAP-PIC/hadoop/etc/hadoop/kms-acls.xml
new file mode 100644
index 0000000..cba69f4
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/kms-acls.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+ <!-- This file is hot-reloaded when it changes -->
+
+ <!-- KMS ACLs -->
+
+ <property>
+ <name>hadoop.kms.acl.CREATE</name>
+ <value>*</value>
+ <description>
+ ACL for create-key operations.
+ If the user is not in the GET ACL, the key material is not returned
+ as part of the response.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.DELETE</name>
+ <value>*</value>
+ <description>
+ ACL for delete-key operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.ROLLOVER</name>
+ <value>*</value>
+ <description>
+ ACL for rollover-key operations.
+ If the user is not in the GET ACL, the key material is not returned
+ as part of the response.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET</name>
+ <value>*</value>
+ <description>
+ ACL for get-key-version and get-current-key operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET_KEYS</name>
+ <value>*</value>
+ <description>
+ ACL for get-keys operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GET_METADATA</name>
+ <value>*</value>
+ <description>
+ ACL for get-key-metadata and get-keys-metadata operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+ <value>*</value>
+ <description>
+ Complementary ACL for CREATE and ROLLOVER operations to allow the client
+ to provide the key material when creating or rolling a key.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.GENERATE_EEK</name>
+ <value>*</value>
+ <description>
+ ACL for generateEncryptedKey CryptoExtension operations.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.acl.DECRYPT_EEK</name>
+ <value>*</value>
+ <description>
+ ACL for decryptEncryptedKey CryptoExtension operations.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.MANAGEMENT</name>
+ <value>*</value>
+ <description>
+ default ACL for MANAGEMENT operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.GENERATE_EEK</name>
+ <value>*</value>
+ <description>
+ default ACL for GENERATE_EEK operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.DECRYPT_EEK</name>
+ <value>*</value>
+ <description>
+ default ACL for DECRYPT_EEK operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+ <property>
+ <name>default.key.acl.READ</name>
+ <value>*</value>
+ <description>
+ default ACL for READ operations for all key acls that are not
+ explicitly defined.
+ </description>
+ </property>
+
+
+</configuration>
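
Every ACL above is the wide-open default (*), so any authenticated user may
perform any key operation; because the file is hot-reloaded, tightening an ACL
takes effect without a KMS restart. A sketch of exercising the CREATE and
GET_KEYS ACLs through the key shell, assuming the client's key provider is
pointed at this KMS (demokey is a hypothetical key name):

  hadoop key create demokey
  hadoop key list -metadata
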
diff --git a/PCAP-PIC/hadoop/etc/hadoop/kms-env.sh b/PCAP-PIC/hadoop/etc/hadoop/kms-env.sh
new file mode 100644
index 0000000..44dfe6a
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/kms-env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password
+
+# The full path to any native libraries that need to be loaded
+# (For eg. location of natively compiled tomcat Apache portable
+# runtime (APR) libraries
+#
+# export JAVA_LIBRARY_PATH=${HOME}/lib/native
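
With the defaults above, the KMS serves its REST API on port 16000 under the
/kms context. A minimal liveness check against the key-names endpoint, assuming
the simple authentication configured in kms-site.xml and a KMS host of
192.168.10.193:

  curl 'http://192.168.10.193:16000/kms/v1/keys/names?user.name=hdfs'
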
diff --git a/PCAP-PIC/hadoop/etc/hadoop/kms-log4j.properties b/PCAP-PIC/hadoop/etc/hadoop/kms-log4j.properties
new file mode 100644
index 0000000..9c189f2
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/kms-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ssZ} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
diff --git a/PCAP-PIC/hadoop/etc/hadoop/kms-site.xml b/PCAP-PIC/hadoop/etc/hadoop/kms-site.xml
new file mode 100644
index 0000000..a810ca4
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/kms-site.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+ <!-- KMS Backend KeyProvider -->
+
+ <property>
+ <name>hadoop.kms.key.provider.uri</name>
+ <value>jceks://file@/${user.home}/kms.keystore</value>
+ <description>
+ URI of the backing KeyProvider for the KMS.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+ <value>none</value>
+ <description>
+ If using the JavaKeyStoreProvider, the password for the keystore file.
+ </description>
+ </property>
+
+ <!-- KMS Cache -->
+
+ <property>
+ <name>hadoop.kms.cache.enable</name>
+ <value>true</value>
+ <description>
+ Whether the KMS will act as a cache for the backing KeyProvider.
+ When the cache is enabled, operations like getKeyVersion, getMetadata,
+ and getCurrentKey will sometimes return cached data without consulting
+ the backing KeyProvider. Cached values are flushed when keys are deleted
+ or modified.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.cache.timeout.ms</name>
+ <value>600000</value>
+ <description>
+ Expiry time for the KMS key version and key metadata cache, in
+ milliseconds. This affects getKeyVersion and getMetadata.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.current.key.cache.timeout.ms</name>
+ <value>30000</value>
+ <description>
+ Expiry time for the KMS current key cache, in milliseconds. This
+ affects getCurrentKey operations.
+ </description>
+ </property>
+
+ <!-- KMS Audit -->
+
+ <property>
+ <name>hadoop.kms.audit.aggregation.window.ms</name>
+ <value>10000</value>
+ <description>
+ Duplicate audit log events within the aggregation window (specified in
+ ms) are quashed to reduce log traffic. A single message for aggregated
+ events is printed at the end of the window, along with a count of the
+ number of aggregated events.
+ </description>
+ </property>
+
+ <!-- KMS Security -->
+
+ <property>
+ <name>hadoop.kms.authentication.type</name>
+ <value>simple</value>
+ <description>
+ Authentication type for the KMS. Can be either &quot;simple&quot;
+ or &quot;kerberos&quot;.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.keytab</name>
+ <value>${user.home}/kms.keytab</value>
+ <description>
+ Path to the keytab with credentials for the configured Kerberos principal.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.principal</name>
+ <value>HTTP/localhost</value>
+ <description>
+ The Kerberos principal to use for the HTTP endpoint.
+ The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.kerberos.name.rules</name>
+ <value>DEFAULT</value>
+ <description>
+ Rules used to resolve Kerberos principal names.
+ </description>
+ </property>
+
+ <!-- Authentication cookie signature source -->
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider</name>
+ <value>random</value>
+ <description>
+ Indicates how the secret to sign the authentication cookies will be
+ stored. Options are 'random' (default), 'string' and 'zookeeper'.
+ If using a setup with multiple KMS instances, 'zookeeper' should be used.
+ </description>
+ </property>
+
+ <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+ <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+ <description>
+ The Zookeeper ZNode path where the KMS instances will store and retrieve
+ the secret from.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+ <value>#HOSTNAME#:#PORT#,...</value>
+ <description>
+ The Zookeeper connection string, a list of hostnames and port comma
+ separated.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+ <value>kerberos</value>
+ <description>
+ The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+ <value>/etc/hadoop/conf/kms.keytab</value>
+ <description>
+ The absolute path for the Kerberos keytab with the credentials to
+ connect to Zookeeper.
+ </description>
+ </property>
+
+ <property>
+ <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+ <value>kms/#HOSTNAME#</value>
+ <description>
+ The Kerberos service principal used to connect to Zookeeper.
+ </description>
+ </property>
+
+</configuration>
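
Nothing in the cluster talks to this KMS until clients are given its provider
URI, which uses the kms:// scheme wrapping http and the /kms context path. A
sketch of addressing it directly from the key shell (the host is an
assumption):

  hadoop key list -provider kms://http@192.168.10.193:16000/kms
  hadoop key create demokey -provider kms://http@192.168.10.193:16000/kms
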
diff --git a/PCAP-PIC/hadoop/etc/hadoop/log4j.properties b/PCAP-PIC/hadoop/etc/hadoop/log4j.properties
new file mode 100644
index 0000000..a7c7a9a
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/log4j.properties
@@ -0,0 +1,268 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=ERROR,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=[%d{yyyy-MM-dd HH:mm:ssZ}] %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
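
Note that the root logger here is ERROR,console, so daemons using this file are
nearly silent unless a startup script overrides hadoop.root.logger. The level
can also be raised per invocation without editing the file; a sketch for a
one-off debug run of a client command:

  HADOOP_ROOT_LOGGER=DEBUG,console hdfs dfs -ls /
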
diff --git a/PCAP-PIC/hadoop/etc/hadoop/log4j.properties_bak b/PCAP-PIC/hadoop/etc/hadoop/log4j.properties_bak
new file mode 100644
index 0000000..7e2ffc7
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/log4j.properties_bak
@@ -0,0 +1,268 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ssZ} %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
diff --git a/PCAP-PIC/hadoop/etc/hadoop/mapred-env.cmd b/PCAP-PIC/hadoop/etc/hadoop/mapred-env.cmd
new file mode 100644
index 0000000..f27943f
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/mapred-env.cmd
@@ -0,0 +1,20 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
+
diff --git a/PCAP-PIC/hadoop/etc/hadoop/mapred-env.sh b/PCAP-PIC/hadoop/etc/hadoop/mapred-env.sh
new file mode 100644
index 0000000..6be1e27
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/mapred-env.sh
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
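
HADOOP_JOB_HISTORYSERVER_HEAPSIZE above is read (in MB) when the JobHistory
server starts. A sketch of starting the daemon with these settings, assuming
HADOOP_HOME points at this tree:

  $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver
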
diff --git a/PCAP-PIC/hadoop/etc/hadoop/mapred-queues.xml.template b/PCAP-PIC/hadoop/etc/hadoop/mapred-queues.xml.template
new file mode 100644
index 0000000..ce6cd20
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/mapred-queues.xml.template
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+ queues within queues - a feature called hierarchical queues. All queues are
+ defined within the 'queues' tag which is the top level element for this
+ XML document. The queue acls configured here for different queues are
+ checked for authorization only if the configuration property
+ mapreduce.cluster.acls.enabled is set to true. -->
+<queues>
+
+ <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+ <queue>
+
+ <!-- Name of a queue. Queue name cannot contain a ':' -->
+ <name>default</name>
+
+ <!-- properties for a queue, typically used by schedulers,
+ can be defined here -->
+ <properties>
+ </properties>
+
+ <!-- State of the queue. If running, the queue will accept new jobs.
+ If stopped, the queue will not accept new jobs. -->
+ <state>running</state>
+
+ <!-- Specifies the ACLs to check for submitting jobs to this queue.
+ If set to '*', it allows all users to submit jobs to the queue.
+ If set to ' '(i.e. space), no user will be allowed to do this
+ operation. The default value for any queue acl is ' '.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2
+
+ It is only used if authorization is enabled in Map/Reduce by setting
+ the configuration property mapreduce.cluster.acls.enabled to true.
+
+ Irrespective of this ACL configuration, the user who started the
+ cluster and cluster administrators configured via
+ mapreduce.cluster.administrators can do this operation. -->
+ <acl-submit-job> </acl-submit-job>
+
+ <!-- Specifies the ACLs to check for viewing and modifying jobs in this
+ queue. Modifications include killing jobs, tasks of jobs or changing
+ priorities.
+ If set to '*', it allows all users to view, modify jobs of the queue.
+ If set to ' '(i.e. space), no user will be allowed to do this
+ operation.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2
+
+ It is only used if authorization is enabled in Map/Reduce by setting
+ the configuration property mapreduce.cluster.acls.enabled to true.
+
+ Irrespective of this ACL configuration, the user who started the
+ cluster and cluster administrators configured via
+ mapreduce.cluster.administrators can do the above operations on all
+ the jobs in all the queues. The job owner can do all the above
+ operations on his/her job irrespective of this ACL configuration. -->
+ <acl-administer-jobs> </acl-administer-jobs>
+ </queue>
+
+ <!-- Here is a sample of a hierarchical queue configuration
+ where q2 is a child of q1. In this example, q2 is a leaf level
+ queue as it has no queues configured within it. Currently, ACLs
+ and state are only supported for the leaf level queues.
+ Note also the usage of properties for the queue q2.
+ <queue>
+ <name>q1</name>
+ <queue>
+ <name>q2</name>
+ <properties>
+ <property key="capacity" value="20"/>
+ <property key="user-limit" value="30"/>
+ </properties>
+ </queue>
+ </queue>
+ -->
+</queues>
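
Queue ACLs in this template are enforced only when
mapreduce.cluster.acls.enabled is true. Jobs pick a queue at submission time; a
sketch using the bundled examples jar (jar path and version are assumptions):

  hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi \
    -Dmapreduce.job.queuename=default 2 10
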
diff --git a/PCAP-PIC/hadoop/etc/hadoop/mapred-site.xml b/PCAP-PIC/hadoop/etc/hadoop/mapred-site.xml
new file mode 100644
index 0000000..304a568
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/mapred-site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>192.168.10.193:10020</value>
+ </property>
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>192.168.10.193:19888</value>
+ </property>
+</configuration>
+
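
The history server endpoints above (RPC on 10020, web UI on 19888) can be
probed once the daemon is running; a sketch against its REST API:

  curl http://192.168.10.193:19888/ws/v1/history/info
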
diff --git a/PCAP-PIC/hadoop/etc/hadoop/slaves b/PCAP-PIC/hadoop/etc/hadoop/slaves
new file mode 100644
index 0000000..13d2586
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/slaves
@@ -0,0 +1,6 @@
+192.168.10.193
+192.168.10.194
+192.168.10.195
+192.168.10.193
+192.168.10.194
+192.168.10.195
diff --git a/PCAP-PIC/hadoop/etc/hadoop/ssl-client.xml.example b/PCAP-PIC/hadoop/etc/hadoop/ssl-client.xml.example
new file mode 100644
index 0000000..a50dce4
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/ssl-client.xml.example
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+<property>
+ <name>ssl.client.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload check interval, in milliseconds.
+ Default value is 10000 (10 seconds).
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.keypassword</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+</configuration>
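The client SSL example above expects JKS keystore and truststore files on disk. A minimal sketch of creating them with the JDK's keytool; the paths, alias, and the changeit password are placeholders, not values from this repository:

    # Generate a client key pair, export its certificate, and trust it
    keytool -genkeypair -alias hdfs-client -keyalg RSA -keysize 2048 \
      -dname "CN=hdfs-client" -keystore client-keystore.jks -storepass changeit
    keytool -exportcert -alias hdfs-client -keystore client-keystore.jks \
      -storepass changeit -file hdfs-client.cer
    keytool -importcert -noprompt -alias hdfs-client -file hdfs-client.cer \
      -keystore client-truststore.jks -storepass changeit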
diff --git a/PCAP-PIC/hadoop/etc/hadoop/ssl-server.xml.example b/PCAP-PIC/hadoop/etc/hadoop/ssl-server.xml.example
new file mode 100644
index 0000000..02d300c
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/ssl-server.xml.example
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+<property>
+ <name>ssl.server.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload check interval, in milliseconds.
+ Default value is 10000 (10 seconds).
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.password</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.keypassword</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. The keystore file format, default value is "jks".
+ </description>
+</property>
+
+</configuration>
diff --git a/PCAP-PIC/hadoop/etc/hadoop/yarn-env.cmd b/PCAP-PIC/hadoop/etc/hadoop/yarn-env.cmd
new file mode 100644
index 0000000..d863c1e
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/yarn-env.cmd
@@ -0,0 +1,60 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem User for YARN daemons
+if not defined HADOOP_YARN_USER (
+ set HADOOP_YARN_USER=%yarn%
+)
+
+if not defined YARN_CONF_DIR (
+ set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+)
+
+if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+)
+
+if not defined YARN_LOG_DIR (
+ set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
+)
+
+if not defined YARN_LOGFILE (
+ set YARN_LOGFILE=yarn.log
+)
+
+@rem default policy file for service-level authorization
+if not defined YARN_POLICYFILE (
+ set YARN_POLICYFILE=hadoop-policy.xml
+)
+
+if not defined YARN_ROOT_LOGGER (
+ set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
+)
+
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
+if defined JAVA_LIBRARY_PATH (
+ set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file
diff --git a/PCAP-PIC/hadoop/etc/hadoop/yarn-env.sh b/PCAP-PIC/hadoop/etc/hadoop/yarn-env.sh
new file mode 100644
index 0000000..810ba1b
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/yarn-env.sh
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+export YARN_RESOURCEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9909:/home/tsg/olap/hadoop-2.7.1/monitor/yarn.yaml"
+
+export YARN_NODEMANAGER_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:/home/tsg/olap/hadoop-2.7.1/monitor/jmx_prometheus_javaagent-0.12.0.jar=9910:/home/tsg/olap/hadoop-2.7.1/monitor/yarn.yaml"
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+# YARN_HEAPSIZE=1000
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Xmx2048m -Xms1024m"
+
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_TIMELINESERVER_HEAPSIZE=1000
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+#export YARN_NODEMANAGER_HEAPSIZE=1000
+
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Xmx2048m -Xms1024m"
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+ YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+ YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
diff --git a/PCAP-PIC/hadoop/etc/hadoop/yarn-site.xml b/PCAP-PIC/hadoop/etc/hadoop/yarn-site.xml
new file mode 100644
index 0000000..12fbea7
--- /dev/null
+++ b/PCAP-PIC/hadoop/etc/hadoop/yarn-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Declare the addresses of the two ResourceManagers -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+  <!-- Configure rm1 -->
+  <!-- Configure the rm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>192.168.10.193</value>
+ </property>
+
+  <!-- Configure the rm1 web application address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>192.168.10.193:8080</value>
+ </property>
+
+  <!-- Configure the rm1 scheduler port; default 8030 -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>192.168.10.193:8030</value>
+ </property>
+
+  <!-- Default port 8031 -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>192.168.10.193:8031</value>
+ </property>
+
+  <!-- Configure the address and port of the rm1 applications manager interface; default 8032 -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>192.168.10.193:8032</value>
+ </property>
+
+  <!-- Configure the rm1 admin port; default 8033 -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>192.168.10.193:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>192.168.10.193:23142</value>
+ </property>
+
+  <!-- Configure rm2 -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>192.168.10.194</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>192.168.10.194:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>192.168.10.194:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>192.168.10.194:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>192.168.10.194:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>192.168.10.194:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>192.168.10.194:23142</value>
+ </property>
+
+  <!-- Specify the addresses of the ZooKeeper cluster -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>192.168.10.193:2181,192.168.10.194:2181,192.168.10.195:2181</value>
+ </property>
+
+  <!-- Enable automatic recovery: if the RM fails while jobs are in flight, they are recovered automatically; default false -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Enable NodeManager automatic recovery; default false -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+  <!-- Path of the local filesystem directory where the NodeManager saves its running state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+  <!-- RPC address available to the NM; the default, ${yarn.nodemanager.hostname}:0, uses an ephemeral port that changes after a cluster restart, so the port is pinned here to keep NM restart working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/home/tsg/olap/hadoop-2.7.1/logs/app-logs/</value>
+ </property>
+
+  <!-- Amount of physical memory, in MB, that the NM may allocate to containers; default 8192 -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>61440</value>
+ </property>
+
+  <!-- Minimum allocation for every container request at the RM, in MB; default 1024 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+  <!-- Maximum allocation for every container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb; default 8192 -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>61440</value>
+ </property>
+
+  <!-- Number of vcores that can be allocated to containers; the RM scheduler uses it when allocating resources. It does not limit the physical cores YARN containers may use. Default 8; usually set to the server's total CPU core count -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>48</value>
+ </property>
+
+  <!-- Minimum allocation for every container request at the RM, in vcores; default 1 -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+  <!-- Maximum allocation for every container request at the RM, in vcores; default 32. Usually set slightly below yarn.nodemanager.resource.cpu-vcores, and a job's slot count should not exceed it -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>48</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+  <!-- Number of ApplicationMaster restart attempts; defaults to 2 once HA is configured, and may be increased in production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://192.168.10.193:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
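With ResourceManager HA configured as above, each rm-id can be asked for its current role. A sketch using the stock yarn CLI against this configuration:

    # Exactly one of the two should report "active", the other "standby"
    yarn rmadmin -getServiceState rsm1
    yarn rmadmin -getServiceState rsm2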
diff --git a/PCAP-PIC/hadoop/sbin/dae-hdfsjournal.sh b/PCAP-PIC/hadoop/sbin/dae-hdfsjournal.sh
new file mode 100644
index 0000000..4ec61f4
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-hdfsjournal.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
+
+if [ $HAS_JN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
+ set_log jnRes_sum JournalNode
+fi
+
+sleep 60
+done
+
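This watchdog, like the dae-*.sh scripts that follow, polls ps once a minute, restarts its daemon when the process disappears, and keeps a per-daemon restart counter under logs/. A sketch of running it detached, using the BASE_DIR/VERSION path hard-coded above:

    # Launch the JournalNode watchdog in the background
    nohup /home/tsg/olap/hadoop-2.7.1/sbin/dae-hdfsjournal.sh >/dev/null 2>&1 &
    # Watch its bookkeeping: one "first startup" line, then one line per restart
    tail -f /home/tsg/olap/hadoop-2.7.1/logs/restart.log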
diff --git a/PCAP-PIC/hadoop/sbin/dae-hdfsmaster.sh b/PCAP-PIC/hadoop/sbin/dae-hdfsmaster.sh
new file mode 100644
index 0000000..57f6519
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-hdfsmaster.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
+HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_NN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
+ set_log nnRes_sum NameNode
+fi
+
+if [ $HAS_ZKFC -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
+ set_log zkfcRes_sum DFSZKFailoverController
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hadoop/sbin/dae-hdfsworker.sh b/PCAP-PIC/hadoop/sbin/dae-hdfsworker.sh
new file mode 100644
index 0000000..d504768
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-hdfsworker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_DN -eq "0" ];then
+ yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
+ set_log dnRes_sum DataNode
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+# set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hadoop/sbin/dae-yarnhistory.sh b/PCAP-PIC/hadoop/sbin/dae-yarnhistory.sh
new file mode 100644
index 0000000..f732d6f
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-yarnhistory.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_HISTORY=`ps -ef | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l`
+
+if [ $HAS_HISTORY -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/mr-jobhistory-daemon.sh start historyserver > /dev/null
+    set_log historyRes_sum JobHistoryServer
+fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hadoop/sbin/dae-yarnmaster.sh b/PCAP-PIC/hadoop/sbin/dae-yarnmaster.sh
new file mode 100644
index 0000000..0cb98c2
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-yarnmaster.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_RM=`ps -ef | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l`
+
+if [ $HAS_RM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
+    set_log rmRes_sum ResourceManager
+fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hadoop/sbin/dae-yarnworker.sh b/PCAP-PIC/hadoop/sbin/dae-yarnworker.sh
new file mode 100644
index 0000000..a2db47f
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/dae-yarnworker.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -f "$RES_SUM_FILE/" ]
+then
+ mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -d "$RES_SUM_FILE/$1" ];then
+ echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service first startup" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NM=`ps -ef | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l`
+
+if [ $HAS_NM -eq "0" ];then
+ $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+ set_log nmRes_sum NodeManager
+fi
+
+sleep 60
+done
diff --git a/PCAP-PIC/hadoop/sbin/distribute-exclude.sh b/PCAP-PIC/hadoop/sbin/distribute-exclude.sh
new file mode 100644
index 0000000..66fc14a
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/distribute-exclude.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ------------------------------------------------------------------
+#
+# The purpose of this script is to distribute the exclude file (see
+# "dfs.hosts.exclude" in hdfs-site.xml).
+#
+# Input of the script is a local exclude file. The exclude file
+# will be distributed to all the namenodes. The location on the namenodes
+# is determined by the configuration "dfs.hosts.exclude" in hdfs-site.xml
+# (this value is read from the local copy of hdfs-site.xml and must be same
+# on all the namenodes).
+#
+# The user running this script needs write permissions on the target
+# directory on namenodes.
+#
+# After this command, run refresh-namenodes.sh so that namenodes start
+# using the new exclude file.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$1" = '' ] ; then
+  echo "Error: please specify local exclude file as a first argument"
+ exit 1
+else
+ excludeFilenameLocal=$1
+fi
+
+if [ ! -f "$excludeFilenameLocal" ] ; then
+ echo "Error: exclude file [$excludeFilenameLocal] does not exist."
+ exit 1
+fi
+
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
+
+if [ "$excludeFilenameRemote" = '' ] ; then
+ echo \
+ "Error: hdfs getconf -excludeFile returned empty string, " \
+ "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
+ "configuration and on all namenodes"
+ exit 1
+fi
+
+echo "Copying exclude file [$excludeFilenameRemote] to namenodes:"
+
+for namenode in $namenodes ; do
+ echo " [$namenode]"
+ scp "$excludeFilenameLocal" "$namenode:$excludeFilenameRemote"
+ if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+done
+
+if [ "$errorFlag" = '1' ] ; then
+ echo "Error: transfer of exclude file failed, see error messages above."
+ exit 1
+else
+ echo "Transfer of exclude file to all namenodes succeeded."
+fi
+
+# eof
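Per the header comments, decommissioning is a two-step flow: push the exclude file, then tell the namenodes to re-read it. A sketch with a hypothetical local file:

    # /tmp/exclude-hosts names the datanodes to decommission, one per line
    ./distribute-exclude.sh /tmp/exclude-hosts
    ./refresh-namenodes.sh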
diff --git a/PCAP-PIC/hadoop/sbin/hadoop-daemon.sh b/PCAP-PIC/hadoop/sbin/hadoop-daemon.sh
new file mode 100644
index 0000000..6a4cd69
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/hadoop-daemon.sh
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a Hadoop command as a daemon.
+#
+# Environment Variables
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+# HADOOP_LOG_DIR Where log files are stored. PWD by default.
+# HADOOP_MASTER host:path where hadoop code should be rsync'd from
+#   HADOOP_PID_DIR   Where the pid files are stored. /tmp by default.
+# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# get arguments
+
+#default value
+hadoopScript="$HADOOP_PREFIX"/bin/hadoop
+if [ "--script" = "$1" ]
+ then
+ shift
+ hadoopScript=$1
+ shift
+fi
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ starting_secure_dn="true"
+fi
+
+#Determine if we're starting a privileged NFS, if so, redefine the appropriate variables
+if [ "$command" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+ export HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+ export HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+ export HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+ starting_privileged_nfs="true"
+fi
+
+if [ "$HADOOP_IDENT_STRING" = "" ]; then
+ export HADOOP_IDENT_STRING="$USER"
+fi
+
+
+# get log directory
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+ export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
+fi
+
+if [ ! -w "$HADOOP_LOG_DIR" ] ; then
+ mkdir -p "$HADOOP_LOG_DIR"
+ chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
+fi
+
+if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PID_DIR=/tmp
+fi
+
+# some variables
+export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
+log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$HADOOP_NICENESS" = "" ]; then
+ export HADOOP_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ [ -w "$HADOOP_PID_DIR" ] || mkdir -p "$HADOOP_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$HADOOP_MASTER" != "" ]; then
+ echo rsync from $HADOOP_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_PREFIX"
+ case $command in
+ namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc)
+ if [ -z "$HADOOP_HDFS_HOME" ]; then
+ hdfsScript="$HADOOP_PREFIX"/bin/hdfs
+ else
+ hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
+ fi
+ nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ ;;
+ (*)
+ nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ ;;
+ esac
+ echo $! > $pid
+ sleep 1
+ head "$log"
+ # capture the ulimit output
+ if [ "true" = "$starting_secure_dn" ]; then
+ echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
+ # capture the ulimit info for the appropriate user
+ su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
+ elif [ "true" = "$starting_privileged_nfs" ]; then
+ echo "ulimit -a for privileged nfs user $HADOOP_PRIVILEGED_NFS_USER" >> $log
+ su --shell=/bin/bash $HADOOP_PRIVILEGED_NFS_USER -c 'ulimit -a' >> $log 2>&1
+ else
+ echo "ulimit -a for user $USER" >> $log
+ ulimit -a >> $log 2>&1
+ fi
+ sleep 3;
+ if ! ps -p $! > /dev/null ; then
+ exit 1
+ fi
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $HADOOP_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
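hadoop_rotate_log above keeps five generations by default: before each start, .out.4 becomes .out.5 (discarding the old .out.5), down to .out becoming .out.1. A sketch of the resulting log directory, assuming HADOOP_IDENT_STRING=root, command namenode, and host nn1:

    hadoop-root-namenode-nn1.out      # current run
    hadoop-root-namenode-nn1.out.1    # previous run
    hadoop-root-namenode-nn1.out.5    # oldest generation still kept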
diff --git a/PCAP-PIC/hadoop/sbin/hadoop-daemons.sh b/PCAP-PIC/hadoop/sbin/hadoop-daemons.sh
new file mode 100644
index 0000000..181d7ac
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/hadoop-daemons.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Hadoop command on all slave hosts.
+
+usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
diff --git a/PCAP-PIC/hadoop/sbin/hdfs-config.cmd b/PCAP-PIC/hadoop/sbin/hdfs-config.cmd
new file mode 100644
index 0000000..f3aa733
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/hdfs-config.cmd
@@ -0,0 +1,43 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem included in all the hdfs scripts with source command
+@rem should not be executed directly
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+if exist %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd (
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+) else if exist %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd %*
+) else if exist %HADOOP_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_HOME%\libexec\hadoop-config.cmd %*
+) else (
+ echo Hadoop common not found.
+)
+
+:eof
diff --git a/PCAP-PIC/hadoop/sbin/hdfs-config.sh b/PCAP-PIC/hadoop/sbin/hdfs-config.sh
new file mode 100644
index 0000000..2aabf53
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/hdfs-config.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hdfs scripts with source command
+# should not be executed directly
+
+bin=`which "$0"`
+bin=`dirname "${bin}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
+ . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh
+elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
+ . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh
+elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
+ . "$HADOOP_HOME"/libexec/hadoop-config.sh
+else
+ echo "Hadoop common not found."
+  exit 1
+fi
diff --git a/PCAP-PIC/hadoop/sbin/httpfs.sh b/PCAP-PIC/hadoop/sbin/httpfs.sh
new file mode 100644
index 0000000..a593b67
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/httpfs.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+ ls=`ls -ld "${PRG}"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "${PRG}"`/"$link"
+ fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+source ${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/httpfs-config.sh
+
+# The Java system property 'httpfs.http.port' is not used by HttpFS itself;
+# it is used in Tomcat's server.xml configuration file.
+#
+print "Using CATALINA_OPTS: ${CATALINA_OPTS}"
+
+catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}";
+catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}";
+catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}";
+catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}";
+catalina_opts="${catalina_opts} -Dhttpfs.admin.port=${HTTPFS_ADMIN_PORT}";
+catalina_opts="${catalina_opts} -Dhttpfs.http.port=${HTTPFS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dhttpfs.http.hostname=${HTTPFS_HTTP_HOSTNAME}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.file=${HTTPFS_SSL_KEYSTORE_FILE}";
+catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.pass=${HTTPFS_SSL_KEYSTORE_PASS}";
+
+print "Adding to CATALINA_OPTS: ${catalina_opts}"
+
+export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
+
+# Because of a bug, the catalina.sh script does not use CATALINA_OPTS when stopping the server
+#
+if [ "${1}" = "stop" ]; then
+ export JAVA_OPTS=${CATALINA_OPTS}
+fi
+
+if [ "${HTTPFS_SILENT}" != "true" ]; then
+ exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
+else
+ exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
+fi
+
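Once Tomcat is up, HttpFS proxies the WebHDFS REST API on its own HTTP port (14000 in the stock httpfs-env.sh, unless overridden). A sketch of a liveness check, assuming simple/pseudo authentication:

    # List the HDFS root through HttpFS; user.name is required for simple auth
    curl -s "http://localhost:14000/webhdfs/v1/?op=LISTSTATUS&user.name=hdfs"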
diff --git a/PCAP-PIC/hadoop/sbin/kms.sh b/PCAP-PIC/hadoop/sbin/kms.sh
new file mode 100644
index 0000000..f6ef6a5
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/kms.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# resolve links - $0 may be a softlink
+PRG="${0}"
+
+while [ -h "${PRG}" ]; do
+ ls=`ls -ld "${PRG}"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "${PRG}"`/"$link"
+ fi
+done
+
+BASEDIR=`dirname ${PRG}`
+BASEDIR=`cd ${BASEDIR}/..;pwd`
+
+KMS_SILENT=${KMS_SILENT:-true}
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}"
+source ${HADOOP_LIBEXEC_DIR}/kms-config.sh
+
+
+if [ "x$JAVA_LIBRARY_PATH" = "x" ]; then
+ JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/"
+else
+ JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/:${JAVA_LIBRARY_PATH}"
+fi
+
+# The Java system property 'kms.http.port' is not used by the KMS itself;
+# it is used in Tomcat's server.xml configuration file.
+#
+
+# Mask the trustStorePassword
+KMS_SSL_TRUSTSTORE_PASS=`echo $CATALINA_OPTS | grep -o 'trustStorePassword=[^ ]*' | awk -F'=' '{print $2}'`
+CATALINA_OPTS_DISP=`echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/'`
+print "Using CATALINA_OPTS: ${CATALINA_OPTS_DISP}"
+
+catalina_opts="-Dkms.home.dir=${KMS_HOME}";
+catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}";
+catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
+catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";
+catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}";
+catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}";
+catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}";
+catalina_opts="${catalina_opts} -Djava.library.path=${JAVA_LIBRARY_PATH}";
+
+print "Adding to CATALINA_OPTS: ${catalina_opts}"
+print "Found KMS_SSL_KEYSTORE_PASS: `echo ${KMS_SSL_KEYSTORE_PASS} | sed 's/./*/g'`"
+
+export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
+
+# Because of a bug, the catalina.sh script does not use CATALINA_OPTS when stopping the server
+#
+if [ "${1}" = "stop" ]; then
+ export JAVA_OPTS=${CATALINA_OPTS}
+fi
+
+# If SSL is enabled, populate the passwords into ssl-server.xml before starting Tomcat
+if [ ! "${KMS_SSL_KEYSTORE_PASS}" = "" ] || [ ! "${KMS_SSL_TRUSTSTORE_PASS}" = "" ]; then
+ # Set a KEYSTORE_PASS if not already set
+ KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
+ cat ${CATALINA_BASE}/conf/ssl-server.xml.conf \
+ | sed 's/_kms_ssl_keystore_pass_/'${KMS_SSL_KEYSTORE_PASS}'/g' \
+ | sed 's/_kms_ssl_truststore_pass_/'${KMS_SSL_TRUSTSTORE_PASS}'/g' > ${CATALINA_BASE}/conf/ssl-server.xml
+fi
+
+exec ${KMS_CATALINA_HOME}/bin/catalina.sh "$@"
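With the KMS running (port 16000 in the stock kms-env.sh, unless overridden), keys are managed through the hadoop key CLI pointed at the kms:// provider. A sketch with a hypothetical key name:

    # Create and list a key through the running KMS
    hadoop key create mykey -provider kms://http@localhost:16000/kms
    hadoop key list -provider kms://http@localhost:16000/kms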
diff --git a/PCAP-PIC/hadoop/sbin/mr-jobhistory-daemon.sh b/PCAP-PIC/hadoop/sbin/mr-jobhistory-daemon.sh
new file mode 100644
index 0000000..7585c9a
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/mr-jobhistory-daemon.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Environment Variables
+#
+# HADOOP_JHS_LOGGER Hadoop JobSummary logger.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_MAPRED_HOME}/conf.
+#   HADOOP_MAPRED_PID_DIR   Where the pid files are stored. /tmp by default.
+# HADOOP_MAPRED_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] (start|stop) <mapred-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+ . $HADOOP_LIBEXEC_DIR/mapred-config.sh
+fi
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ "$HADOOP_MAPRED_IDENT_STRING" = "" ]; then
+ export HADOOP_MAPRED_IDENT_STRING="$USER"
+fi
+
+export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-${HADOOP_PREFIX}}
+export HADOOP_MAPRED_LOGFILE=mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_MAPRED_ROOT_LOGGER=${HADOOP_MAPRED_ROOT_LOGGER:-INFO,RFA}
+export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
+
+if [ -f "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/mapred-env.sh"
+fi
+
+mkdir -p "$HADOOP_MAPRED_LOG_DIR"
+chown $HADOOP_MAPRED_IDENT_STRING $HADOOP_MAPRED_LOG_DIR
+
+if [ "$HADOOP_MAPRED_PID_DIR" = "" ]; then
+ HADOOP_MAPRED_PID_DIR=/tmp
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_MAPRED_IDENT_STRING"
+
+log=$HADOOP_MAPRED_LOG_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_MAPRED_PID_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command.pid
+
+HADOOP_MAPRED_STOP_TIMEOUT=${HADOOP_MAPRED_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$HADOOP_MAPRED_NICENESS" = "" ]; then
+ export HADOOP_MAPRED_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$HADOOP_MAPRED_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_MAPRED_HOME"
+ nohup nice -n $HADOOP_MAPRED_NICENESS "$HADOOP_MAPRED_HOME"/bin/mapred --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $HADOOP_MAPRED_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $HADOOP_MAPRED_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
diff --git a/PCAP-PIC/hadoop/sbin/refresh-namenodes.sh b/PCAP-PIC/hadoop/sbin/refresh-namenodes.sh
new file mode 100644
index 0000000..d3f6759
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/refresh-namenodes.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ------------------------------------------------------------------
+# This script refreshes all namenodes; it's a simple wrapper
+# for dfsadmin to support multiple namenodes.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses)
+if [ "$?" != '0' ] ; then errorFlag='1' ;
+else
+ for namenode in $namenodes ; do
+ echo "Refreshing namenode [$namenode]"
+ "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes
+ if [ "$?" != '0' ] ; then errorFlag='1' ; fi
+ done
+fi
+
+if [ "$errorFlag" = '1' ] ; then
+ echo "Error: refresh of namenodes failed, see error messages above."
+ exit 1
+else
+ echo "Refresh of namenodes done."
+fi
+
+
+# eof
diff --git a/PCAP-PIC/hadoop/sbin/slaves.sh b/PCAP-PIC/hadoop/sbin/slaves.sh
new file mode 100644
index 0000000..016392f
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/slaves.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# HADOOP_SLAVES File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/slaves.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
+##
+
+usage="Usage: slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Where to start the script, see hadoop-config.sh
+# (it sets up the variables based on command-line options)
+if [ "$HADOOP_SLAVE_NAMES" != '' ] ; then
+ SLAVE_NAMES=$HADOOP_SLAVE_NAMES
+else
+ SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
+ SLAVE_NAMES=$(cat "$SLAVE_FILE" | sed 's/#.*$//;/^$/d')
+fi
+
+# start the daemons
+for slave in $SLAVE_NAMES ; do
+ ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
+ sleep $HADOOP_SLAVE_SLEEP
+ fi
+done
+
+wait
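The ssh fan-out above makes slaves.sh a general remote-execution helper: every non-comment, non-empty line of the slaves file receives the command, and each host's output is prefixed with its name. A sketch:

    # Run a command on every host listed in etc/hadoop/slaves
    ./slaves.sh uptime
    # Or target an ad-hoc host list via the variable read above
    HADOOP_SLAVE_NAMES="192.168.10.194 192.168.10.195" ./slaves.sh jps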
diff --git a/PCAP-PIC/hadoop/sbin/start-all.cmd b/PCAP-PIC/hadoop/sbin/start-all.cmd
new file mode 100644
index 0000000..9f65b5d
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Start all hadoop daemons. Run this on master node.
+
+echo This script is deprecated. Instead use start-dfs.cmd and start-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem start yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\start-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\start-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/start-all.sh b/PCAP-PIC/hadoop/sbin/start-all.sh
new file mode 100644
index 0000000..3124328
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-all.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all hadoop daemons. Run this on master node.
+
+echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# start hdfs daemons if hdfs is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# start yarn daemons if yarn is present
+if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
+ "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+fi
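
Because the script only delegates, the non-deprecated equivalent is the pair of
calls it wraps; a sketch assuming both home variables are set:

    "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh  --config "$HADOOP_CONF_DIR"
    "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config "$HADOOP_CONF_DIR"
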
diff --git a/PCAP-PIC/hadoop/sbin/start-balancer.sh b/PCAP-PIC/hadoop/sbin/start-balancer.sh
new file mode 100644
index 0000000..2c14a59
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-balancer.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# Start balancer daemon.
+
+"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
diff --git a/PCAP-PIC/hadoop/sbin/start-dfs.cmd b/PCAP-PIC/hadoop/sbin/start-dfs.cmd
new file mode 100644
index 0000000..9f20e5a
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+start "Apache Hadoop Distribution" hadoop namenode
+start "Apache Hadoop Distribution" hadoop datanode
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/start-dfs.sh b/PCAP-PIC/hadoop/sbin/start-dfs.sh
new file mode 100644
index 0000000..a8c2b98
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-dfs.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop dfs daemons.
+# Optionally upgrade or roll back dfs state.
+# Run this on the master node.
+
+usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# get arguments
+if [[ $# -ge 1 ]]; then
+ startOpt="$1"
+ shift
+ case "$startOpt" in
+ -upgrade)
+ nameStartOpt="$startOpt"
+ ;;
+ -rollback)
+ dataStartOpt="$startOpt"
+ ;;
+ *)
+ echo $usage
+ exit 1
+ ;;
+ esac
+fi
+
+#Add other possible options
+nameStartOpt="$nameStartOpt $@"
+
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+
+echo "Starting namenodes on [$NAMENODES]"
+
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" start namenode $nameStartOpt
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
+if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ echo \
+ "Attempting to start secure cluster, skipping datanodes. " \
+ "Run start-secure-dns.sh as root to complete startup."
+else
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --script "$bin/hdfs" start datanode $dataStartOpt
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+
+if [ -n "$SECONDARY_NAMENODES" ]; then
+ echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$SECONDARY_NAMENODES" \
+ --script "$bin/hdfs" start secondarynamenode
+fi
+
+#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Starting journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" start journalnode ;;
+esac
+
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+ echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" start zkfc
+fi
+
+# eof
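
The journal-node discovery relies on a sed pipeline that reduces a quorum URI to
a bare host list; a standalone sketch with hypothetical hostnames shows the
transformation, along with the optional first argument consumed by the case
block above:

    # ./sbin/start-dfs.sh -upgrade       # optional first argument
    uri='qjournal://nn1.example.com:8485;nn2.example.com:8485/mycluster'
    echo "$uri" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g'
    # -> nn1.example.com nn2.example.com
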
diff --git a/PCAP-PIC/hadoop/sbin/start-secure-dns.sh b/PCAP-PIC/hadoop/sbin/start-secure-dns.sh
new file mode 100644
index 0000000..7ddf687
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-secure-dns.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run as root to start secure datanodes in a security-enabled cluster.
+
+usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+else
+ echo $usage
+fi
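
The guard makes this a no-op unless both conditions hold: an effective UID of 0
and a configured secure datanode user. An invocation sketch (the hdfs account
name is an assumption; HADOOP_SECURE_DN_USER is normally exported from
hadoop-env.sh rather than set on the command line):

    # as root:
    HADOOP_SECURE_DN_USER=hdfs ./sbin/start-secure-dns.sh
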
diff --git a/PCAP-PIC/hadoop/sbin/start-yarn.cmd b/PCAP-PIC/hadoop/sbin/start-yarn.cmd
new file mode 100644
index 0000000..989510b
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo starting yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start resourceManager
+start "Apache Hadoop Distribution" yarn resourcemanager
+@rem start nodeManager
+start "Apache Hadoop Distribution" yarn nodemanager
+@rem start proxyserver
+@rem start "Apache Hadoop Distribution" yarn proxyserver
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/start-yarn.sh b/PCAP-PIC/hadoop/sbin/start-yarn.sh
new file mode 100644
index 0000000..40b77fb
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/start-yarn.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all yarn daemons. Run this on the master node.
+
+echo "starting yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# start resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
+# start nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager
+# start proxyserver
+#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver
diff --git a/PCAP-PIC/hadoop/sbin/stop-all.cmd b/PCAP-PIC/hadoop/sbin/stop-all.cmd
new file mode 100644
index 0000000..1d22c79
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Stop all hadoop daemons. Run this on the master node.
+
+echo This script is deprecated. Instead use stop-dfs.cmd and stop-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem stop yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/stop-all.sh b/PCAP-PIC/hadoop/sbin/stop-all.sh
new file mode 100644
index 0000000..9a2fe98
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-all.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all hadoop daemons. Run this on the master node.
+
+echo "This script is deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# stop hdfs daemons if hdfs is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# stop yarn daemons if yarn is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
+fi
diff --git a/PCAP-PIC/hadoop/sbin/stop-balancer.sh b/PCAP-PIC/hadoop/sbin/stop-balancer.sh
new file mode 100644
index 0000000..df82456
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-balancer.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+# Stop balancer daemon.
+# Run this on the machine where the balancer is running
+
+"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
diff --git a/PCAP-PIC/hadoop/sbin/stop-dfs.cmd b/PCAP-PIC/hadoop/sbin/stop-dfs.cmd
new file mode 100644
index 0000000..f0cf015
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop namenode"
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop datanode"
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/stop-dfs.sh b/PCAP-PIC/hadoop/sbin/stop-dfs.sh
new file mode 100644
index 0000000..6a622fa
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-dfs.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
+if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ echo \
+ "Attempting to stop secure cluster, skipping datanodes. " \
+ "Run stop-secure-dns.sh as root to complete shutdown."
+else
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --script "$bin/hdfs" stop datanode
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+
+if [ -n "$SECONDARY_NAMENODES" ]; then
+ echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$SECONDARY_NAMENODES" \
+ --script "$bin/hdfs" stop secondarynamenode
+fi
+
+#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+ JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+ echo "Stopping journal nodes [$JOURNAL_NODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$JOURNAL_NODES" \
+ --script "$bin/hdfs" stop journalnode ;;
+esac
+
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+ echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" stop zkfc
+fi
+# eof
diff --git a/PCAP-PIC/hadoop/sbin/stop-secure-dns.sh b/PCAP-PIC/hadoop/sbin/stop-secure-dns.sh
new file mode 100644
index 0000000..fdd47c3
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-secure-dns.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run as root to stop secure datanodes in a security-enabled cluster.
+
+usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+else
+ echo $usage
+fi
diff --git a/PCAP-PIC/hadoop/sbin/stop-yarn.cmd b/PCAP-PIC/hadoop/sbin/stop-yarn.cmd
new file mode 100644
index 0000000..0914337
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo stopping yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop resourceManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn resourcemanager"
+@rem stop nodeManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn nodemanager"
+@rem stop proxy server
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn proxyserver"
+
+endlocal
diff --git a/PCAP-PIC/hadoop/sbin/stop-yarn.sh b/PCAP-PIC/hadoop/sbin/stop-yarn.sh
new file mode 100644
index 0000000..a8498ef
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/stop-yarn.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all yarn daemons. Run this on the master node.
+
+echo "stopping yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# stop resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop resourcemanager
+# stop nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR stop nodemanager
+# stop proxy server
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop proxyserver
diff --git a/PCAP-PIC/hadoop/sbin/yarn-daemon.sh b/PCAP-PIC/hadoop/sbin/yarn-daemon.sh
new file mode 100644
index 0000000..fbfa71d
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/yarn-daemon.sh
@@ -0,0 +1,161 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a yarn command as a daemon.
+#
+# Environment Variables
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+# YARN_LOG_DIR Where log files are stored. PWD by default.
+# YARN_MASTER host:path where hadoop code should be rsync'd from
+# YARN_PID_DIR Where the pid files are stored. /tmp by default.
+# YARN_IDENT_STRING A string representing this instance of hadoop. $USER by default.
+# YARN_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: yarn-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <yarn-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$YARN_IDENT_STRING" = "" ]; then
+ export YARN_IDENT_STRING="$USER"
+fi
+
+# get log directory
+if [ "$YARN_LOG_DIR" = "" ]; then
+ export YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+
+if [ ! -w "$YARN_LOG_DIR" ] ; then
+ mkdir -p "$YARN_LOG_DIR"
+ chown $YARN_IDENT_STRING $YARN_LOG_DIR
+fi
+
+if [ "$YARN_PID_DIR" = "" ]; then
+ YARN_PID_DIR=/tmp
+fi
+
+# some variables
+export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA}
+log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
+pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$YARN_NICENESS" = "" ]; then
+ export YARN_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ [ -w "$YARN_PID_DIR" ] || mkdir -p "$YARN_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$YARN_MASTER" != "" ]; then
+ echo rsync from $YARN_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$HADOOP_YARN_HOME"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_YARN_HOME"
+ nohup nice -n $YARN_NICENESS "$HADOOP_YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1
+ head "$log"
+ # capture the ulimit output
+ echo "ulimit -a" >> $log
+ ulimit -a >> $log 2>&1
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo stopping $command
+ kill $TARGET_PID
+ sleep $YARN_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
+ else
+ echo no $command to stop
+ fi
+ rm -f $pid
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
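
hadoop_rotate_log shifts numbered suffixes upward before each start so the
newest output always lands in the bare file. A worked trace, with the default
depth of 5 reduced to 3 and a hypothetical log path:

    # before:  rm.out   rm.out.1   rm.out.2
    hadoop_rotate_log /var/log/yarn/rm.out 3
    # after:   rm.out.1   rm.out.2   rm.out.3   (rm.out freed for the new run)

On stop, the daemon gets YARN_STOP_TIMEOUT (default 5) seconds to exit before
being sent kill -9.
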
diff --git a/PCAP-PIC/hadoop/sbin/yarn-daemons.sh b/PCAP-PIC/hadoop/sbin/yarn-daemons.sh
new file mode 100644
index 0000000..a7858e4
--- /dev/null
+++ b/PCAP-PIC/hadoop/sbin/yarn-daemons.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Yarn command on all slave hosts.
+
+usage="Usage: yarn-daemons.sh [--config confdir] [--hosts hostlistfile] [start
+|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$HADOOP_YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
+
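
The escaped \; keeps the semicolon out of the local shell, so each slave
receives a compound command; since $bin expands locally before the fan-out, the
remote side of a typical nodemanager start is effectively (paths illustrative):

    cd "$HADOOP_YARN_HOME" ; /opt/hadoop/sbin/yarn-daemon.sh --config "$YARN_CONF_DIR" start nodemanager
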