summaryrefslogtreecommitdiff
path: root/MPE/hadoop-2.7.1/bin
diff options
context:
space:
mode:
Diffstat (limited to 'MPE/hadoop-2.7.1/bin')
-rw-r--r--MPE/hadoop-2.7.1/bin/container-executorbin0 -> 160127 bytes
-rw-r--r--MPE/hadoop-2.7.1/bin/hadoop169
-rw-r--r--MPE/hadoop-2.7.1/bin/hadoop.cmd272
-rw-r--r--MPE/hadoop-2.7.1/bin/hdfs308
-rw-r--r--MPE/hadoop-2.7.1/bin/hdfs.cmd234
-rw-r--r--MPE/hadoop-2.7.1/bin/ini_hdfs.sh46
-rw-r--r--MPE/hadoop-2.7.1/bin/mapred172
-rw-r--r--MPE/hadoop-2.7.1/bin/mapred.cmd216
-rw-r--r--MPE/hadoop-2.7.1/bin/rcc61
-rw-r--r--MPE/hadoop-2.7.1/bin/set_hdfs_env.sh71
-rw-r--r--MPE/hadoop-2.7.1/bin/set_yarn_env.sh58
-rw-r--r--MPE/hadoop-2.7.1/bin/test-container-executorbin0 -> 204075 bytes
-rw-r--r--MPE/hadoop-2.7.1/bin/yarn330
-rw-r--r--MPE/hadoop-2.7.1/bin/yarn.cmd332
14 files changed, 2269 insertions, 0 deletions
diff --git a/MPE/hadoop-2.7.1/bin/container-executor b/MPE/hadoop-2.7.1/bin/container-executor
new file mode 100644
index 0000000..5e228bc
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/container-executor
Binary files differ
diff --git a/MPE/hadoop-2.7.1/bin/hadoop b/MPE/hadoop-2.7.1/bin/hadoop
new file mode 100644
index 0000000..a5e8885
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/hadoop
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs the hadoop core commands.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+function print_usage(){
+ echo "Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo " or"
+ echo " where COMMAND is one of:"
+ echo " fs run a generic filesystem user client"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " note: please use \"yarn jar\" to launch"
+ echo " YARN applications, not this command."
+ echo " checknative [-a|-h] check native hadoop and compression libraries availability"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+ echo " classpath prints the class path needed to get the"
+ echo " credential interact with credential providers"
+ echo " Hadoop jar and the required libraries"
+ echo " daemonlog get/set the log level for each daemon"
+ echo " trace view and modify Hadoop tracing settings"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+
+ #hdfs commands
+ namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups|portmap|nfs3)
+ echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
+ echo "Instead use the hdfs command for it." 1>&2
+ echo "" 1>&2
+ #try to locate hdfs and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
+ exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
+ exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+ else
+ echo "HADOOP_HDFS_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #mapred commands for backwards compatibility
+ pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
+ echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
+ echo "Instead use the mapred command for it." 1>&2
+ echo "" 1>&2
+ #try to locate mapred and if present, delegate to it.
+ shift
+ if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
+ exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
+ exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+ else
+ echo "HADOOP_MAPRED_HOME not found!"
+ exit 1
+ fi
+ ;;
+
+ #core commands
+ *)
+ # the core commands
+ if [ "$COMMAND" = "fs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+ echo "WARNING: Use \"yarn jar\" to launch YARN applications." 1>&2
+ fi
+ elif [ "$COMMAND" = "key" ] ; then
+ CLASS=org.apache.hadoop.crypto.key.KeyShell
+ elif [ "$COMMAND" = "checknative" ] ; then
+ CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ elif [ "$COMMAND" = "credential" ] ; then
+ CLASS=org.apache.hadoop.security.alias.CredentialShell
+ elif [ "$COMMAND" = "trace" ] ; then
+ CLASS=org.apache.hadoop.tracing.TraceAdmin
+ elif [ "$COMMAND" = "classpath" ] ; then
+ if [ "$#" -gt 1 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit
+ fi
+ elif [[ "$COMMAND" = -* ]] ; then
+ # class and package names cannot begin with a -
+ echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
+ exit 1
+ else
+ CLASS=$COMMAND
+ fi
+
+ # cygwin path translation
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+ fi
+
+ shift
+
+ # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+
+ #make sure security appender is turned off
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+ export CLASSPATH=$CLASSPATH
+ exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+ ;;
+
+esac
diff --git a/MPE/hadoop-2.7.1/bin/hadoop.cmd b/MPE/hadoop-2.7.1/bin/hadoop.cmd
new file mode 100644
index 0000000..ccf2fff
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/hadoop.cmd
@@ -0,0 +1,272 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+
+@rem This script runs the hadoop core commands.
+
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem HADOOP_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
+@rem added in the beginning of the global
+@rem classpath. Can be defined, for example,
+@rem by doing
+@rem export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem HADOOP_OPTS Extra Java runtime options.
+@rem
+@rem HADOOP_CLIENT_OPTS when the respective command is run.
+@rem HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
+@rem for e.g. HADOOP_CLIENT_OPTS applies to
+@rem more than one command (fs, dfs, fsck,
+@rem dfsadmin etc)
+@rem
+@rem HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+@rem
+@rem HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+call :updatepath %HADOOP_BIN_PATH%
+
+:main
+ setlocal enabledelayedexpansion
+
+ set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+ if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+ )
+
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+
+ set hadoop-command=%1
+ if not defined hadoop-command (
+ goto print_usage
+ )
+
+ call :make_command_arguments %*
+
+ set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups
+ for %%i in ( %hdfscommands% ) do (
+ if %hadoop-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ @echo DEPRECATED: Use of this script to execute hdfs command is deprecated. 1>&2
+ @echo Instead use the hdfs command for it. 1>&2
+ if exist %HADOOP_HDFS_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HDFS_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_HDFS_HOME not found!
+ goto :eof
+ )
+ )
+
+ set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker
+ for %%i in ( %mapredcommands% ) do (
+ if %hadoop-command% == %%i set mapredcommand=true
+ )
+ if defined mapredcommand (
+ @echo DEPRECATED: Use of this script to execute mapred command is deprecated. 1>&2
+ @echo Instead use the mapred command for it. 1>&2
+ if exist %HADOOP_MAPRED_HOME%\bin\mapred.cmd (
+ call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\mapred.cmd (
+ call %HADOOP_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_MAPRED_HOME not found!
+ goto :eof
+ )
+ )
+
+ if %hadoop-command% == classpath (
+ if not defined hadoop-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+
+ set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key
+ for %%i in ( %corecommands% ) do (
+ if %hadoop-command% == %%i set corecommand=true
+ )
+ if defined corecommand (
+ call :%hadoop-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hadoop-command%
+ )
+
+ set path=%PATH%;%HADOOP_BIN_PATH%
+
+ @rem Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+
+ @rem make sure security appender is turned off
+ if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,NullAppender
+ )
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER%
+
+ call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments%
+
+ exit /b %ERRORLEVEL%
+
+:fs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ goto :eof
+
+:jar
+ if defined YARN_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ ) else if defined YARN_CLIENT_OPTS (
+ @echo WARNING: Use "yarn jar" to launch YARN applications. 1>&2
+ )
+ set CLASS=org.apache.hadoop.util.RunJar
+ goto :eof
+
+:checknative
+ set CLASS=org.apache.hadoop.util.NativeLibraryChecker
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:credential
+ set CLASS=org.apache.hadoop.security.alias.CredentialShell
+ goto :eof
+
+:key
+ set CLASS=org.apache.hadoop.crypto.key.KeyShell
+ goto :eof
+
+:updatepath
+ set path_to_add=%*
+ set current_path_comparable=%path%
+ set current_path_comparable=%current_path_comparable: =_%
+ set current_path_comparable=%current_path_comparable:(=_%
+ set current_path_comparable=%current_path_comparable:)=_%
+ set path_to_add_comparable=%path_to_add%
+ set path_to_add_comparable=%path_to_add_comparable: =_%
+ set path_to_add_comparable=%path_to_add_comparable:(=_%
+ set path_to_add_comparable=%path_to_add_comparable:)=_%
+
+ for %%i in ( %current_path_comparable% ) do (
+ if /i "%%i" == "%path_to_add_comparable%" (
+ set path_to_add_exist=true
+ )
+ )
+ set system_path_comparable=
+ set path_to_add_comparable=
+ if not defined path_to_add_exist path=%path_to_add%;%path%
+ set path_to_add=
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _arguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _arguments (
+ set _arguments=%1
+ ) else (
+ set _arguments=!_arguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hadoop-command-arguments=%_arguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo fs run a generic filesystem user client
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo note: please use "yarn jar" to launch
+ @echo YARN applications, not this command.
+ @echo checknative [-a^|-h] check native hadoop and compression libraries availability
+ @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+ @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo credential interact with credential providers
+ @echo key manage keys via the KeyProvider
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MPE/hadoop-2.7.1/bin/hdfs b/MPE/hadoop-2.7.1/bin/hdfs
new file mode 100644
index 0000000..7f93738
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/hdfs
@@ -0,0 +1,308 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Environment Variables
+#
+# JSVC_HOME home directory of jsvc binary. Required for starting secure
+# datanode.
+#
+# JSVC_OUTFILE path to jsvc output file. Defaults to
+# $HADOOP_LOG_DIR/jsvc.out.
+#
+# JSVC_ERRFILE path to jsvc error file. Defaults to $HADOOP_LOG_DIR/jsvc.err.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin" > /dev/null; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+function print_usage(){
+ echo "Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND"
+ echo " where COMMAND is one of:"
+ echo " dfs run a filesystem command on the file systems supported in Hadoop."
+ echo " classpath prints the classpath"
+ echo " namenode -format format the DFS filesystem"
+ echo " secondarynamenode run the DFS secondary namenode"
+ echo " namenode run the DFS namenode"
+ echo " journalnode run the DFS journalnode"
+ echo " zkfc run the ZK Failover Controller daemon"
+ echo " datanode run a DFS datanode"
+ echo " dfsadmin run a DFS admin client"
+ echo " haadmin run a DFS HA admin client"
+ echo " fsck run a DFS filesystem checking utility"
+ echo " balancer run a cluster balancing utility"
+ echo " jmxget get JMX exported values from NameNode or DataNode."
+ echo " mover run a utility to move block replicas across"
+ echo " storage types"
+ echo " oiv apply the offline fsimage viewer to an fsimage"
+ echo " oiv_legacy apply the offline fsimage viewer to an legacy fsimage"
+ echo " oev apply the offline edits viewer to an edits file"
+ echo " fetchdt fetch a delegation token from the NameNode"
+ echo " getconf get config values from configuration"
+ echo " groups get the groups which users belong to"
+ echo " snapshotDiff diff two snapshots of a directory or diff the"
+ echo " current directory contents with a snapshot"
+ echo " lsSnapshottableDir list all snapshottable dirs owned by the current user"
+ echo " Use -help to see options"
+ echo " portmap run a portmap service"
+ echo " nfs3 run an NFS version 3 gateway"
+ echo " cacheadmin configure the HDFS cache"
+ echo " crypto configure HDFS encryption zones"
+ echo " storagepolicies list/get/set block storage policies"
+ echo " version print the version"
+ echo ""
+ echo "Most commands print help when invoked w/o parameters."
+ # There are also debug commands, but they don't show up in this listing.
+}
+
+if [ $# = 0 ]; then
+ print_usage
+ exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+ # usage flags
+ --help|-help|-h)
+ print_usage
+ exit
+ ;;
+esac
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_secure_dn="true"
+ else
+ echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+ "isn't set. Falling back to starting insecure DN."
+ fi
+fi
+
+# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
+if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+ starting_privileged_nfs="true"
+ else
+ echo "It looks like you're trying to start a privileged NFS server, but"\
+ "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
+ fi
+fi
+
+if [ "$COMMAND" = "namenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+# HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_JMX_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+ CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_JMX_OPTS"
+ if [ "$starting_secure_dn" = "true" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
+ else
+ HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+ fi
+elif [ "$COMMAND" = "journalnode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "haadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "mover" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+elif [ "$COMMAND" = "jmxget" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+elif [ "$COMMAND" = "oiv" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+elif [ "$COMMAND" = "oiv_legacy" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+elif [ "$COMMAND" = "oev" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+elif [ "$COMMAND" = "fetchdt" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetConf
+elif [ "$COMMAND" = "groups" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+elif [ "$COMMAND" = "snapshotDiff" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+elif [ "$COMMAND" = "portmap" ] ; then
+ CLASS=org.apache.hadoop.portmap.Portmap
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
+elif [ "$COMMAND" = "nfs3" ] ; then
+ CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
+elif [ "$COMMAND" = "cacheadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+elif [ "$COMMAND" = "crypto" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+elif [ "$COMMAND" = "debug" ]; then
+ CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+elif [ "$COMMAND" = "classpath" ]; then
+ if [ "$#" -gt 0 ]; then
+ CLASS=org.apache.hadoop.util.Classpath
+ else
+ # No need to bother starting up a JVM for this simple case.
+ if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ fi
+ echo $CLASSPATH
+ exit 0
+ fi
+else
+ CLASS="$COMMAND"
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+ HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+ HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+ HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+ HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+ HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+ HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+ HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+export CLASSPATH=$CLASSPATH
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+# Check to see if we should start a secure datanode
+if [ "$starting_secure_dn" = "true" ]; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+ else
+ HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_SECURE_DN_PID" \
+ -nodetach \
+ -user "$HADOOP_SECURE_DN_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
+elif [ "$starting_privileged_nfs" = "true" ] ; then
+ if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+ else
+ HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
+ fi
+
+ JSVC=$JSVC_HOME/jsvc
+ if [ ! -f $JSVC ]; then
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
+ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+ "and set JSVC_HOME to the directory containing the jsvc binary."
+ exit
+ fi
+
+ if [[ ! $JSVC_OUTFILE ]]; then
+ JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+ fi
+
+ if [[ ! $JSVC_ERRFILE ]]; then
+ JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
+ fi
+
+ exec "$JSVC" \
+ -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+ -errfile "$JSVC_ERRFILE" \
+ -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
+ -nodetach \
+ -user "$HADOOP_PRIVILEGED_NFS_USER" \
+ -cp "$CLASSPATH" \
+ $JAVA_HEAP_MAX $HADOOP_OPTS \
+ org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+else
+ # run it
+ exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+fi
+
diff --git a/MPE/hadoop-2.7.1/bin/hdfs.cmd b/MPE/hadoop-2.7.1/bin/hdfs.cmd
new file mode 100644
index 0000000..d52f52e
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/hdfs.cmd
@@ -0,0 +1,234 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+if "%1" == "--loglevel" (
+ shift
+ shift
+)
+
+:main
+ if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
+ call %HADOOP_CONF_DIR%\hadoop-env.cmd
+ )
+
+ set hdfs-command=%1
+ call :make_command_arguments %*
+
+ if not defined hdfs-command (
+ goto print_usage
+ )
+
+ if %hdfs-command% == classpath (
+ if not defined hdfs-command-arguments (
+ @rem No need to bother starting up a JVM for this simple case.
+ @echo %CLASSPATH%
+ exit /b
+ )
+ )
+ set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath
+ for %%i in ( %hdfscommands% ) do (
+ if %hdfs-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ call :%hdfs-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hdfs-command%
+ )
+
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:namenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS%
+ goto :eof
+
+:journalnode
+ set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS%
+ goto :eof
+
+:zkfc
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS%
+ goto :eof
+
+:secondarynamenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS%
+ goto :eof
+
+:datanode
+ set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode
+ set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS%
+ goto :eof
+
+:dfs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:dfsadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:haadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:fsck
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:balancer
+ set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS%
+ goto :eof
+
+:jmxget
+ set CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+ goto :eof
+
+:classpath
+ set CLASS=org.apache.hadoop.util.Classpath
+ goto :eof
+
+:oiv
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+ goto :eof
+
+:oev
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+ goto :eof
+
+:fetchdt
+ set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+ goto :eof
+
+:getconf
+ set CLASS=org.apache.hadoop.hdfs.tools.GetConf
+ goto :eof
+
+:groups
+ set CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+ goto :eof
+
+:snapshotDiff
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+ goto :eof
+
+:lsSnapshottableDir
+ set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+ goto :eof
+
+:cacheadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+ goto :eof
+
+:mover
+ set CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
+ goto :eof
+
+:storagepolicies
+ set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if "%1" == "--loglevel" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _hdfsarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hdfsarguments (
+ set _hdfsarguments=%1
+ ) else (
+ set _hdfsarguments=!_hdfsarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hdfs-command-arguments=%_hdfsarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND
+ @echo where COMMAND is one of:
+ @echo dfs run a filesystem command on the file systems supported in Hadoop.
+ @echo namenode -format format the DFS filesystem
+ @echo secondarynamenode run the DFS secondary namenode
+ @echo namenode run the DFS namenode
+ @echo journalnode run the DFS journalnode
+ @echo zkfc run the ZK Failover Controller daemon
+ @echo datanode run a DFS datanode
+ @echo dfsadmin run a DFS admin client
+ @echo haadmin run a DFS HA admin client
+ @echo fsck run a DFS filesystem checking utility
+ @echo balancer run a cluster balancing utility
+ @echo jmxget get JMX exported values from NameNode or DataNode.
+ @echo oiv apply the offline fsimage viewer to an fsimage
+ @echo oev apply the offline edits viewer to an edits file
+ @echo fetchdt fetch a delegation token from the NameNode
+ @echo getconf get config values from configuration
+ @echo groups get the groups which users belong to
+ @echo snapshotDiff diff two snapshots of a directory or diff the
+ @echo current directory contents with a snapshot
+ @echo lsSnapshottableDir list all snapshottable dirs owned by the current user
+ @echo Use -help to see options
+ @echo cacheadmin configure the HDFS cache
+ @echo mover run a utility to move block replicas across storage types
+ @echo storagepolicies list/get/set block storage policies
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MPE/hadoop-2.7.1/bin/ini_hdfs.sh b/MPE/hadoop-2.7.1/bin/ini_hdfs.sh
new file mode 100644
index 0000000..e6e0f9d
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/ini_hdfs.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# ini_hdfs.sh - one-time HDFS initialisation helpers.
+#   ini_hdfs.sh namenode   format the DFS filesystem
+#   ini_hdfs.sh zkfc       format the ZKFC state in ZooKeeper
+# Prints "yes" on success and "no" on failure.
+
+# NOTE(review): MASTER_IP/SLAVE1_IP are currently unused (the scp that
+# referenced them is commented out); kept for reference.
+MASTER_IP=192.168.20.223
+SLAVE1_IP=192.168.20.224
+
+BASE_DIR=/data/tsg/olap
+VERSION=hadoop-2.7.1
+
+# Format the DFS filesystem.  "yes |" auto-confirms the re-format prompt.
+function ini_namenode() {
+cd "$BASE_DIR/$VERSION/bin" || { echo no; return 1; }
+if yes | ./hadoop namenode -format; then
+# scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
+  echo yes
+else
+  echo no
+fi
+}
+
+# Format the ZK Failover Controller znode in ZooKeeper.
+function ini_zk() {
+cd "$BASE_DIR/$VERSION/bin" || { echo no; return 1; }
+if yes | ./hdfs zkfc -formatZK; then
+  echo yes
+else
+  echo no
+fi
+}
+
+# BUG FIX: the original patterns "[namenode]*" and "[zkfc]*" were bracket
+# character classes, so any argument starting with one of those letters
+# matched (e.g. "datanode" hit the namenode branch).  Match the literal
+# command words instead.
+case $1 in
+namenode)
+ini_namenode
+;;
+zkfc)
+ini_zk
+;;
+* )
+echo "请输入已有的指令."
+;;
+esac
+
diff --git a/MPE/hadoop-2.7.1/bin/mapred b/MPE/hadoop-2.7.1/bin/mapred
new file mode 100644
index 0000000..fe16e07
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/mapred
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Resolve the absolute directory this script lives in.
+# NOTE(review): `which $0` only resolves when the script is invoked via
+# PATH or an absolute path; the sibling scripts use ${BASH_SOURCE-$0},
+# which also handles relative invocation — confirm which is intended.
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+# Source mapred-config.sh (defines JAVA, HADOOP_* homes, MAPRED_DIR etc.),
+# preferring an explicitly exported HADOOP_LIBEXEC_DIR over the default.
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+  . ${HADOOP_LIBEXEC_DIR}/mapred-config.sh
+else
+  . "$bin/mapred-config.sh"
+fi
+
+# Print the mapred command usage summary to stdout.
+function print_usage(){
+  echo "Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND"
+  echo "       where COMMAND is one of:"
+  echo "  pipes                run a Pipes job"
+  echo "  job                  manipulate MapReduce jobs"
+  echo "  queue                get information regarding JobQueues"
+  echo "  classpath            prints the class path needed for running"
+  echo "                       mapreduce subcommands"
+  echo "  historyserver        run job history servers as a standalone daemon"
+  echo "  distcp <srcurl> <desturl> copy file or directories recursively"
+  echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+  echo "  hsadmin              job history server admin interface"
+  echo ""
+  echo "Most commands print help when invoked w/o parameters."
+}
+
+# With no arguments, show usage and exit successfully.
+if [ $# = 0 ]; then
+  print_usage
+  exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+  # usage flags
+  --help|-help|-h)
+    print_usage
+    exit
+    ;;
+esac
+
+# Map the subcommand to the Java class to execute; client-side commands
+# additionally pick up HADOOP_CLIENT_OPTS.
+if [ "$COMMAND" = "job" ] ; then
+  CLASS=org.apache.hadoop.mapred.JobClient
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+  CLASS=org.apache.hadoop.mapred.JobQueueClient
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "pipes" ] ; then
+  CLASS=org.apache.hadoop.mapred.pipes.Submitter
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "sampler" ] ; then
+  CLASS=org.apache.hadoop.mapred.lib.InputSampler
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "classpath" ] ; then
+  # CLASS is chosen later, after the classpath has been assembled.
+  echo -n
+elif [ "$COMMAND" = "historyserver" ] ; then
+  CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+  HADOOP_OPTS="$HADOOP_OPTS -Dmapred.jobsummary.logger=${HADOOP_JHS_LOGGER:-INFO,console} $HADOOP_JOB_HISTORYSERVER_OPTS"
+  if [ "$HADOOP_JOB_HISTORYSERVER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$HADOOP_JOB_HISTORYSERVER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "mradmin" ] \
+    || [ "$COMMAND" = "jobtracker" ] \
+    || [ "$COMMAND" = "tasktracker" ] \
+    || [ "$COMMAND" = "groups" ] ; then
+  echo "Sorry, the $COMMAND command is no longer supported."
+  echo "You may find similar functionality with the \"yarn\" shell command."
+  print_usage
+  exit 1
+elif [ "$COMMAND" = "distcp" ] ; then
+  CLASS=org.apache.hadoop.tools.DistCp
+  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "archive" ] ; then
+  CLASS=org.apache.hadoop.tools.HadoopArchives
+  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "hsadmin" ] ; then
+  CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+else
+  # BUG FIX: $COMMAND was unquoted here, so a command containing spaces
+  # or glob characters was word-split/expanded before echoing.
+  echo "$COMMAND - invalid command"
+  print_usage
+  exit 1
+fi
+
+# for developers, add mapred classes to CLASSPATH
+if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools
+fi
+
+# for releases, add core mapred jar & webapps to CLASSPATH
+if [ -d "$HADOOP_PREFIX/${MAPRED_DIR}/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/${MAPRED_DIR}
+fi
+for f in $HADOOP_MAPRED_HOME/${MAPRED_DIR}/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# Need YARN jars also
+for f in $HADOOP_YARN_HOME/${YARN_DIR}/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/${MAPRED_LIB_JARS_DIR}/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add modules to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/modules/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# "mapred classpath" with no extra args just prints the assembled path;
+# with args it delegates to the Classpath utility class below.
+if [ "$COMMAND" = "classpath" ] ; then
+  if [ "$#" -gt 0 ]; then
+    CLASS=org.apache.hadoop.util.Classpath
+  else
+    if $cygwin; then
+      CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+    fi
+    echo $CLASSPATH
+    exit 0
+  fi
+fi
+
+# cygwin path translation
+# NOTE(review): $cygwin is presumably set by mapred-config.sh/hadoop-config.sh
+# sourced above — it is not defined in this file.
+if $cygwin; then
+  CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+  HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+  HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+  HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+  HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+  HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+  HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+  HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+# Replace this shell with the JVM running the selected class.
+export CLASSPATH
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
diff --git a/MPE/hadoop-2.7.1/bin/mapred.cmd b/MPE/hadoop-2.7.1/bin/mapred.cmd
new file mode 100644
index 0000000..550b1ed
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/mapred.cmd
@@ -0,0 +1,216 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop mapred command script
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+  set HADOOP_BIN_PATH=%~dp0
+)
+
+@rem BUG FIX: the substring expression was "%HADOOP_BIN_PATH:~`%" (a stray
+@rem backtick), which never matched; "~-1" extracts the last character so a
+@rem trailing backslash can be stripped (same idiom as yarn.cmd).
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+@rem mapred-config.cmd defines JAVA, JAVA_HEAP_MAX and the HADOOP_* homes.
+call %DEFAULT_LIBEXEC_DIR%\mapred-config.cmd %*
+if "%1" == "--config" (
+  shift
+  shift
+)
+if "%1" == "--loglevel" (
+  shift
+  shift
+)
+
+:main
+  if exist %MAPRED_CONF_DIR%\mapred-env.cmd (
+    call %MAPRED_CONF_DIR%\mapred-env.cmd
+  )
+  set mapred-command=%1
+  call :make_command_arguments %*
+
+  if not defined mapred-command (
+      goto print_usage
+  )
+
+  @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+  if defined MAPRED_HEAPSIZE (
+    @rem echo run with Java heapsize %MAPRED_HEAPSIZE%
+    @rem BUG FIX: this used to set JAVA_HEAP_SIZE, which nothing reads;
+    @rem the java command line below uses %JAVA_HEAP_MAX%.
+    set JAVA_HEAP_MAX=-Xmx%MAPRED_HEAPSIZE%m
+  )
+
+  @rem CLASSPATH initially contains HADOOP_CONF_DIR and MAPRED_CONF_DIR
+  if not defined HADOOP_CONF_DIR (
+    echo NO HADOOP_CONF_DIR set.
+    echo Please specify it either in mapred-env.cmd or in the environment.
+    goto :eof
+  )
+
+  set CLASSPATH=%HADOOP_CONF_DIR%;%MAPRED_CONF_DIR%;%CLASSPATH%
+
+  @rem for developers, add Hadoop classes to CLASSPATH
+  if exist %HADOOP_MAPRED_HOME%\build\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\classes
+  )
+
+  if exist %HADOOP_MAPRED_HOME%\build\webapps (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build
+  )
+
+  if exist %HADOOP_MAPRED_HOME%\build\test\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\test\classes
+  )
+
+  if exist %HADOOP_MAPRED_HOME%\build\tools (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\tools
+  )
+
+  @rem Need YARN jars also
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+
+  @rem add libs to CLASSPATH
+  set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
+
+  @rem add modules to CLASSPATH
+  set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\*
+
+  if %mapred-command% == classpath (
+    if not defined mapred-command-arguments (
+      @rem No need to bother starting up a JVM for this simple case.
+      @echo %CLASSPATH%
+      exit /b
+    )
+  )
+
+  @rem Dispatch to the label named after the subcommand to set CLASS,
+  @rem then launch the JVM.
+  call :%mapred-command% %mapred-command-arguments%
+  set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %mapred-command-arguments%
+  call %JAVA% %java_arguments%
+
+goto :eof
+
+
+:classpath
+  set CLASS=org.apache.hadoop.util.Classpath
+  goto :eof
+
+:job
+  set CLASS=org.apache.hadoop.mapred.JobClient
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
+:queue
+  set CLASS=org.apache.hadoop.mapred.JobQueueClient
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
+:sampler
+  set CLASS=org.apache.hadoop.mapred.lib.InputSampler
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
+:historyserver
+  set CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+  set HADOOP_OPTS=%HADOOP_OPTS% -Dmapred.jobsummary.logger=%HADOOP_JHS_LOGGER% %HADOOP_JOB_HISTORYSERVER_OPTS%
+  if defined HADOOP_JOB_HISTORYSERVER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%HADOOP_JOB_HISTORYSERVER_HEAPSIZE%m
+  )
+  goto :eof
+
+:distcp
+  set CLASS=org.apache.hadoop.tools.DistCp
+  @rem BUG FIX: was %TOO_PATH% (never defined); hadoop-config.cmd defines TOOL_PATH.
+  set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
+:archive
+  @rem BUG FIX: class name was misspelled "org.apache.hadop..."; also added
+  @rem the missing "goto :eof" that let execution fall through into :hsadmin.
+  set CLASS=org.apache.hadoop.tools.HadoopArchives
+  set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  goto :eof
+
+:hsadmin
+  set CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
+  set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+  @rem BUG FIX: without this, control fell through to :pipes -> not_supported.
+  goto :eof
+
+:pipes
+  goto not_supported
+
+:mradmin
+  goto not_supported
+
+:jobtracker
+  goto not_supported
+
+:tasktracker
+  goto not_supported
+
+:groups
+  goto not_supported
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+@rem Collects every argument after the subcommand (skipping a leading
+@rem --config/--loglevel pair) into mapred-command-arguments, using
+@rem delayed expansion (!var!) to accumulate inside the loop.
+:make_command_arguments
+  if [%2] == [] goto :eof
+  if "%1" == "--config" (
+    shift
+    shift
+  )
+  if "%1" == "--loglevel" (
+    shift
+    shift
+  )
+  @rem drop the subcommand itself; the rest are its arguments
+  shift
+  set _mapredarguments=
+  :MakeCmdArgsLoop
+  if [%1]==[] goto :EndLoop
+
+  if not defined _mapredarguments (
+    set _mapredarguments=%1
+  ) else (
+    set _mapredarguments=!_mapredarguments! %1
+  )
+  shift
+  goto :MakeCmdArgsLoop
+  :EndLoop
+  set mapred-command-arguments=%_mapredarguments%
+  goto :eof
+
+:not_supported
+  @rem BUG FIX: this script stores the subcommand in "mapred-command";
+  @rem %COMMAND% is never set here and printed literally.
+  @echo Sorry, the %mapred-command% command is no longer supported.
+  @echo You may find similar functionality with the "yarn" shell command.
+  goto print_usage
+
+:print_usage
+  @rem Print the mapred usage summary; falls through to endlocal and exit.
+  @echo Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND
+  @echo        where COMMAND is one of:
+  @echo   job                  manipulate MapReduce jobs
+  @echo   queue                get information regarding JobQueues
+  @echo   classpath            prints the class path needed for running
+  @echo                        mapreduce subcommands
+  @echo   historyserver        run job history servers as a standalone daemon
+  @echo   distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+  @echo   archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+  @echo   hsadmin              job history server admin interface
+  @rem NOTE(review): bare "@echo" prints "ECHO is off." rather than a blank
+  @rem line; "@echo." is probably intended here — confirm.
+  @echo
+  @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/MPE/hadoop-2.7.1/bin/rcc b/MPE/hadoop-2.7.1/bin/rcc
new file mode 100644
index 0000000..22bffff
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/rcc
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop record compiler
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HADOOP_OPTS Extra Java runtime options.
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+#
+
+# Resolve the directory containing this script.
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+# hadoop-config.sh defines HADOOP_CONF_DIR, CLASSPATH, etc.
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Require JAVA_HOME.  (The original also contained a no-op
+# "JAVA_HOME=$JAVA_HOME" self-assignment, removed here.)
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# restore ordinary behaviour
+unset IFS
+
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+
+# run it: replace this shell with the record-compiler JVM
+exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/MPE/hadoop-2.7.1/bin/set_hdfs_env.sh b/MPE/hadoop-2.7.1/bin/set_hdfs_env.sh
new file mode 100644
index 0000000..8567c55
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/set_hdfs_env.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# set_hdfs_env.sh - register and (re)start the HDFS keepalive services.
+# Usage: set_hdfs_env.sh {journal|master|slave|worker|chkconfig}
+
+source /etc/profile
+
+# Install the hadoop profile snippet and enable the keepalive init scripts
+# at boot.  The append is guarded so repeated runs do not write duplicate
+# entries into /etc/profile.d/hadoop.sh (the original appended every time).
+function setChkconfig(){
+if ! grep -q 'HADOOP_HOME=/data/tsg/olap/hadoop-2.7.1' /etc/profile.d/hadoop.sh 2>/dev/null; then
+echo -e "\n#hadoop\nexport HADOOP_HOME=/data/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+fi
+chmod +x /etc/profile.d/hadoop.sh
+
+for svc in keephdfsmaster keephdfsslave keephdfsworker keephdfsjournal; do
+  if [ -x "/etc/init.d/$svc" ];then
+    chkconfig --add "$svc"
+    chkconfig "$svc" on
+  fi
+done
+}
+
+# Start one keepalive service and, if its watchdog daemon is not already
+# running, launch it in the background.  pgrep -f replaces the original
+# "ps -ef | grep | grep -v grep | wc -l" pipeline.
+function start_with_watchdog(){
+local svc=$1 dae=$2
+if [ -x "/etc/init.d/$svc" ];then
+  service "$svc" start && sleep 5
+  if ! pgrep -f "$dae" > /dev/null; then
+    nohup "/data/tsg/olap/hadoop-2.7.1/sbin/$dae" > /dev/null 2>&1 &
+  fi
+fi
+}
+
+case $1 in
+journal)
+start_with_watchdog keephdfsjournal dae-hdfsjournal.sh
+;;
+master)
+start_with_watchdog keephdfsmaster dae-hdfsmaster.sh
+;;
+slave)
+start_with_watchdog keephdfsslave dae-hdfsslave.sh
+;;
+worker)
+start_with_watchdog keephdfsworker dae-hdfsworker.sh
+;;
+chkconfig)
+  setChkconfig;;
+* )
+;;
+esac
diff --git a/MPE/hadoop-2.7.1/bin/set_yarn_env.sh b/MPE/hadoop-2.7.1/bin/set_yarn_env.sh
new file mode 100644
index 0000000..84ada4e
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/set_yarn_env.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+#
+# set_yarn_env.sh - register and (re)start the YARN keepalive services.
+# Usage: set_yarn_env.sh {history|master|worker|chkconfig}
+
+source /etc/profile
+
+# Install the hadoop profile snippet and enable the keepalive init scripts
+# at boot.  Guarded so repeated runs do not append duplicate entries to
+# /etc/profile.d/hadoop.sh (the original appended unconditionally).
+function setChkconfig(){
+if ! grep -q 'HADOOP_HOME=/data/tsg/olap/hadoop-2.7.1' /etc/profile.d/hadoop.sh 2>/dev/null; then
+echo -e "\n#hadoop\nexport HADOOP_HOME=/data/tsg/olap/hadoop-2.7.1\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
+fi
+chmod +x /etc/profile.d/hadoop.sh
+
+for svc in keepyarnhistory keepyarnmaster keepyarnworker; do
+  if [ -x "/etc/init.d/$svc" ];then
+    chkconfig --add "$svc"
+    chkconfig "$svc" on
+  fi
+done
+}
+
+# Start one keepalive service and, if its watchdog daemon is not already
+# running, launch it in the background.  pgrep -f replaces the original
+# "ps -ef | grep | grep -v grep | wc -l" pipeline.
+function start_with_watchdog(){
+local svc=$1 dae=$2
+if [ -x "/etc/init.d/$svc" ];then
+  service "$svc" start && sleep 5
+  if ! pgrep -f "$dae" > /dev/null; then
+    nohup "/data/tsg/olap/hadoop-2.7.1/sbin/$dae" > /dev/null 2>&1 &
+  fi
+fi
+}
+
+case $1 in
+history)
+start_with_watchdog keepyarnhistory dae-yarnhistory.sh
+;;
+master)
+start_with_watchdog keepyarnmaster dae-yarnmaster.sh
+;;
+worker)
+start_with_watchdog keepyarnworker dae-yarnworker.sh
+;;
+chkconfig)
+  setChkconfig;;
+* )
+;;
+esac
+
diff --git a/MPE/hadoop-2.7.1/bin/test-container-executor b/MPE/hadoop-2.7.1/bin/test-container-executor
new file mode 100644
index 0000000..df8c3db
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/test-container-executor
Binary files differ
diff --git a/MPE/hadoop-2.7.1/bin/yarn b/MPE/hadoop-2.7.1/bin/yarn
new file mode 100644
index 0000000..0e4c5a2
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/yarn
@@ -0,0 +1,330 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop command script
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# YARN_USER_CLASSPATH Additional user CLASSPATH entries.
+#
+# YARN_USER_CLASSPATH_FIRST If set to non empty value then the user classpath
+# specified in YARN_USER_CLASSPATH will be
+# appended at the beginning of YARN's final
+# classpath instead of at the end.
+#
+# YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+# eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+# size for the NodeManager. If you set the
+# heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+# they take precedence.
+#
+# YARN_OPTS Extra Java runtime options.
+#
+# YARN_CLIENT_OPTS when the respective command is run.
+# YARN_{COMMAND}_OPTS etc YARN_NODEMANAGER_OPTS applies to NodeManager
+# for e.g. YARN_CLIENT_OPTS applies to
+# more than one command (fs, dfs, fsck,
+# dfsadmin etc)
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+#
+# YARN_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# Print the yarn command usage summary to stdout.
+function print_usage(){
+  echo "Usage: yarn [--config confdir] [COMMAND | CLASSNAME]"
+  echo "  CLASSNAME                             run the class named CLASSNAME"
+  echo " or"
+  echo "  where COMMAND is one of:"
+  echo "  resourcemanager -format-state-store   deletes the RMStateStore"
+  echo "  resourcemanager                       run the ResourceManager"
+  echo "  nodemanager                           run a nodemanager on each slave"
+  echo "  timelineserver                        run the timeline server"
+  echo "  rmadmin                               admin tools"
+  echo "  sharedcachemanager                    run the SharedCacheManager daemon"
+  echo "  scmadmin                              SharedCacheManager admin tools"
+  echo "  version                               print the version"
+  echo "  jar <jar>                             run a jar file"
+  echo "  application                           prints application(s)"
+  echo "                                        report/kill application"
+  echo "  applicationattempt                    prints applicationattempt(s)"
+  echo "                                        report"
+  echo "  container                             prints container(s) report"
+  echo "  node                                  prints node report(s)"
+  echo "  queue                                 prints queue information"
+  echo "  logs                                  dump container logs"
+  echo "  classpath                             prints the class path needed to"
+  echo "                                        get the Hadoop jar and the"
+  echo "                                        required libraries"
+  echo "  cluster                               prints cluster information"
+  echo "  daemonlog                             get/set the log level for each"
+  echo "                                        daemon"
+  echo ""
+  echo "Most commands print help when invoked w/o parameters."
+}
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+  print_usage
+  exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+case $COMMAND in
+  # usage flags
+  --help|-help|-h)
+    print_usage
+    exit
+    ;;
+esac
+
+# yarn-env.sh may override JAVA_HOME, YARN_* opts and heap sizes.
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+  . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+# Require JAVA_HOME.  (The original also contained a no-op
+# "JAVA_HOME=$JAVA_HOME" self-assignment, removed here.)
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  #echo "run with heapsize $YARN_HEAPSIZE"
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+  #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR & $YARN_CONF_DIR
+if [ ! -d "$HADOOP_CONF_DIR" ]; then
+  echo No HADOOP_CONF_DIR set.
+  echo Please specify it either in yarn-env.sh or in the environment.
+  exit 1
+fi
+
+CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$HADOOP_YARN_HOME/yarn-api/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-api/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-common/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-mapreduce/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-mapreduce/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-master-worker/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-master-worker/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-applicationhistoryservice/target/classes
+fi
+# NOTE(review): the next test checks build/test/classes but appends
+# target/test/classes — one of the two paths looks wrong; confirm.
+if [ -d "$HADOOP_YARN_HOME/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/target/test/classes
+fi
+if [ -d "$HADOOP_YARN_HOME/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/tools
+fi
+
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_DIR}/*
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_LIB_JARS_DIR}/*
+
+# Add user defined YARN_USER_CLASSPATH to the class path (if defined)
+if [ -n "$YARN_USER_CLASSPATH" ]; then
+  if [ -n "$YARN_USER_CLASSPATH_FIRST" ]; then
+    # User requested to add the custom entries at the beginning
+    CLASSPATH=${YARN_USER_CLASSPATH}:${CLASSPATH}
+  else
+    # By default we will just append the extra entries at the end
+    CLASSPATH=${CLASSPATH}:${YARN_USER_CLASSPATH}
+  fi
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+# (an unrecognized COMMAND falls through to the final else and is treated
+# as a fully-qualified class name)
+if [ "$COMMAND" = "classpath" ] ; then
+  if [ "$#" -gt 0 ]; then
+    CLASS=org.apache.hadoop.util.Classpath
+  else
+    if $cygwin; then
+      CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+    fi
+    echo $CLASSPATH
+    exit 0
+  fi
+elif [ "$COMMAND" = "rmadmin" ] ; then
+  CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "scmadmin" ] ; then
+  CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "application" ] ||
+     [ "$COMMAND" = "applicationattempt" ] ||
+     [ "$COMMAND" = "container" ]; then
+  CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+  # re-prepend the subcommand so ApplicationCLI can tell the three apart
+  # NOTE(review): $COMMAND/$@ are deliberately unquoted here (upstream
+  # idiom); arguments containing spaces would be re-split.
+  set -- $COMMAND $@
+elif [ "$COMMAND" = "node" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "queue" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "resourcemanager" ] ; then
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rm-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+  YARN_OPTS="$YARN_OPTS $YARN_RESOURCEMANAGER_OPTS $YARN_RESOURCEMANAGER_JMX_OPTS"
+  if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_RESOURCEMANAGER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "historyserver" ] ; then
+  echo "DEPRECATED: Use of this command to start the timeline server is deprecated." 1>&2
+  echo "Instead use the timelineserver command for it." 1>&2
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+  YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
+  if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "timelineserver" ] ; then
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/timelineserver-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+  YARN_OPTS="$YARN_OPTS $YARN_TIMELINESERVER_OPTS"
+  if [ "$YARN_TIMELINESERVER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_TIMELINESERVER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "sharedcachemanager" ] ; then
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/scm-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
+  YARN_OPTS="$YARN_OPTS $YARN_SHAREDCACHEMANAGER_OPTS"
+  if [ "$YARN_SHAREDCACHEMANAGER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_SHAREDCACHEMANAGER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "nodemanager" ] ; then
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/nm-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+  YARN_OPTS="$YARN_OPTS -server $YARN_NODEMANAGER_OPTS $YARN_NODEMANAGER_JMX_OPTS"
+  if [ "$YARN_NODEMANAGER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_NODEMANAGER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "proxyserver" ] ; then
+  CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+  YARN_OPTS="$YARN_OPTS $YARN_PROXYSERVER_OPTS"
+  if [ "$YARN_PROXYSERVER_HEAPSIZE" != "" ]; then
+    JAVA_HEAP_MAX="-Xmx""$YARN_PROXYSERVER_HEAPSIZE""m"
+  fi
+elif [ "$COMMAND" = "version" ] ; then
+  CLASS=org.apache.hadoop.util.VersionInfo
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "jar" ] ; then
+  CLASS=org.apache.hadoop.util.RunJar
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "logs" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "daemonlog" ] ; then
+  CLASS=org.apache.hadoop.log.LogLevel
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "cluster" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+else
+  # fall back to running the argument as a class name
+  CLASS=$COMMAND
+fi
+
+# cygwin path translation
+if $cygwin; then
+  CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+  HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+  HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+  HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+  HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+  HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+  HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+  HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+HADOOP_HOME=$HADOOP_PREFIX
+if $cygwin; then
+  HADOOP_HOME=$(cygpath -w "$HADOOP_HOME" 2>/dev/null)
+fi
+export HADOOP_HOME
+YARN_OPTS="$YARN_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
+# NOTE(review): the logger default uses ${HADOOP_LOGLEVEL}, presumably set
+# by yarn-config.sh; if it is unset the default becomes ",console" — confirm.
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  if $cygwin; then
+    JAVA_LIBRARY_PATH=$(cygpath -w "$JAVA_LIBRARY_PATH" 2>/dev/null)
+  fi
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+# Replace this shell with the JVM running the selected class.
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/MPE/hadoop-2.7.1/bin/yarn.cmd b/MPE/hadoop-2.7.1/bin/yarn.cmd
new file mode 100644
index 0000000..3cd57a7
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/yarn.cmd
@@ -0,0 +1,332 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop command script
+@rem
+@rem Environment Variables
+@rem
@rem   JAVA_HOME            The java implementation to use.
+@rem
+@rem YARN_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+@rem eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+@rem size for the NodeManager. If you set the
+@rem heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+@rem they take precedence.
+@rem
+@rem YARN_OPTS Extra Java runtime options.
+@rem
@rem   YARN_CLIENT_OPTS     Extra options applied when a client-side
@rem                        command is run.
@rem   YARN_{COMMAND}_OPTS  Extra options for one specific command, e.g.
@rem                        YARN_NODEMANAGER_OPTS applies only to the
@rem                        NodeManager.
+@rem
+@rem YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+@rem
+@rem YARN_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
@rem Scope all environment changes to this script; delayed (!var!)
@rem expansion is required by :make_command_arguments below.
setlocal enabledelayedexpansion

if not defined HADOOP_BIN_PATH (
  set HADOOP_BIN_PATH=%~dp0
)

@rem Strip a trailing backslash so later path concatenation stays clean.
if "%HADOOP_BIN_PATH:~-1%" == "\" (
  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
)

set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
if not defined HADOOP_LIBEXEC_DIR (
  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
)

@rem Fix: honour a user-supplied HADOOP_LIBEXEC_DIR.  The original always
@rem called %DEFAULT_LIBEXEC_DIR%\yarn-config.cmd, which made the override
@rem set up just above dead code.
call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
@rem yarn-config.cmd already consumed the option values via %*; drop the
@rem flag and its value from our own positional parameters as well.
if "%1" == "--config" (
  shift
  shift
)
if "%1" == "--loglevel" (
  shift
  shift
)
+
:main
  @rem Dispatch entry point: load user overrides, build the classpath,
  @rem resolve the subcommand to a main class, and launch the JVM.
  if exist %YARN_CONF_DIR%\yarn-env.cmd (
    call %YARN_CONF_DIR%\yarn-env.cmd
  )

  @rem First positional parameter is the subcommand; the remainder become
  @rem yarn-command-arguments via :make_command_arguments.
  set yarn-command=%1
  call :make_command_arguments %*

  if not defined yarn-command (
      goto print_usage
  )

  @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd

  if defined YARN_HEAPSIZE (
    @rem echo run with Java heapsize %YARN_HEAPSIZE%
    set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
  )

  @rem CLASSPATH initially contains HADOOP_CONF_DIR & YARN_CONF_DIR
  if not defined HADOOP_CONF_DIR (
    echo No HADOOP_CONF_DIR set.
    echo Please specify it either in yarn-env.cmd or in the environment.
    goto :eof
  )

  set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH%

  @rem for developers, add Hadoop classes to CLASSPATH
  if exist %HADOOP_YARN_HOME%\yarn-api\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-api\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-common\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-common\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-mapreduce\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-mapreduce\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-master-worker\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-master-worker\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes
  )

  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes
  )

  if exist %HADOOP_YARN_HOME%\build\test\classes (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
  )

  if exist %HADOOP_YARN_HOME%\build\tools (
    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
  )

  @rem Release layout: every YARN jar plus its library dependencies.
  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*

  @rem Bare "yarn classpath" is answered directly, without starting a JVM.
  if %yarn-command% == classpath (
    if not defined yarn-command-arguments (
      @rem No need to bother starting up a JVM for this simple case.
      @echo %CLASSPATH%
      exit /b
    )
  )

  @rem Known subcommands dispatch to the matching :label below; anything
  @rem else is treated as a fully-qualified class name, with %CD% appended
  @rem to the classpath so user classes in the current directory resolve.
  set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
     application applicationattempt cluster container node queue logs daemonlog historyserver ^
     timelineserver classpath
  for %%i in ( %yarncommands% ) do (
    if %yarn-command% == %%i set yarncommand=true
  )
  if defined yarncommand (
    call :%yarn-command%
  ) else (
    set CLASSPATH=%CLASSPATH%;%CD%
    set CLASS=%yarn-command%
  )

  if defined JAVA_LIBRARY_PATH (
    set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
  )

  @rem NOTE(review): java_arguments is expanded unquoted, so a CLASSPATH
  @rem containing spaces would break this launch - confirm acceptable.
  set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments%
  call %JAVA% %java_arguments%

goto :eof
+
@rem ---- client-side subcommand handlers --------------------------------
@rem Each handler sets CLASS to the CLI entry point, folds the generic
@rem client options into YARN_OPTS, and returns to :main.

:classpath
  @rem Reached only when "yarn classpath" was given extra arguments;
  @rem the bare form is short-circuited in :main.
  set CLASS=org.apache.hadoop.util.Classpath
  goto :eof

:rmadmin
  set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:application
  @rem ApplicationCLI serves application, applicationattempt and container,
  @rem so the subcommand name is re-prepended to the argument list for it
  @rem to dispatch on.
  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
  goto :eof

:applicationattempt
  @rem Same multiplexed ApplicationCLI entry point as :application.
  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
  goto :eof

:cluster
  set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:container
  @rem Same multiplexed ApplicationCLI entry point as :application.
  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
  goto :eof

:node
  set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:queue
  set CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof
+
@rem ---- daemon subcommand handlers -------------------------------------
@rem Each handler appends the daemon's log4j config to the classpath, sets
@rem the server main class, folds in per-daemon JVM options, and lets a
@rem per-daemon heapsize variable override JAVA_HEAP_MAX.

:resourcemanager
  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties
  set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
  set YARN_OPTS=%YARN_OPTS% %YARN_RESOURCEMANAGER_OPTS%
  if defined YARN_RESOURCEMANAGER_HEAPSIZE (
    set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m
  )
  goto :eof

:historyserver
  @rem Deprecated alias for :timelineserver; both launch the
  @rem ApplicationHistoryServer.
  @echo DEPRECATED: Use of this command to start the timeline server is deprecated. 1>&2
  @echo Instead use the timelineserver command for it. 1>&2
  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
  set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
  @rem NOTE(review): reads HADOOP_HISTORYSERVER_OPTS, while the heapsize
  @rem check below reads YARN_HISTORYSERVER_HEAPSIZE and the file header
  @rem documents YARN_{COMMAND}_OPTS - confirm which variable is intended.
  set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
  if defined YARN_HISTORYSERVER_HEAPSIZE (
    set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
  )
  goto :eof

:timelineserver
  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
  set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
  @rem NOTE(review): HADOOP_TIMELINESERVER_OPTS vs documented YARN_* naming,
  @rem same mismatch as :historyserver above.
  set YARN_OPTS=%YARN_OPTS% %HADOOP_TIMELINESERVER_OPTS%
  if defined YARN_TIMELINESERVER_HEAPSIZE (
    set JAVA_HEAP_MAX=-Xmx%YARN_TIMELINESERVER_HEAPSIZE%m
  )
  goto :eof

:nodemanager
  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
  set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
  @rem NOTE(review): HADOOP_NODEMANAGER_OPTS vs the header-documented
  @rem YARN_NODEMANAGER_OPTS - same mismatch as :historyserver above.
  set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS%
  if defined YARN_NODEMANAGER_HEAPSIZE (
    set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m
  )
  goto :eof

:proxyserver
  set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
  @rem NOTE(review): HADOOP_PROXYSERVER_OPTS vs documented YARN_* naming,
  @rem same mismatch as :historyserver above.
  set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS%
  if defined YARN_PROXYSERVER_HEAPSIZE (
    set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m
  )
  goto :eof
+
@rem ---- miscellaneous client handlers ----------------------------------
@rem Simple handlers: set the main class and fold in the client options.

:version
  set CLASS=org.apache.hadoop.util.VersionInfo
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:jar
  set CLASS=org.apache.hadoop.util.RunJar
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:logs
  set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof

:daemonlog
  set CLASS=org.apache.hadoop.log.LogLevel
  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
  goto :eof
+
@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
@rem Collects everything after the subcommand into yarn-command-arguments.
:make_command_arguments
  @rem Re-skip the --config/--loglevel pairs: cmd's %* is NOT affected by
  @rem the shifts performed at the top of the script, so the flags reappear
  @rem when :main forwards %* here.
  if "%1" == "--config" (
    shift
    shift
  )
  if "%1" == "--loglevel" (
    shift
    shift
  )
  @rem Only the subcommand itself was given - nothing to collect.
  if [%2] == [] goto :eof
  shift
  set _yarnarguments=
  :MakeCmdArgsLoop
  if [%1]==[] goto :EndLoop

  @rem Accumulate one argument per iteration; !_yarnarguments! needs the
  @rem delayed expansion enabled by setlocal at the top of the script.
  if not defined _yarnarguments (
    set _yarnarguments=%1
  ) else (
    set _yarnarguments=!_yarnarguments! %1
  )
  shift
  goto :MakeCmdArgsLoop
  :EndLoop
  set yarn-command-arguments=%_yarnarguments%
  goto :eof
+
:print_usage
  @rem Print the supported subcommands.  Fix: proxyserver is dispatched by
  @rem :main (it appears in yarncommands and has a handler) but was missing
  @rem from this list.  historyserver stays unlisted because it is
  @rem deprecated in favour of timelineserver.
  @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
  @echo where COMMAND is one of:
  @echo   resourcemanager      run the ResourceManager
  @echo   nodemanager          run a nodemanager on each slave
  @echo   proxyserver          run the web app proxy server
  @echo   timelineserver       run the timeline server
  @echo   rmadmin              admin tools
  @echo   version              print the version
  @echo   jar ^<jar^>            run a jar file
  @echo   application          prints application(s) report/kill application
  @echo   applicationattempt   prints applicationattempt(s) report
  @echo   cluster              prints cluster information
  @echo   container            prints container(s) report
  @echo   node                 prints node report(s)
  @echo   queue                prints queue information
  @echo   logs                 dump container logs
  @echo   classpath            prints the class path needed to get the
  @echo                        Hadoop jar and the required libraries
  @echo   daemonlog            get/set the log level for each daemon
  @echo  or
  @echo   CLASSNAME            run the class named CLASSNAME
  @echo Most commands print help when invoked w/o parameters.

endlocal