Diffstat (limited to 'MPE/hadoop-2.7.1/bin/hdfs')
| -rw-r--r-- | MPE/hadoop-2.7.1/bin/hdfs | 308 |
1 file changed, 308 insertions, 0 deletions
diff --git a/MPE/hadoop-2.7.1/bin/hdfs b/MPE/hadoop-2.7.1/bin/hdfs
new file mode 100644
index 0000000..7f93738
--- /dev/null
+++ b/MPE/hadoop-2.7.1/bin/hdfs
@@ -0,0 +1,308 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Environment Variables
+#
+#   JSVC_HOME      home directory of jsvc binary.  Required for starting
+#                  secure datanode.
+#
+#   JSVC_OUTFILE   path to jsvc output file.  Defaults to
+#                  $HADOOP_LOG_DIR/jsvc.out.
+#
+#   JSVC_ERRFILE   path to jsvc error file.  Defaults to $HADOOP_LOG_DIR/jsvc.err.
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin" > /dev/null; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+function print_usage(){
+  echo "Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND"
+  echo "       where COMMAND is one of:"
+  echo "  dfs                  run a filesystem command on the file systems supported in Hadoop."
+  echo "  classpath            prints the classpath"
+  echo "  namenode -format     format the DFS filesystem"
+  echo "  secondarynamenode    run the DFS secondary namenode"
+  echo "  namenode             run the DFS namenode"
+  echo "  journalnode          run the DFS journalnode"
+  echo "  zkfc                 run the ZK Failover Controller daemon"
+  echo "  datanode             run a DFS datanode"
+  echo "  dfsadmin             run a DFS admin client"
+  echo "  haadmin              run a DFS HA admin client"
+  echo "  fsck                 run a DFS filesystem checking utility"
+  echo "  balancer             run a cluster balancing utility"
+  echo "  jmxget               get JMX exported values from NameNode or DataNode."
+  echo "  mover                run a utility to move block replicas across"
+  echo "                       storage types"
+  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
+  echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
+  echo "  oev                  apply the offline edits viewer to an edits file"
+  echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  getconf              get config values from configuration"
+  echo "  groups               get the groups which users belong to"
+  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
+  echo "                       current directory contents with a snapshot"
+  echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
+  echo "                       Use -help to see options"
+  echo "  portmap              run a portmap service"
+  echo "  nfs3                 run an NFS version 3 gateway"
+  echo "  cacheadmin           configure the HDFS cache"
+  echo "  crypto               configure HDFS encryption zones"
+  echo "  storagepolicies      list/get/set block storage policies"
+  echo "  version              print the version"
+  echo ""
+  echo "Most commands print help when invoked w/o parameters."
+  # There are also debug commands, but they don't show up in this listing.
+}
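+
+# Example invocation (hypothetical config dir and path): run a filesystem
+# command with an explicit configuration directory and log level, matching
+# the usage line above:
+#   hdfs --config /etc/hadoop/conf --loglevel DEBUG dfs -ls /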
+
+if [ $# = 0 ]; then
+  print_usage
+  exit
+fi
+
+COMMAND=$1
+shift
+
+case $COMMAND in
+  # usage flags
+  --help|-help|-h)
+    print_usage
+    exit
+    ;;
+esac
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  if [ -n "$JSVC_HOME" ]; then
+    if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+      HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+    fi
+
+    if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+      HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+    fi
+
+    HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+    starting_secure_dn="true"
+  else
+    echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+      "isn't set. Falling back to starting insecure DN."
+  fi
+fi
+
+# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
+if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+  if [ -n "$JSVC_HOME" ]; then
+    if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
+      HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+    fi
+
+    if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
+      HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+      HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+    fi
+
+    HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+    starting_privileged_nfs="true"
+  else
+    echo "It looks like you're trying to start a privileged NFS server, but"\
+      "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
+  fi
+fi
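+
+# Example (hypothetical values): to take either privileged path above, the
+# process must run as root and hadoop-env.sh would typically set something
+# like:
+#   export HADOOP_SECURE_DN_USER=hdfs
+#   export JSVC_HOME=/usr/lib/jsvc
+# jsvc can then bind the privileged ports (<1024) as root before dropping
+# to the unprivileged user named here.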
+
+if [ "$COMMAND" = "namenode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+#  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_JMX_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+  CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_JMX_OPTS"
+  if [ "$starting_secure_dn" = "true" ]; then
+    HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
+  else
+    HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
+  fi
+elif [ "$COMMAND" = "journalnode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+  CLASS=org.apache.hadoop.fs.FsShell
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "haadmin" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DFSck
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+  CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "mover" ] ; then
+  CLASS=org.apache.hadoop.hdfs.server.mover.Mover
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+elif [ "$COMMAND" = "jmxget" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+elif [ "$COMMAND" = "oiv" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+elif [ "$COMMAND" = "oiv_legacy" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+elif [ "$COMMAND" = "oev" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+elif [ "$COMMAND" = "fetchdt" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconf" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetConf
+elif [ "$COMMAND" = "groups" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+elif [ "$COMMAND" = "snapshotDiff" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+elif [ "$COMMAND" = "portmap" ] ; then
+  CLASS=org.apache.hadoop.portmap.Portmap
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
+elif [ "$COMMAND" = "nfs3" ] ; then
+  CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
+elif [ "$COMMAND" = "cacheadmin" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+elif [ "$COMMAND" = "crypto" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+elif [ "$COMMAND" = "version" ] ; then
+  CLASS=org.apache.hadoop.util.VersionInfo
+elif [ "$COMMAND" = "debug" ]; then
+  CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+elif [ "$COMMAND" = "classpath" ]; then
+  if [ "$#" -gt 0 ]; then
+    CLASS=org.apache.hadoop.util.Classpath
+  else
+    # No need to bother starting up a JVM for this simple case.
+    if $cygwin; then
+      CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+    fi
+    echo $CLASSPATH
+    exit 0
+  fi
+else
+  CLASS="$COMMAND"
+fi
+
+# cygwin path translation
+if $cygwin; then
+  CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+  HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
+  HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
+  HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
+  HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
+  HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
+  HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
+  HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
+fi
+
+export CLASSPATH=$CLASSPATH
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
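+# Example (hypothetical value): the NullAppender default above can be
+# overridden per invocation or in hadoop-env.sh, e.g. (assuming an RFAS
+# appender is defined in log4j.properties):
+#   HADOOP_SECURITY_LOGGER=INFO,RFAS hdfs namenode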
+
+# Check to see if we should start a secure datanode
+if [ "$starting_secure_dn" = "true" ]; then
+  if [ "$HADOOP_PID_DIR" = "" ]; then
+    HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
+  else
+    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+  fi
+
+  JSVC=$JSVC_HOME/jsvc
+  if [ ! -f "$JSVC" ]; then
+    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes."
+    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+      "and set JSVC_HOME to the directory containing the jsvc binary."
+    exit 1
+  fi
+
+  if [[ ! $JSVC_OUTFILE ]]; then
+    JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
+  fi
+
+  if [[ ! $JSVC_ERRFILE ]]; then
+    JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
+  fi
+
+  exec "$JSVC" \
+    -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+    -errfile "$JSVC_ERRFILE" \
+    -pidfile "$HADOOP_SECURE_DN_PID" \
+    -nodetach \
+    -user "$HADOOP_SECURE_DN_USER" \
+    -cp "$CLASSPATH" \
+    $JAVA_HEAP_MAX $HADOOP_OPTS \
+    org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
+elif [ "$starting_privileged_nfs" = "true" ] ; then
+  if [ "$HADOOP_PID_DIR" = "" ]; then
+    HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
+  else
+    HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
+  fi
+
+  JSVC=$JSVC_HOME/jsvc
+  if [ ! -f "$JSVC" ]; then
+    echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways."
+    echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
+      "and set JSVC_HOME to the directory containing the jsvc binary."
+    exit 1
+  fi
+
+  if [[ ! $JSVC_OUTFILE ]]; then
+    JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
+  fi
+
+  if [[ ! $JSVC_ERRFILE ]]; then
+    JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
+  fi
+
+  exec "$JSVC" \
+    -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
+    -errfile "$JSVC_ERRFILE" \
+    -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
+    -nodetach \
+    -user "$HADOOP_PRIVILEGED_NFS_USER" \
+    -cp "$CLASSPATH" \
+    $JAVA_HEAP_MAX $HADOOP_OPTS \
+    org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
+else
+  # run it
+  exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+fi
