Diffstat (limited to 'MSH-PIC/hadoop/sbin')
34 files changed, 2038 insertions, 0 deletions
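The six custom dae-*.sh watchdog scripts added below all follow the same pattern: every 60 seconds they poll ps for the daemon's process name, restart it through the stock hadoop-daemon.sh / yarn-daemon.sh / mr-jobhistory-daemon.sh wrappers when it is missing, and append the event to logs/restart.log together with a running restart count kept in a small per-daemon counter file. A minimal sketch of that loop, assuming the same /home/tsg/olap/hadoop-2.7.1 layout used by those scripts (the daemon name, start command and counter file below are illustrative placeholders, not part of the commit):

    #!/bin/bash
    # Watchdog sketch: restart a Hadoop daemon when it disappears from ps output.
    # BASE_DIR/VERSION mirror the dae-*.sh scripts; DAEMON, START_CMD and COUNTER
    # are placeholders chosen for this example.
    BASE_DIR=/home/tsg/olap
    VERSION=hadoop-2.7.1
    DAEMON=JournalNode                                   # JVM process name to watch
    START_CMD="$BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode"
    COUNTER="$BASE_DIR/$VERSION/logs/jnRes_sum"          # per-daemon restart counter
    LOG="$BASE_DIR/$VERSION/logs/restart.log"

    mkdir -p "$BASE_DIR/$VERSION/logs"

    while true; do
        if ! ps -ef | grep "$DAEMON" | grep -v grep > /dev/null; then
            yes | $START_CMD > /dev/null                 # restart the missing daemon
            [ -f "$COUNTER" ] || echo 0 > "$COUNTER"     # initialise the counter once
            COUNT=$(( $(cat "$COUNTER") + 1 ))
            echo "$COUNT" > "$COUNTER"
            echo "`date '+%Y-%m-%d %H:%M:%S'` - $DAEMON restarted - count $COUNT" >> "$LOG"
        fi
        sleep 60                                         # poll interval used by all six scripts
    done
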
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh b/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh
new file mode 100644
index 0000000..4ec61f4
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsjournal.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
+
+if [ $HAS_JN -eq "0" ];then
+    yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
+    set_log jnRes_sum JournalNode
+fi
+
+sleep 60
+done
+
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh b/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh
new file mode 100644
index 0000000..57f6519
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsmaster.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
+HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_NN -eq "0" ];then
+    yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
+    set_log nnRes_sum NameNode
+fi
+
+if [ $HAS_ZKFC -eq "0" ];then
+    yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
+    set_log zkfcRes_sum DFSZKFailoverController
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+#    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+#    set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh b/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh
new file mode 100644
index 0000000..d504768
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-hdfsworker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
+#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
+
+if [ $HAS_DN -eq "0" ];then
+    yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
+    set_log dnRes_sum DataNode
+fi
+
+#if [ $HAS_NM -eq "0" ];then
+#    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+#    set_log nmRes_sum NodeManager
+#fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh b/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh
new file mode 100644
index 0000000..f732d6f
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnhistory.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_HISTORY=`ps -ef | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l`
+
+if [ $HAS_HISTORY -eq "0" ];then
+    $BASE_DIR/$VERSION/sbin/mr-jobhistory-daemon.sh start historyserver > /dev/null
+    set_log historyRes_sum JobHistoryServer
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh b/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh
new file mode 100644
index 0000000..0cb98c2
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnmaster.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_RM=`ps -ef | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l`
+
+if [ $HAS_RM -eq "0" ];then
+    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
+    set_log rmRes_sum ResourceManager
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/dae-yarnworker.sh b/MSH-PIC/hadoop/sbin/dae-yarnworker.sh
new file mode 100644
index 0000000..a2db47f
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/dae-yarnworker.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+source /etc/profile
+
+BASE_DIR=/home/tsg/olap
+
+VERSION=hadoop-2.7.1
+
+function set_log(){
+RES_SUM_FILE=$BASE_DIR/$VERSION/logs
+
+if [ ! -d "$RES_SUM_FILE" ]
+then
+    mkdir -p $RES_SUM_FILE
+fi
+
+if [ ! -f "$RES_SUM_FILE/$1" ];then
+    echo "0" > $RES_SUM_FILE/$1
+fi
+
+OLD_NUM=`cat $RES_SUM_FILE/$1`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $RES_SUM_FILE/$1
+if [ $OLD_NUM -eq "0" ];then
+    echo "`date "+%Y-%m-%d %H:%M:%S"` - Yarn $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
+else
+    echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Yarn $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
+fi
+}
+
+
+while true ; do
+
+HAS_NM=`ps -ef | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l`
+
+if [ $HAS_NM -eq "0" ];then
+    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
+    set_log nmRes_sum NodeManager
+fi
+
+sleep 60
+done
diff --git a/MSH-PIC/hadoop/sbin/distribute-exclude.sh b/MSH-PIC/hadoop/sbin/distribute-exclude.sh
new file mode 100644
index 0000000..66fc14a
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/distribute-exclude.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ------------------------------------------------------------------
+#
+# The purpose of this script is to distribute the exclude file (see
+# "dfs.hosts.exclude" in hdfs-site.xml).
+#
+# Input of the script is a local exclude file. The exclude file
+# will be distributed to all the namenodes. The location on the namenodes
+# is determined by the configuration "dfs.hosts.exclude" in hdfs-site.xml
+# (this value is read from the local copy of hdfs-site.xml and must be same
+# on all the namenodes).
+#
+# The user running this script needs write permissions on the target
+# directory on namenodes.
+#
+# After this command, run refresh-namenodes.sh so that namenodes start
+# using the new exclude file.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$1" = '' ] ; then
+  echo "Error: please specify local exclude file as a first argument"
+  exit 1
+else
+  excludeFilenameLocal=$1
+fi
+
+if [ ! -f "$excludeFilenameLocal" ] ; then
+  echo "Error: exclude file [$excludeFilenameLocal] does not exist."
+  exit 1
+fi
+
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
+
+if [ "$excludeFilenameRemote" = '' ] ; then
+  echo \
+    "Error: hdfs getconf -excludeFile returned empty string, " \
+    "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \
+    "configuration and on all namenodes"
+  exit 1
+fi
+
+echo "Copying exclude file [$excludeFilenameRemote] to namenodes:"
+
+for namenode in $namenodes ; do
+  echo " [$namenode]"
+  scp "$excludeFilenameLocal" "$namenode:$excludeFilenameRemote"
+  if [ "$?"
!= '0' ] ; then errorFlag='1' ; fi +done + +if [ "$errorFlag" = '1' ] ; then + echo "Error: transfer of exclude file failed, see error messages above." + exit 1 +else + echo "Transfer of exclude file to all namenodes succeeded." +fi + +# eof diff --git a/MSH-PIC/hadoop/sbin/hadoop-daemon.sh b/MSH-PIC/hadoop/sbin/hadoop-daemon.sh new file mode 100644 index 0000000..6a4cd69 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/hadoop-daemon.sh @@ -0,0 +1,214 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Runs a Hadoop command as a daemon. +# +# Environment Variables +# +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf. +# HADOOP_LOG_DIR Where log files are stored. PWD by default. +# HADOOP_MASTER host:path where hadoop code should be rsync'd from +# HADOOP_PID_DIR The pid files are stored. /tmp by default. +# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default +# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0. +## + +usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>" + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hadoop-config.sh + +# get arguments + +#default value +hadoopScript="$HADOOP_PREFIX"/bin/hadoop +if [ "--script" = "$1" ] + then + shift + hadoopScript=$1 + shift +fi +startStop=$1 +shift +command=$1 +shift + +hadoop_rotate_log () +{ + log=$1; + num=5; + if [ -n "$2" ]; then + num=$2 + fi + if [ -f "$log" ]; then # rotate logs + while [ $num -gt 1 ]; do + prev=`expr $num - 1` + [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num" + num=$prev + done + mv "$log" "$log.$num"; + fi +} + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . 
"${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# Determine if we're starting a secure datanode, and if so, redefine appropriate variables +if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then + export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR + export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR + export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER + starting_secure_dn="true" +fi + +#Determine if we're starting a privileged NFS, if so, redefine the appropriate variables +if [ "$command" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then + export HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR + export HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR + export HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER + starting_privileged_nfs="true" +fi + +if [ "$HADOOP_IDENT_STRING" = "" ]; then + export HADOOP_IDENT_STRING="$USER" +fi + + +# get log directory +if [ "$HADOOP_LOG_DIR" = "" ]; then + export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs" +fi + +if [ ! -w "$HADOOP_LOG_DIR" ] ; then + mkdir -p "$HADOOP_LOG_DIR" + chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR +fi + +if [ "$HADOOP_PID_DIR" = "" ]; then + HADOOP_PID_DIR=/tmp +fi + +# some variables +export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log +export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"} +export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"} +export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"} +log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out +pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid +HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5} + +# Set default scheduling priority +if [ "$HADOOP_NICENESS" = "" ]; then + export HADOOP_NICENESS=0 +fi + +case $startStop in + + (start) + + [ -w "$HADOOP_PID_DIR" ] || mkdir -p "$HADOOP_PID_DIR" + + if [ -f $pid ]; then + if kill -0 `cat $pid` > /dev/null 2>&1; then + echo $command running as process `cat $pid`. Stop it first. + exit 1 + fi + fi + + if [ "$HADOOP_MASTER" != "" ]; then + echo rsync from $HADOOP_MASTER + rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX" + fi + + hadoop_rotate_log $log + echo starting $command, logging to $log + cd "$HADOOP_PREFIX" + case $command in + namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc) + if [ -z "$HADOOP_HDFS_HOME" ]; then + hdfsScript="$HADOOP_PREFIX"/bin/hdfs + else + hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs + fi + nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & + ;; + (*) + nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & + ;; + esac + echo $! > $pid + sleep 1 + head "$log" + # capture the ulimit output + if [ "true" = "$starting_secure_dn" ]; then + echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log + # capture the ulimit info for the appropriate user + su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1 + elif [ "true" = "$starting_privileged_nfs" ]; then + echo "ulimit -a for privileged nfs user $HADOOP_PRIVILEGED_NFS_USER" >> $log + su --shell=/bin/bash $HADOOP_PRIVILEGED_NFS_USER -c 'ulimit -a' >> $log 2>&1 + else + echo "ulimit -a for user $USER" >> $log + ulimit -a >> $log 2>&1 + fi + sleep 3; + if ! ps -p $! 
> /dev/null ; then + exit 1 + fi + ;; + + (stop) + + if [ -f $pid ]; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo stopping $command + kill $TARGET_PID + sleep $HADOOP_STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi + else + echo no $command to stop + fi + rm -f $pid + else + echo no $command to stop + fi + ;; + + (*) + echo $usage + exit 1 + ;; + +esac + + diff --git a/MSH-PIC/hadoop/sbin/hadoop-daemons.sh b/MSH-PIC/hadoop/sbin/hadoop-daemons.sh new file mode 100644 index 0000000..181d7ac --- /dev/null +++ b/MSH-PIC/hadoop/sbin/hadoop-daemons.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Run a Hadoop command on all slave hosts. + +usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..." + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hadoop-config.sh + +exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@" diff --git a/MSH-PIC/hadoop/sbin/hdfs-config.cmd b/MSH-PIC/hadoop/sbin/hdfs-config.cmd new file mode 100644 index 0000000..f3aa733 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/hdfs-config.cmd @@ -0,0 +1,43 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+ +@rem included in all the hdfs scripts with source command +@rem should not be executed directly + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +if exist %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd ( + call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* +) else if exist %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd ( + call %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd %* +) else if exist %HADOOP_HOME%\libexec\hadoop-config.cmd ( + call %HADOOP_HOME%\libexec\hadoop-config.cmd %* +) else ( + echo Hadoop common not found. +) + +:eof diff --git a/MSH-PIC/hadoop/sbin/hdfs-config.sh b/MSH-PIC/hadoop/sbin/hdfs-config.sh new file mode 100644 index 0000000..2aabf53 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/hdfs-config.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# included in all the hdfs scripts with source command +# should not be executed directly + +bin=`which "$0"` +bin=`dirname "${bin}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then + . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh +elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then + . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh +elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then + . "$HADOOP_HOME"/libexec/hadoop-config.sh +else + echo "Hadoop common not found." + exit +fi diff --git a/MSH-PIC/hadoop/sbin/httpfs.sh b/MSH-PIC/hadoop/sbin/httpfs.sh new file mode 100644 index 0000000..a593b67 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/httpfs.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +BASEDIR=`dirname ${PRG}` +BASEDIR=`cd ${BASEDIR}/..;pwd` + +source ${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/httpfs-config.sh + +# The Java System property 'httpfs.http.port' it is not used by HttpFS, +# it is used in Tomcat's server.xml configuration file +# +print "Using CATALINA_OPTS: ${CATALINA_OPTS}" + +catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}"; +catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}"; +catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}"; +catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}"; +catalina_opts="${catalina_opts} -Dhttpfs.admin.port=${HTTPFS_ADMIN_PORT}"; +catalina_opts="${catalina_opts} -Dhttpfs.http.port=${HTTPFS_HTTP_PORT}"; +catalina_opts="${catalina_opts} -Dhttpfs.http.hostname=${HTTPFS_HTTP_HOSTNAME}"; +catalina_opts="${catalina_opts} -Dhttpfs.ssl.enabled=${HTTPFS_SSL_ENABLED}"; +catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.file=${HTTPFS_SSL_KEYSTORE_FILE}"; +catalina_opts="${catalina_opts} -Dhttpfs.ssl.keystore.pass=${HTTPFS_SSL_KEYSTORE_PASS}"; + +print "Adding to CATALINA_OPTS: ${catalina_opts}" + +export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}" + +# A bug in catalina.sh script does not use CATALINA_OPTS for stopping the server +# +if [ "${1}" = "stop" ]; then + export JAVA_OPTS=${CATALINA_OPTS} +fi + +if [ "${HTTPFS_SILENT}" != "true" ]; then + exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" +else + exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null +fi + diff --git a/MSH-PIC/hadoop/sbin/kms.sh b/MSH-PIC/hadoop/sbin/kms.sh new file mode 100644 index 0000000..f6ef6a5 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/kms.sh @@ -0,0 +1,83 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +BASEDIR=`dirname ${PRG}` +BASEDIR=`cd ${BASEDIR}/..;pwd` + +KMS_SILENT=${KMS_SILENT:-true} + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}" +source ${HADOOP_LIBEXEC_DIR}/kms-config.sh + + +if [ "x$JAVA_LIBRARY_PATH" = "x" ]; then + JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/" +else + JAVA_LIBRARY_PATH="${HADOOP_LIBEXEC_DIR}/../lib/native/:${JAVA_LIBRARY_PATH}" +fi + +# The Java System property 'kms.http.port' it is not used by Kms, +# it is used in Tomcat's server.xml configuration file +# + +# Mask the trustStorePassword +KMS_SSL_TRUSTSTORE_PASS=`echo $CATALINA_OPTS | grep -o 'trustStorePassword=[^ ]*' | awk -F'=' '{print $2}'` +CATALINA_OPTS_DISP=`echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/'` +print "Using CATALINA_OPTS: ${CATALINA_OPTS_DISP}" + +catalina_opts="-Dkms.home.dir=${KMS_HOME}"; +catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}"; +catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}"; +catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}"; +catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}"; +catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}"; +catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}"; +catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}"; +catalina_opts="${catalina_opts} -Djava.library.path=${JAVA_LIBRARY_PATH}"; + +print "Adding to CATALINA_OPTS: ${catalina_opts}" +print "Found KMS_SSL_KEYSTORE_PASS: `echo ${KMS_SSL_KEYSTORE_PASS} | sed 's/./*/g'`" + +export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}" + +# A bug in catalina.sh script does not use CATALINA_OPTS for stopping the server +# +if [ "${1}" = "stop" ]; then + export JAVA_OPTS=${CATALINA_OPTS} +fi + +# If ssl, the populate the passwords into ssl-server.xml before starting tomcat +if [ ! "${KMS_SSL_KEYSTORE_PASS}" = "" ] || [ ! "${KMS_SSL_TRUSTSTORE_PASS}" = "" ]; then + # Set a KEYSTORE_PASS if not already set + KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password} + cat ${CATALINA_BASE}/conf/ssl-server.xml.conf \ + | sed 's/_kms_ssl_keystore_pass_/'${KMS_SSL_KEYSTORE_PASS}'/g' \ + | sed 's/_kms_ssl_truststore_pass_/'${KMS_SSL_TRUSTSTORE_PASS}'/g' > ${CATALINA_BASE}/conf/ssl-server.xml +fi + +exec ${KMS_CATALINA_HOME}/bin/catalina.sh "$@" diff --git a/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh b/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh new file mode 100644 index 0000000..7585c9a --- /dev/null +++ b/MSH-PIC/hadoop/sbin/mr-jobhistory-daemon.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Environment Variables +# +# HADOOP_JHS_LOGGER Hadoop JobSummary logger. +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_MAPRED_HOME}/conf. +# HADOOP_MAPRED_PID_DIR The pid files are stored. /tmp by default. +# HADOOP_MAPRED_NICENESS The scheduling priority for daemons. Defaults to 0. +## + +usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] (start|stop) <mapred-command> " + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then + . $HADOOP_LIBEXEC_DIR/mapred-config.sh +fi + +# get arguments +startStop=$1 +shift +command=$1 +shift + +hadoop_rotate_log () +{ + log=$1; + num=5; + if [ -n "$2" ]; then + num=$2 + fi + if [ -f "$log" ]; then # rotate logs + while [ $num -gt 1 ]; do + prev=`expr $num - 1` + [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num" + num=$prev + done + mv "$log" "$log.$num"; + fi +} + +if [ "$HADOOP_MAPRED_IDENT_STRING" = "" ]; then + export HADOOP_MAPRED_IDENT_STRING="$USER" +fi + +export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-${HADOOP_PREFIX}} +export HADOOP_MAPRED_LOGFILE=mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.log +export HADOOP_MAPRED_ROOT_LOGGER=${HADOOP_MAPRED_ROOT_LOGGER:-INFO,RFA} +export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA} + +if [ -f "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then + . "${HADOOP_CONF_DIR}/mapred-env.sh" +fi + +mkdir -p "$HADOOP_MAPRED_LOG_DIR" +chown $HADOOP_MAPRED_IDENT_STRING $HADOOP_MAPRED_LOG_DIR + +if [ "$HADOOP_MAPRED_PID_DIR" = "" ]; then + HADOOP_MAPRED_PID_DIR=/tmp +fi + +HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_MAPRED_IDENT_STRING" + +log=$HADOOP_MAPRED_LOG_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.out +pid=$HADOOP_MAPRED_PID_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command.pid + +HADOOP_MAPRED_STOP_TIMEOUT=${HADOOP_MAPRED_STOP_TIMEOUT:-5} + +# Set default scheduling priority +if [ "$HADOOP_MAPRED_NICENESS" = "" ]; then + export HADOOP_MAPRED_NICENESS=0 +fi + +case $startStop in + + (start) + + mkdir -p "$HADOOP_MAPRED_PID_DIR" + + if [ -f $pid ]; then + if kill -0 `cat $pid` > /dev/null 2>&1; then + echo $command running as process `cat $pid`. Stop it first. + exit 1 + fi + fi + + hadoop_rotate_log $log + echo starting $command, logging to $log + cd "$HADOOP_MAPRED_HOME" + nohup nice -n $HADOOP_MAPRED_NICENESS "$HADOOP_MAPRED_HOME"/bin/mapred --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & + echo $! 
> $pid + sleep 1; head "$log" + ;; + + (stop) + + if [ -f $pid ]; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo stopping $command + kill $TARGET_PID + sleep $HADOOP_MAPRED_STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $HADOOP_MAPRED_STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi + else + echo no $command to stop + fi + rm -f $pid + else + echo no $command to stop + fi + ;; + + (*) + echo $usage + exit 1 + ;; + +esac diff --git a/MSH-PIC/hadoop/sbin/refresh-namenodes.sh b/MSH-PIC/hadoop/sbin/refresh-namenodes.sh new file mode 100644 index 0000000..d3f6759 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/refresh-namenodes.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ------------------------------------------------------------------ +# This script refreshes all namenodes, it's a simple wrapper +# for dfsadmin to support multiple namenodes. + +bin=`dirname "$0"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hdfs-config.sh + +namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses) +if [ "$?" != '0' ] ; then errorFlag='1' ; +else + for namenode in $namenodes ; do + echo "Refreshing namenode [$namenode]" + "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes + if [ "$?" != '0' ] ; then errorFlag='1' ; fi + done +fi + +if [ "$errorFlag" = '1' ] ; then + echo "Error: refresh of namenodes failed, see error messages above." + exit 1 +else + echo "Refresh of namenodes done." +fi + + +# eof diff --git a/MSH-PIC/hadoop/sbin/slaves.sh b/MSH-PIC/hadoop/sbin/slaves.sh new file mode 100644 index 0000000..016392f --- /dev/null +++ b/MSH-PIC/hadoop/sbin/slaves.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Run a shell command on all slave hosts. 
+# +# Environment Variables +# +# HADOOP_SLAVES File naming remote hosts. +# Default is ${HADOOP_CONF_DIR}/slaves. +# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf. +# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands. +# HADOOP_SSH_OPTS Options passed to ssh when running remote commands. +## + +usage="Usage: slaves.sh [--config confdir] command..." + +# if no args specified, show usage +if [ $# -le 0 ]; then + echo $usage + exit 1 +fi + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hadoop-config.sh + +if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hadoop-env.sh" +fi + +# Where to start the script, see hadoop-config.sh +# (it set up the variables based on command line options) +if [ "$HADOOP_SLAVE_NAMES" != '' ] ; then + SLAVE_NAMES=$HADOOP_SLAVE_NAMES +else + SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves} + SLAVE_NAMES=$(cat "$SLAVE_FILE" | sed 's/#.*$//;/^$/d') +fi + +# start the daemons +for slave in $SLAVE_NAMES ; do + ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \ + 2>&1 | sed "s/^/$slave: /" & + if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then + sleep $HADOOP_SLAVE_SLEEP + fi +done + +wait diff --git a/MSH-PIC/hadoop/sbin/start-all.cmd b/MSH-PIC/hadoop/sbin/start-all.cmd new file mode 100644 index 0000000..9f65b5d --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-all.cmd @@ -0,0 +1,52 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +setlocal enabledelayedexpansion + +@rem Start all hadoop daemons. Run this on master node. + +echo This script is Deprecated. 
Instead use start-dfs.cmd and start-yarn.cmd + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* +if "%1" == "--config" ( + shift + shift +) + +@rem start hdfs daemons if hdfs is present +if exist %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd ( + call %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd --config %HADOOP_CONF_DIR% +) + +@rem start yarn daemons if yarn is present +if exist %HADOOP_YARN_HOME%\sbin\start-yarn.cmd ( + call %HADOOP_YARN_HOME%\sbin\start-yarn.cmd --config %HADOOP_CONF_DIR% +) + +endlocal diff --git a/MSH-PIC/hadoop/sbin/start-all.sh b/MSH-PIC/hadoop/sbin/start-all.sh new file mode 100644 index 0000000..3124328 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-all.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start all hadoop daemons. Run this on master node. + +echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh" + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hadoop-config.sh + +# start hdfs daemons if hdfs is present +if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then + "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR +fi + +# start yarn daemons if yarn is present +if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then + "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR +fi diff --git a/MSH-PIC/hadoop/sbin/start-balancer.sh b/MSH-PIC/hadoop/sbin/start-balancer.sh new file mode 100644 index 0000000..2c14a59 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-balancer.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hdfs-config.sh + +# Start balancer daemon. + +"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@ diff --git a/MSH-PIC/hadoop/sbin/start-dfs.cmd b/MSH-PIC/hadoop/sbin/start-dfs.cmd new file mode 100644 index 0000000..9f20e5a --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-dfs.cmd @@ -0,0 +1,41 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem +setlocal enabledelayedexpansion + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %* +if "%1" == "--config" ( + shift + shift +) + +start "Apache Hadoop Distribution" hadoop namenode +start "Apache Hadoop Distribution" hadoop datanode + +endlocal diff --git a/MSH-PIC/hadoop/sbin/start-dfs.sh b/MSH-PIC/hadoop/sbin/start-dfs.sh new file mode 100644 index 0000000..a8c2b98 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-dfs.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start hadoop dfs daemons. +# Optinally upgrade or rollback dfs state. +# Run this on master node. + +usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]" + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. 
$HADOOP_LIBEXEC_DIR/hdfs-config.sh + +# get arguments +if [[ $# -ge 1 ]]; then + startOpt="$1" + shift + case "$startOpt" in + -upgrade) + nameStartOpt="$startOpt" + ;; + -rollback) + dataStartOpt="$startOpt" + ;; + *) + echo $usage + exit 1 + ;; + esac +fi + +#Add other possible options +nameStartOpt="$nameStartOpt $@" + +#--------------------------------------------------------- +# namenodes + +NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes) + +echo "Starting namenodes on [$NAMENODES]" + +"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$NAMENODES" \ + --script "$bin/hdfs" start namenode $nameStartOpt + +#--------------------------------------------------------- +# datanodes (using default slaves file) + +if [ -n "$HADOOP_SECURE_DN_USER" ]; then + echo \ + "Attempting to start secure cluster, skipping datanodes. " \ + "Run start-secure-dns.sh as root to complete startup." +else + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --script "$bin/hdfs" start datanode $dataStartOpt +fi + +#--------------------------------------------------------- +# secondary namenodes (if any) + +SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null) + +if [ -n "$SECONDARY_NAMENODES" ]; then + echo "Starting secondary namenodes [$SECONDARY_NAMENODES]" + + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$SECONDARY_NAMENODES" \ + --script "$bin/hdfs" start secondarynamenode +fi + +#--------------------------------------------------------- +# quorumjournal nodes (if any) + +SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-) + +case "$SHARED_EDITS_DIR" in +qjournal://*) + JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g') + echo "Starting journal nodes [$JOURNAL_NODES]" + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$JOURNAL_NODES" \ + --script "$bin/hdfs" start journalnode ;; +esac + +#--------------------------------------------------------- +# ZK Failover controllers, if auto-HA is enabled +AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled) +if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then + echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]" + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$NAMENODES" \ + --script "$bin/hdfs" start zkfc +fi + +# eof diff --git a/MSH-PIC/hadoop/sbin/start-secure-dns.sh b/MSH-PIC/hadoop/sbin/start-secure-dns.sh new file mode 100644 index 0000000..7ddf687 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-secure-dns.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Run as root to start secure datanodes in a security-enabled cluster. + +usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh" + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hdfs-config.sh + +if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then + "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt +else + echo $usage +fi diff --git a/MSH-PIC/hadoop/sbin/start-yarn.cmd b/MSH-PIC/hadoop/sbin/start-yarn.cmd new file mode 100644 index 0000000..989510b --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-yarn.cmd @@ -0,0 +1,47 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem +setlocal enabledelayedexpansion + +echo starting yarn daemons + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %* +if "%1" == "--config" ( + shift + shift +) + +@rem start resourceManager +start "Apache Hadoop Distribution" yarn resourcemanager +@rem start nodeManager +start "Apache Hadoop Distribution" yarn nodemanager +@rem start proxyserver +@rem start "Apache Hadoop Distribution" yarn proxyserver + +endlocal diff --git a/MSH-PIC/hadoop/sbin/start-yarn.sh b/MSH-PIC/hadoop/sbin/start-yarn.sh new file mode 100644 index 0000000..40b77fb --- /dev/null +++ b/MSH-PIC/hadoop/sbin/start-yarn.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Start all yarn daemons. Run this on master node. 
+ +echo "starting yarn daemons" + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/yarn-config.sh + +# start resourceManager +"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager +# start nodeManager +"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager +# start proxyserver +#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver diff --git a/MSH-PIC/hadoop/sbin/stop-all.cmd b/MSH-PIC/hadoop/sbin/stop-all.cmd new file mode 100644 index 0000000..1d22c79 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/stop-all.cmd @@ -0,0 +1,52 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +setlocal enabledelayedexpansion + +@rem Stop all hadoop daemons. Run this on master node. + +echo This script is Deprecated. Instead use stop-dfs.cmd and stop-yarn.cmd + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* +if "%1" == "--config" ( + shift + shift +) + +@rem stop hdfs daemons if hdfs is present +if exist %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd ( + call %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd --config %HADOOP_CONF_DIR% +) + +@rem stop yarn daemons if yarn is present +if exist %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd ( + call %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd --config %HADOOP_CONF_DIR% +) + +endlocal diff --git a/MSH-PIC/hadoop/sbin/stop-all.sh b/MSH-PIC/hadoop/sbin/stop-all.sh new file mode 100644 index 0000000..9a2fe98 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/stop-all.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Stop all hadoop daemons. Run this on master node. + +echo "This script is Deprecated. 
Instead use stop-dfs.sh and stop-yarn.sh" + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hadoop-config.sh + +# stop hdfs daemons if hdfs is present +if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then + "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR +fi + +# stop yarn daemons if yarn is present +if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then + "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR +fi diff --git a/MSH-PIC/hadoop/sbin/stop-balancer.sh b/MSH-PIC/hadoop/sbin/stop-balancer.sh new file mode 100644 index 0000000..df82456 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/stop-balancer.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +bin=`dirname "${BASH_SOURCE-$0}"` +bin=`cd "$bin"; pwd` + +DEFAULT_LIBEXEC_DIR="$bin"/../libexec +HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} +. $HADOOP_LIBEXEC_DIR/hdfs-config.sh + +# Stop balancer daemon. +# Run this on the machine where the balancer is running + +"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer diff --git a/MSH-PIC/hadoop/sbin/stop-dfs.cmd b/MSH-PIC/hadoop/sbin/stop-dfs.cmd new file mode 100644 index 0000000..f0cf015 --- /dev/null +++ b/MSH-PIC/hadoop/sbin/stop-dfs.cmd @@ -0,0 +1,41 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+  set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+  shift
+  shift
+)
+
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop namenode"
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop datanode"
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/stop-dfs.sh b/MSH-PIC/hadoop/sbin/stop-dfs.sh
new file mode 100644
index 0000000..6a622fa
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-dfs.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
+if [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  echo \
+    "Attempting to stop secure cluster, skipping datanodes. " \
+    "Run stop-secure-dns.sh as root to complete shutdown."
+else
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" stop datanode
+fi
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
+
+if [ -n "$SECONDARY_NAMENODES" ]; then
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" stop secondarynamenode
+fi
+
+#---------------------------------------------------------
+# quorumjournal nodes (if any)
+
+SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+
+case "$SHARED_EDITS_DIR" in
+qjournal://*)
+  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
+  echo "Stopping journal nodes [$JOURNAL_NODES]"
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$JOURNAL_NODES" \
+    --script "$bin/hdfs" stop journalnode ;;
+esac
+
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+  echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]"
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$NAMENODES" \
+    --script "$bin/hdfs" stop zkfc
+fi
+# eof
diff --git a/MSH-PIC/hadoop/sbin/stop-secure-dns.sh b/MSH-PIC/hadoop/sbin/stop-secure-dns.sh
new file mode 100644
index 0000000..fdd47c3
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-secure-dns.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run as root to start secure datanodes in a security-enabled cluster.
+
+usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
+
+if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+else
+  echo $usage
+fi
diff --git a/MSH-PIC/hadoop/sbin/stop-yarn.cmd b/MSH-PIC/hadoop/sbin/stop-yarn.cmd
new file mode 100644
index 0000000..0914337
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo stopping yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+  set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+  shift
+  shift
+)
+
+@rem stop resourceManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn resourcemanager"
+@rem stop nodeManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn nodemanager"
+@rem stop proxy server
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn proxyserver"
+
+endlocal
diff --git a/MSH-PIC/hadoop/sbin/stop-yarn.sh b/MSH-PIC/hadoop/sbin/stop-yarn.sh
new file mode 100644
index 0000000..a8498ef
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/stop-yarn.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all yarn daemons. Run this on master node.
+
+echo "stopping yarn daemons"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# stop resourceManager
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop resourcemanager
+# stop nodeManager
+"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR stop nodemanager
+# stop proxy server
+"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop proxyserver
diff --git a/MSH-PIC/hadoop/sbin/yarn-daemon.sh b/MSH-PIC/hadoop/sbin/yarn-daemon.sh
new file mode 100644
index 0000000..fbfa71d
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/yarn-daemon.sh
@@ -0,0 +1,161 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a yarn command as a daemon.
+#
+# Environment Variables
+#
+#   YARN_CONF_DIR      Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+#   YARN_LOG_DIR       Where log files are stored. PWD by default.
+#   YARN_MASTER        host:path where hadoop code should be rsync'd from
+#   YARN_PID_DIR       The pid files are stored. /tmp by default.
+#   YARN_IDENT_STRING  A string representing this instance of hadoop. $USER by default
+#   YARN_NICENESS      The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: yarn-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <yarn-command> "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+  echo $usage
+  exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+  log=$1;
+  num=5;
+  if [ -n "$2" ]; then
+    num=$2
+  fi
+  if [ -f "$log" ]; then # rotate logs
+    while [ $num -gt 1 ]; do
+      prev=`expr $num - 1`
+      [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+      num=$prev
+    done
+    mv "$log" "$log.$num";
+  fi
+}
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+  . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$YARN_IDENT_STRING" = "" ]; then
+  export YARN_IDENT_STRING="$USER"
+fi
+
+# get log directory
+if [ "$YARN_LOG_DIR" = "" ]; then
+  export YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+
+if [ ! -w "$YARN_LOG_DIR" ] ; then
+  mkdir -p "$YARN_LOG_DIR"
+  chown $YARN_IDENT_STRING $YARN_LOG_DIR
+fi
+
+if [ "$YARN_PID_DIR" = "" ]; then
+  YARN_PID_DIR=/tmp
+fi
+
+# some variables
+export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA}
+log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
+pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$YARN_NICENESS" = "" ]; then
+  export YARN_NICENESS=0
+fi
+
+case $startStop in
+
+  (start)
+
+    [ -w "$YARN_PID_DIR" ] || mkdir -p "$YARN_PID_DIR"
+
+    if [ -f $pid ]; then
+      if kill -0 `cat $pid` > /dev/null 2>&1; then
+        echo $command running as process `cat $pid`. Stop it first.
+        exit 1
+      fi
+    fi
+
+    if [ "$YARN_MASTER" != "" ]; then
+      echo rsync from $YARN_MASTER
+      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$HADOOP_YARN_HOME"
+    fi
+
+    hadoop_rotate_log $log
+    echo starting $command, logging to $log
+    cd "$HADOOP_YARN_HOME"
+    nohup nice -n $YARN_NICENESS "$HADOOP_YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+    echo $! > $pid
+    sleep 1
+    head "$log"
+    # capture the ulimit output
+    echo "ulimit -a" >> $log
+    ulimit -a >> $log 2>&1
+    ;;
+
+  (stop)
+
+    if [ -f $pid ]; then
+      TARGET_PID=`cat $pid`
+      if kill -0 $TARGET_PID > /dev/null 2>&1; then
+        echo stopping $command
+        kill $TARGET_PID
+        sleep $YARN_STOP_TIMEOUT
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
+          echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+          kill -9 $TARGET_PID
+        fi
+      else
+        echo no $command to stop
+      fi
+      rm -f $pid
+    else
+      echo no $command to stop
+    fi
+    ;;

+  (*)
+    echo $usage
+    exit 1
+    ;;
+
+esac
+
+
diff --git a/MSH-PIC/hadoop/sbin/yarn-daemons.sh b/MSH-PIC/hadoop/sbin/yarn-daemons.sh
new file mode 100644
index 0000000..a7858e4
--- /dev/null
+++ b/MSH-PIC/hadoop/sbin/yarn-daemons.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Yarn command on all slave hosts.
+
+usage="Usage: yarn-daemons.sh [--config confdir] [--hosts hostlistfile] [start
+|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+  echo $usage
+  exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$HADOOP_YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
+
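
The yarn-daemon.sh header above documents the environment variables the script honours (YARN_CONF_DIR, YARN_LOG_DIR, YARN_PID_DIR, YARN_IDENT_STRING, YARN_NICENESS, plus YARN_STOP_TIMEOUT read in the body). A minimal sketch of a site-specific wrapper that pins those values before delegating to the daemon script; the paths and numbers below are illustrative assumptions, not values taken from this tree:

#!/bin/bash
# Hypothetical wrapper around the yarn-daemon.sh shipped above; all paths and values are examples only.
export YARN_LOG_DIR=/var/log/hadoop-yarn     # script default is ${HADOOP_YARN_HOME}/logs
export YARN_PID_DIR=/var/run/hadoop-yarn     # script default is /tmp, which tmp cleaners may purge
export YARN_IDENT_STRING=yarn                # script default is $USER
export YARN_NICENESS=5                       # forwarded to "nice -n" when the daemon is launched
export YARN_STOP_TIMEOUT=10                  # seconds the stop branch waits before escalating to kill -9
exec "$HADOOP_YARN_HOME/sbin/yarn-daemon.sh" --config "$YARN_CONF_DIR" start resourcemanager

With these settings the pid file lands at $YARN_PID_DIR/yarn-$YARN_IDENT_STRING-resourcemanager.pid, which is exactly what the stop branch reads back. yarn-daemons.sh replays the same command over ssh on every host in the slaves file, so the per-host behaviour is identical.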

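For the HA case, stop-dfs.sh above derives the JournalNode host list solely from dfs.namenode.shared.edits.dir. A quick way to check what its sed pipeline yields for a given URI (the qjournal URI below is an assumed example, not a value from this configuration):

SHARED_EDITS_DIR='qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster'
echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g'
# prints: jn1.example.com jn2.example.com jn3.example.com

The resulting hostnames are passed to hadoop-daemons.sh via --hostnames, so only the listed JournalNode machines are asked to stop; the same getconf-driven pattern supplies the NameNode and SecondaryNameNode host lists.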