Reading Hadoop's startup scripts (1.1.2)
Let's begin with start-all.sh, the entry point that brings up the whole cluster:
```bash
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start all hadoop daemons.  Run this on master node.

bin=`dirname "$0"`     # the directory containing this script
bin=`cd "$bin"; pwd`   # ... converted to an absolute path

if [ -e "$bin/../libexec/hadoop-config.sh" ]; then  # -e: the path exists (file or directory)
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin/hadoop-config.sh"
fi

# start dfs daemons
"$bin"/start-dfs.sh --config $HADOOP_CONF_DIR

# start mapred daemons
"$bin"/start-mapred.sh --config $HADOOP_CONF_DIR
```
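The two `bin=` lines are the standard idiom for locating the directory a script lives in, no matter how it was invoked. A tiny standalone sketch (the /tmp/demo path is just an example):

```bash
#!/usr/bin/env bash
# Hypothetical standalone demo: save as /tmp/demo/where.sh, then run it
# from anywhere (e.g. "cd /tmp && ./demo/where.sh").
bin=`dirname "$0"`    # here: "./demo" (relative; depends on how it was invoked)
bin=`cd "$bin"; pwd`  # always the absolute directory: /tmp/demo
echo "this script lives in: $bin"
```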
hadoop-config.sh
```bash
# (Apache License 2.0 header as in start-all.sh above)

# included in all the hadoop scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*

# resolve links - $0 may be a softlink
this="${BASH_SOURCE-$0}"
# If BASH_SOURCE is set, use it (even if it is empty); only if it is unset
# does the expansion fall back to $0 (the script name). See:
# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_06_02
common_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
# -P means "physical" (as opposed to logical): if the target is a symlink,
# cd resolves it to the actual physical directory; pwd -P behaves the same.
# $(...) is equivalent to `...` (command substitution).
# "--" marks the end of options; everything after it is treated as an operand.
script="$(basename -- "$this")"
this="$common_bin/$script"

# convert relative path to absolute path
config_bin=`dirname "$this"`
script=`basename "$this"`
config_bin=`cd "$config_bin"; pwd`
this="$config_bin/$script"

# the root of the Hadoop installation
export HADOOP_PREFIX=`dirname "$this"`/..

# check to see if the conf dir is given as an optional argument
if [ $# -gt 1 ]
then
  if [ "--config" = "$1" ]
  then
    shift
    confdir=$1
    shift
    HADOOP_CONF_DIR=$confdir
  fi
fi

# Allow alternate conf dir location.
if [ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]; then
  DEFAULT_CONF_DIR="conf"
else
  DEFAULT_CONF_DIR="etc/hadoop"
fi
HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"

# check to see whether the slaves or the masters file is specified
if [ $# -gt 1 ]  # number of remaining arguments (the script name is not counted) > 1
then
  if [ "--hosts" = "$1" ]
  then
    shift   # move on to the next argument
    slavesfile=$1
    shift
    export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile"
  fi
fi

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  # -f: exists and is a regular file; -e: exists (directories pass too)
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

if [ "$HADOOP_HOME_WARN_SUPPRESS" = "" ] && [ "$HADOOP_HOME" != "" ]; then
  echo "Warning: \$HADOOP_HOME is deprecated." 1>&2   # redirect stdout to stderr
  echo 1>&2
fi

# Newer versions of glibc use an arena memory allocator that causes virtual
# memory usage to explode. This interacts badly with the many threads that
# we use in Hadoop. Tune the variable down to prevent vmem explosion.
export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}

export HADOOP_HOME=${HADOOP_PREFIX}
export HADOOP_HOME_WARN_SUPPRESS=1
```
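Note that hadoop-config.sh uses two slightly different parameter expansions: `${BASH_SOURCE-$0}` (hyphen only: substitute when the variable is unset) and `${HADOOP_CONF_DIR:-...}` (colon-hyphen: substitute when unset or empty). A quick demonstration of the difference:

```bash
# ${v-fallback}  : use fallback only when v is UNSET
# ${v:-fallback} : use fallback when v is unset OR empty
unset v;  echo "1: ${v-fallback}"    # prints "1: fallback"
v="";     echo "2: ${v-fallback}"    # prints "2: " (empty still counts as set)
v="";     echo "3: ${v:-fallback}"   # prints "3: fallback"
v="real"; echo "4: ${v:-fallback}"   # prints "4: real"
```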
hadoop-daemon.sh
```bash
#!/usr/bin/env bash

# (Apache License 2.0 header as in start-all.sh above)

# Runs a Hadoop command as a daemon.
#
# Environment Variables
#
#   HADOOP_CONF_DIR      Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
#   HADOOP_LOG_DIR       Where log files are stored.  PWD by default.
#   HADOOP_MASTER        host:path where hadoop code should be rsync'd from
#   HADOOP_PID_DIR       The pid files are stored. /tmp by default.
#   HADOOP_IDENT_STRING  A string representing this instance of hadoop. $USER by default
#   HADOOP_NICENESS      The scheduling priority for daemons. Defaults to 0.
##

usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <hadoop-command> <args...>"

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

if [ -e "$bin/../libexec/hadoop-config.sh" ]; then
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin/hadoop-config.sh"
fi

# get arguments
startStop=$1
shift
command=$1
shift

hadoop_rotate_log ()
{
    log=$1;
    num=5;
    if [ -n "$2" ]; then
      num=$2
    fi
    if [ -f "$log" ]; then # rotate logs
      while [ $num -gt 1 ]; do
        prev=`expr $num - 1`
        [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
        num=$prev
      done
      mv "$log" "$log.$num";
    fi
}

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
  # $EUID is the effective user ID, which determines the process's access
  # rights. Every Linux process carries two IDs: the real user ID (UID),
  # which identifies the user who created the process, and the effective
  # user ID (EUID), which determines which files and resources the process
  # may access (i.e. whose permissions it exercises). A process created by
  # the superuser has EUID 0 and may access the entire filesystem.
  export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
  export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
fi

if [ "$HADOOP_IDENT_STRING" = "" ]; then
  export HADOOP_IDENT_STRING="$USER"
fi

# get log directory
if [ "$HADOOP_LOG_DIR" = "" ]; then
  export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
mkdir -p "$HADOOP_LOG_DIR"
touch $HADOOP_LOG_DIR/.hadoop_test > /dev/null 2>&1
TEST_LOG_DIR=$?
if [ "${TEST_LOG_DIR}" = "0" ]; then
  rm -f $HADOOP_LOG_DIR/.hadoop_test
else
  chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
fi

if [ "$HADOOP_PID_DIR" = "" ]; then
  HADOOP_PID_DIR=/tmp
fi

# some variables
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER="INFO,DRFA"
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}

# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
  export HADOOP_NICENESS=0
fi

case $startStop in

  (start)

    mkdir -p "$HADOOP_PID_DIR"

    if [ -f $pid ]; then
      if kill -0 `cat $pid` > /dev/null 2>&1; then
        # kill -0 sends no signal, but the system still performs its error
        # checking, so it is commonly used to test whether a process exists:
        # when the process is gone, kill -0 <pid> returns an error.
        echo $command running as process `cat $pid`.  Stop it first.
        exit 1
      fi
    fi

    if [ "$HADOOP_MASTER" != "" ]; then
      echo rsync from $HADOOP_MASTER
      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME"
    fi

    hadoop_rotate_log $log
    echo starting $command, logging to $log
    cd "$HADOOP_PREFIX"
    nohup nice -n $HADOOP_NICENESS "$HADOOP_PREFIX"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
    echo $! > $pid
    sleep 1; head "$log"
    ;;

  (stop)

    if [ -f $pid ]; then
      TARGET_PID=`cat $pid`
      if kill -0 $TARGET_PID > /dev/null 2>&1; then
        echo stopping $command
        kill $TARGET_PID
        sleep $HADOOP_STOP_TIMEOUT
        if kill -0 $TARGET_PID > /dev/null 2>&1; then
          echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
          kill -9 $TARGET_PID
        fi
      else
        echo no $command to stop
      fi
    else
      echo no $command to stop
    fi
    ;;

  (*)
    echo $usage
    exit 1
    ;;

esac
```
hadoop-daemons.sh

```bash
#!/usr/bin/env bash

# (Apache License 2.0 header as in start-all.sh above)

# Run a Hadoop command on all slave hosts.

usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

if [ -e "$bin/../libexec/hadoop-config.sh" ]; then
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin/hadoop-config.sh"
fi

exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
# On exec versus the alternatives, see: "Three ways to invoke another script
# from a shell script (fork, exec, source)"
# [http://blog.csdn.net/frivol/article/details/9361857]
# "\;" is an escaped ";". The cd command and "$bin/hadoop-daemon.sh" are
# passed as arguments to slaves.sh, which will later run them on each of
# the slave machines.
```
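The `exec` here matters: slaves.sh replaces the hadoop-daemons.sh process instead of running as a child, so nothing written after that line could ever execute. A minimal sketch of exec semantics (the inner sh command is just an illustration):

```bash
#!/usr/bin/env bash
# exec replaces the current shell's process image with the given command:
# the pid stays the same, and control never returns to this script.
echo "pid before exec: $$"
exec sh -c 'echo "pid after exec:  $$"'   # prints the SAME pid
echo "never printed"
```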
slaves.sh
```bash
#!/usr/bin/env bash

# (Apache License 2.0 header as in start-all.sh above)

# Run a shell command on all slave hosts.
#
# Environment Variables
#
#   HADOOP_SLAVES       File naming remote hosts.
#     Default is ${HADOOP_CONF_DIR}/slaves.
#   HADOOP_CONF_DIR     Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#   HADOOP_SLAVE_SLEEP  Seconds to sleep between spawning remote commands.
#   HADOOP_SSH_OPTS     Options passed to ssh when running remote commands.
##

usage="Usage: slaves.sh [--config confdir] command..."

# if no args specified, show usage
if [ $# -le 0 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

if [ -e "$bin/../libexec/hadoop-config.sh" ]; then
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin/hadoop-config.sh"
fi

# If the slaves file is specified in the command line,
# then it takes precedence over the definition in
# hadoop-env.sh. Save it here.
HOSTLIST=$HADOOP_SLAVES

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

if [ "$HOSTLIST" = "" ]; then
  if [ "$HADOOP_SLAVES" = "" ]; then
    export HOSTLIST="${HADOOP_CONF_DIR}/slaves"
  else
    export HOSTLIST="${HADOOP_SLAVES}"
  fi
fi

for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do
 # The sed substitution s/#.*$// strips "#" comments through end of line;
 # the d command deletes blank lines (the regex /^$/ matches an empty line).
 # ${@// /\\ } expands the argument list with every space escaped as "\ ",
 # and ssh then executes that argument list (handed over by
 # hadoop-daemons.sh) on the $slave machine. ($"..." is bash's
 # locale-translation quoting; it behaves like plain "..." here.)
 # The trailing sed prefixes every line of the ssh output with "$slave: ".
 ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
   2>&1 | sed "s/^/$slave: /" &
 if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
   sleep $HADOOP_SLAVE_SLEEP
 fi
done

wait
```
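The `${@// /\\ }` expansion deserves a closer look: it is bash pattern substitution applied to every positional parameter, replacing each space with a backslash-escaped space so that multi-word arguments survive the extra round of word splitting on the remote side of ssh. A standalone demonstration with made-up arguments:

```bash
#!/usr/bin/env bash
# Show what ${@// /\\ } does to the positional parameters.
set -- cd "/tmp/dir with spaces" \; ls   # four parameters, one contains spaces
printf '%s\n' "${@// /\\ }"
# prints:
#   cd
#   /tmp/dir\ with\ spaces
#   ;
#   ls
```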
bin/hadoop
```bash
#!/usr/bin/env bash

# (Apache License 2.0 header as in start-all.sh above)

# The Hadoop command script
#
# Environment Variables
#
#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
#
#   HADOOP_CLASSPATH Extra Java CLASSPATH entries.
#
#   HADOOP_USER_CLASSPATH_FIRST      When defined, the HADOOP_CLASSPATH is
#                                    added in the beginning of the global
#                                    classpath. Can be defined, for example,
#                                    by doing
#                                    export HADOOP_USER_CLASSPATH_FIRST=true
#
#   HADOOP_HEAPSIZE  The maximum amount of heap to use, in MB.
#                    Default is 1000.
#
#   HADOOP_OPTS      Extra Java runtime options.
#
#   HADOOP_NAMENODE_OPTS       These options are added to HADOOP_OPTS
#   HADOOP_CLIENT_OPTS         when the respective command is run.
#   HADOOP_{COMMAND}_OPTS etc  HADOOP_JT_OPTS applies to JobTracker
#                              for e.g.  HADOOP_CLIENT_OPTS applies to
#                              more than one command (fs, dfs, fsck,
#                              dfsadmin etc)
#
#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#
#   HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
#

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

if [ -e "$bin"/../libexec/hadoop-config.sh ]; then
  . "$bin"/../libexec/hadoop-config.sh
else
  . "$bin"/hadoop-config.sh
fi

cygwin=false
case "`uname`" in
CYGWIN*) cygwin=true;;
esac

# if no args specified, show usage
if [ $# = 0 ]; then
  echo "Usage: hadoop [--config confdir] COMMAND"
  echo "where COMMAND is one of:"
  echo "  namenode -format     format the DFS filesystem"
  echo "  secondarynamenode    run the DFS secondary namenode"
  echo "  namenode             run the DFS namenode"
  echo "  datanode             run a DFS datanode"
  echo "  dfsadmin             run a DFS admin client"
  echo "  mradmin              run a Map-Reduce admin client"
  echo "  fsck                 run a DFS filesystem checking utility"
  echo "  fs                   run a generic filesystem user client"
  echo "  balancer             run a cluster balancing utility"
  echo "  fetchdt              fetch a delegation token from the NameNode"
  echo "  jobtracker           run the MapReduce job Tracker node"
  echo "  pipes                run a Pipes job"
  echo "  tasktracker          run a MapReduce task Tracker node"
  echo "  historyserver        run job history servers as a standalone daemon"
  echo "  job                  manipulate MapReduce jobs"
  echo "  queue                get information regarding JobQueues"
  echo "  version              print the version"
  echo "  jar <jar>            run a jar file"
  echo "  distcp <srcurl> <desturl> copy file or directories recursively"
  echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
  echo "  classpath            prints the class path needed to get the"
  echo "                       Hadoop jar and the required libraries"
  echo "  daemonlog            get/set the log level for each daemon"
  echo " or"
  echo "  CLASSNAME            run the class named CLASSNAME"
  echo "Most commands print help when invoked w/o parameters."
  exit 1
fi

# get arguments
COMMAND=$1
shift

# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
  HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
  HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
  HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
  starting_secure_dn="true"
fi

# some Java parameters
if [ "$JAVA_HOME" != "" ]; then
  #echo "run java in $JAVA_HOME"
  JAVA_HOME=$JAVA_HOME
fi

if [ "$JAVA_HOME" = "" ]; then
  echo "Error: JAVA_HOME is not set."
  exit 1
fi

JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx1000m

# check envvars which might override default args
if [ "$HADOOP_HEAPSIZE" != "" ]; then
  #echo "run with heapsize $HADOOP_HEAPSIZE"
  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
  #echo $JAVA_HEAP_MAX
fi

# CLASSPATH initially contains $HADOOP_CONF_DIR
CLASSPATH="${HADOOP_CONF_DIR}"
if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ] && [ "$HADOOP_CLASSPATH" != "" ] ; then
  CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
fi
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar

# for developers, add Hadoop classes to CLASSPATH
if [ -d "$HADOOP_HOME/build/classes" ]; then  # -d: the directory exists
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
fi
if [ -d "$HADOOP_HOME/build/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
fi
if [ -d "$HADOOP_HOME/build/test/classes" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
fi
if [ -d "$HADOOP_HOME/build/tools" ]; then
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
fi

# so that filenames w/ spaces are handled correctly in loops below
# On IFS, see: http://blog.csdn.net/frivol/article/details/9400339
IFS=

# for releases, add core hadoop jar & webapps to CLASSPATH
if [ -e $HADOOP_PREFIX/share/hadoop/hadoop-core-* ]; then
  # binary layout
  if [ -d "$HADOOP_PREFIX/share/hadoop/webapps" ]; then
    CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/share/hadoop
  fi
  for f in $HADOOP_PREFIX/share/hadoop/hadoop-core-*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  # add libs to CLASSPATH
  for f in $HADOOP_PREFIX/share/hadoop/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  for f in $HADOOP_PREFIX/share/hadoop/lib/jsp-2.1/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  for f in $HADOOP_PREFIX/share/hadoop/hadoop-tools-*.jar; do
    TOOL_PATH=${TOOL_PATH}:$f;
  done
else
  # tarball layout
  if [ -d "$HADOOP_HOME/webapps" ]; then
    CLASSPATH=${CLASSPATH}:$HADOOP_HOME
  fi
  for f in $HADOOP_HOME/hadoop-core-*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  # add libs to CLASSPATH
  for f in $HADOOP_HOME/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
    for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
      CLASSPATH=${CLASSPATH}:$f;
    done
  fi

  for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done

  for f in $HADOOP_HOME/hadoop-tools-*.jar; do
    TOOL_PATH=${TOOL_PATH}:$f;
  done
  for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do
    TOOL_PATH=${TOOL_PATH}:$f;
  done
fi

# add user-specified CLASSPATH last
if [ "$HADOOP_USER_CLASSPATH_FIRST" = "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
  CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
fi

# default log directory & file
if [ "$HADOOP_LOG_DIR" = "" ]; then
  HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
if [ "$HADOOP_LOGFILE" = "" ]; then
  HADOOP_LOGFILE='hadoop.log'
fi

# default policy file for service-level authorization
if [ "$HADOOP_POLICYFILE" = "" ]; then
  HADOOP_POLICYFILE="hadoop-policy.xml"
fi

# restore ordinary behaviour
unset IFS

# figure out which class to run
if [ "$COMMAND" = "classpath" ] ; then
  if $cygwin; then
    CLASSPATH=`cygpath -p -w "$CLASSPATH"`
  fi
  echo $CLASSPATH
  exit
elif [ "$COMMAND" = "namenode" ] ; then
  CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
elif [ "$COMMAND" = "secondarynamenode" ] ; then
  CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
elif [ "$COMMAND" = "datanode" ] ; then
  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
  if [ "$starting_secure_dn" = "true" ]; then
    HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
  else
    HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
  fi
elif [ "$COMMAND" = "fs" ] ; then
  CLASS=org.apache.hadoop.fs.FsShell
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "dfs" ] ; then
  CLASS=org.apache.hadoop.fs.FsShell
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "dfsadmin" ] ; then
  CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "mradmin" ] ; then
  CLASS=org.apache.hadoop.mapred.tools.MRAdmin
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "fsck" ] ; then
  CLASS=org.apache.hadoop.hdfs.tools.DFSck
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "balancer" ] ; then
  CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
elif [ "$COMMAND" = "fetchdt" ] ; then
  CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
elif [ "$COMMAND" = "jobtracker" ] ; then
  CLASS=org.apache.hadoop.mapred.JobTracker
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS"
elif [ "$COMMAND" = "historyserver" ] ; then
  CLASS=org.apache.hadoop.mapred.JobHistoryServer
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOB_HISTORYSERVER_OPTS"
elif [ "$COMMAND" = "tasktracker" ] ; then
  CLASS=org.apache.hadoop.mapred.TaskTracker
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS"
elif [ "$COMMAND" = "job" ] ; then
  CLASS=org.apache.hadoop.mapred.JobClient
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "queue" ] ; then
  CLASS=org.apache.hadoop.mapred.JobQueueClient
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "pipes" ] ; then
  CLASS=org.apache.hadoop.mapred.pipes.Submitter
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "version" ] ; then
  CLASS=org.apache.hadoop.util.VersionInfo
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "jar" ] ; then
  CLASS=org.apache.hadoop.util.RunJar
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "distcp" ] ; then
  CLASS=org.apache.hadoop.tools.DistCp
  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "daemonlog" ] ; then
  CLASS=org.apache.hadoop.log.LogLevel
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "archive" ] ; then
  CLASS=org.apache.hadoop.tools.HadoopArchives
  CLASSPATH=${CLASSPATH}:${TOOL_PATH}
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "sampler" ] ; then
  CLASS=org.apache.hadoop.mapred.lib.InputSampler
  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
else
  CLASS=$COMMAND
fi

# cygwin path translation
if $cygwin; then
  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
  HADOOP_HOME=`cygpath -w "$HADOOP_HOME"`
  HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
  TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
fi

# Determine the JAVA_PLATFORM
JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
if [ "$JAVA_PLATFORM" = "Linux-amd64-64" ]; then
  JSVC_ARCH="amd64"
else
  JSVC_ARCH="i386"
fi

# setup 'java.library.path' for native-hadoop code if necessary
JAVA_LIBRARY_PATH=''
if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" -o -e "${HADOOP_PREFIX}/lib/libhadoop.a" ]; then

  if [ -d "$HADOOP_HOME/build/native" ]; then
    JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
  fi

  if [ -d "${HADOOP_HOME}/lib/native" ]; then
    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
    else
      JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
    fi
  fi

  if [ -e "${HADOOP_PREFIX}/lib/libhadoop.a" ]; then
    JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/lib
  fi
fi

# cygwin path translation
if $cygwin; then
  JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
fi

HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"

# turn security logger on the namenode and jobtracker only
if [ $COMMAND = "namenode" ] || [ $COMMAND = "jobtracker" ]; then
  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
else
  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
fi

if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
  HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
fi
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"

# Check to see if we should start a secure datanode
if [ "$starting_secure_dn" = "true" ]; then
  if [ "$HADOOP_PID_DIR" = "" ]; then
    HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
  else
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
  fi

  exec "$HADOOP_HOME/libexec/jsvc.${JSVC_ARCH}" -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
    -errfile "$HADOOP_LOG_DIR/jsvc.err" \
    -pidfile "$HADOOP_SECURE_DN_PID" \
    -nodetach \
    -user "$HADOOP_SECURE_DN_USER" \
    -cp "$CLASSPATH" \
    $JAVA_HEAP_MAX $HADOOP_OPTS \
    org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
else
  # run it
  exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
fi
```
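One subtlety worth isolating is the `IFS=` / `unset IFS` pair around the classpath loops: emptying IFS disables word splitting of the unquoted `$HADOOP_HOME/...` expansions, which matters when the install path contains spaces (as it easily can under cygwin). A small sketch with a hypothetical path:

```bash
#!/usr/bin/env bash
# Why bin/hadoop empties IFS around its classpath loops.
HOME_WITH_SPACE="/tmp/hadoop home"   # hypothetical install path with a space
for f in $HOME_WITH_SPACE/lib; do echo "default IFS: <$f>"; done
# prints two broken pieces:
#   default IFS: </tmp/hadoop>
#   default IFS: <home/lib>
IFS=
for f in $HOME_WITH_SPACE/lib; do echo "empty IFS:   <$f>"; done
# prints one intact path:
#   empty IFS:   </tmp/hadoop home/lib>
unset IFS   # restore ordinary behaviour
```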