hadoop安装部署

来源:互联网 发布:三国无双7帝国初音数据 编辑:程序博客网 时间:2024/06/16 05:27

没有时间整理,先粗略地将当时的安装步骤给大家贴出来。

http://mirrors.cnnic.cn/apache/hadoop/common/hadoop-2.7.3/

# Create the hadoop user: -m makes a home directory, -G adds the
# supplementary group "root", -s sets bash as the login shell.
useradd -m hadoop -G root -s /bin/bash
# Set the account's login password interactively.
# (Original had "password", which is not a command — it must be passwd.)
passwd hadoop

# install
# Unpack the 2.7.3 release into /usr/local, rename the versioned
# directory to a stable, version-independent path, and hand ownership
# to the hadoop user so the daemons can write under it.
tar -xzf hadoop-2.7.3.tar.gz -C /usr/local/
cd /usr/local/ || exit 1   # abort here: otherwise mv/chown run in the wrong directory
mv hadoop-2.7.3 hadoop
chown -R hadoop:hadoop hadoop

# Grant the hadoop user full sudo rights.  Write a drop-in file instead
# of hand-editing /etc/sudoers with vim: a syntax error in the main
# sudoers file can lock everyone out of sudo.
# NOTE(review): assumes /etc/sudoers contains "#includedir /etc/sudoers.d"
# (the default on RHEL/CentOS and most modern distros) — confirm.
echo 'hadoop  ALL=(ALL)       ALL' > /etc/sudoers.d/hadoop
chmod 440 /etc/sudoers.d/hadoop    # sudo refuses world-readable/writable drop-ins
visudo -c                          # syntax-check the resulting configuration


# authentication
# Set up passwordless SSH to localhost, which the start-*.sh helper
# scripts need in order to launch the daemons.
mkdir -p ~/.ssh && chmod 700 ~/.ssh             # works even if ~/.ssh does not exist yet
                                                # (original required a prior "ssh localhost")
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa        # empty passphrase, fully non-interactive
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys # authorize our own public key
chmod 600 ~/.ssh/authorized_keys                # sshd ignores group/world-writable files


# Sanity check: print the release banner (expect "Hadoop 2.7.3") to
# confirm the unpacked tree is runnable before configuring anything.
hadoop_home=/usr/local/hadoop
"$hadoop_home/bin/hadoop" version


# change env
# Append the Hadoop environment variables to the system-wide profile.
# A quoted heredoc ('EOF') writes the lines verbatim — the $HADOOP_HOME
# references stay literal in the file and expand at login time — and is
# scriptable, unlike the original interactive vim session.
# NOTE(review): running this twice appends duplicate lines; check
# /etc/profile first on re-runs.
cat >> /etc/profile <<'EOF'
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
EOF

# Load the new variables into the current shell.
source /etc/profile


# Back up core-site.xml, then write the pseudo-distributed configuration
# non-interactively (quoted heredoc replaces the manual vim session):
# hadoop.tmp.dir — base dir for HDFS data, and fs.defaultFS — the
# NameNode RPC endpoint.
cp /usr/local/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/etc/hadoop/core-site.xml.bak
cat > /usr/local/hadoop/etc/hadoop/core-site.xml <<'EOF'
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>Abase for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>
EOF


# Back up hdfs-site.xml, then write it non-interactively: replication
# factor 1 (single-node) plus explicit NameNode/DataNode storage dirs
# under hadoop.tmp.dir.
cp /usr/local/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/etc/hadoop/hdfs-site.xml.bak
cat > /usr/local/hadoop/etc/hadoop/hdfs-site.xml <<'EOF'
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/data</value>
    </property>
</configuration>
EOF


# format
# One-time initialization of the NameNode metadata directory
# (dfs.namenode.name.dir set above).  NOTE(review): re-running this on a
# live cluster wipes existing HDFS metadata — only do it on first setup.
/usr/local/hadoop/bin/hdfs namenode -format


# change hadoop env
# Pin JAVA_HOME to an absolute JDK path in hadoop-env.sh.  The daemons
# are launched over ssh and do not reliably inherit the login shell's
# JAVA_HOME, so it must be hard-coded here.  sed makes the edit
# non-interactive (replaces the "open vim and find $JAVA_HOME" step).
sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/java/jdk1.8.0_51|' /usr/local/hadoop/etc/hadoop/hadoop-env.sh


# start/stop hadoop
# NOTE(review): start-all.sh/stop-all.sh are flagged as deprecated in
# Hadoop 2.x in favour of the per-service scripts below — confirm
# against the release notes; kept here as the original showed.
/usr/local/hadoop/sbin/start-all.sh
/usr/local/hadoop/sbin/stop-all.sh

# start/stop dfs
# Launches/stops NameNode, DataNode and SecondaryNameNode only.
/usr/local/hadoop/sbin/start-dfs.sh
/usr/local/hadoop/sbin/stop-dfs.sh


# view cmd
# List running JVM processes.  After a successful start, expect the
# daemon names shown in the sample output further below (NameNode,
# DataNode, SecondaryNameNode, ...) alongside Jps itself.
jps

如果只有一个jps进程的话,查看/usr/local/hadoop/tmp/中的目录所属是否为hadoop用户。



# mapred-site.xml ships only as a .template: instantiate it, then write
# the final content non-interactively — run MapReduce jobs on YARN.
cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml
cat > /usr/local/hadoop/etc/hadoop/mapred-site.xml <<'EOF'
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
EOF
# Back up yarn-site.xml, then write it non-interactively: enable the
# shuffle auxiliary service the NodeManager needs for MapReduce jobs.
cp /usr/local/hadoop/etc/hadoop/yarn-site.xml /usr/local/hadoop/etc/hadoop/yarn-site.xml.bak
cat > /usr/local/hadoop/etc/hadoop/yarn-site.xml <<'EOF'
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
EOF


# start/stop yarn
# Launches ResourceManager and NodeManager.
/usr/local/hadoop/sbin/start-yarn.sh

# Start the MapReduce JobHistory server (serves logs of finished jobs;
# accounts for the JobHistoryServer entry in the jps output below).
/usr/local/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver

[hadoop@localhost hadoop]$ jps
49248 NameNode
52417 NodeManager
52467 JobHistoryServer
49348 DataNode
52317 ResourceManager
49551 SecondaryNameNode
52495 Jps

启动以后,共有上述进程。如果不够的话,请查看配置文件和相关各模块的配置。


0 0
原创粉丝点击