A Simple Hadoop Cluster on Virtual Machines

A walkthrough for a simple three-node Hadoop 2.6.0 cluster: db77 as the master, db78 and db79 as workers.
1. vi /etc/hosts (edit on all 3 nodes)

192.168.8.77 DB77
192.168.8.78 DB78
192.168.8.79 DB79
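A quick sanity check that name resolution now works (run from any node; db77 shown here):

[root@db77 ~]# ping -c 1 db78
[root@db77 ~]# ping -c 1 db79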



2. Create the hadoop user (on all 3 machines)
useradd hadoop
passwd hadoop
(password for hadoop: 123)
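To confirm the account exists on each node:

[root@db77 ~]# id hadoop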

3. Install the JDK (on all 3 machines; shown on db77)
[root@db77 ~]# mkdir /usr/java
[root@db77 ~]# mkdir /ff
[root@db77 ~]# mount -t cifs //192.168.8.11/ISO /ff -o username=vfast,password=123
[root@db77 ~]# cp jdk-8u66-linux-x64.tar.gz /usr/java
[root@db77 ~]# tar zxvf /usr/java/jdk-8u66-linux-x64.tar.gz -C /usr/java

[root@db77 ~]# vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_66
export JAVA_BIN=$JAVA_HOME/bin
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME JAVA_BIN PATH CLASSPATH


[root@db77 ~]# reboot
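Instead of a full reboot, reloading the profile is enough to pick up the new variables; a quick check that the JDK is visible:

[root@db77 ~]# source /etc/profile
[root@db77 ~]# java -version

The output should report version 1.8.0_66.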

4. Set up SSH keys for passwordless login (run on each node, accepting the defaults)
su - hadoop
[hadoop@db77 ~]$ ssh-keygen -t rsa
[hadoop@db78 ~]$ ssh-keygen -t rsa
[hadoop@db79 ~]$ ssh-keygen -t rsa

[hadoop@db77 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db77
[hadoop@db77 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db78
[hadoop@db77 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db79

[hadoop@db78 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db77
[hadoop@db78 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db78
[hadoop@db78 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db79

[hadoop@db79 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db77
[hadoop@db79 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db78
[hadoop@db79 ~]$ ssh-copy-id -i /home/hadoop/.ssh/id_rsa.pub db79
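Verify that passwordless login now works in every direction, for example from db77:

[hadoop@db77 ~]$ ssh db78 hostname
[hadoop@db77 ~]$ ssh db79 hostname

Each command should print the remote hostname without asking for a password.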


5. Install Hadoop 2.6.0 on db77 (it is copied to the workers in step 12)
[hadoop@db77 hadoop]$ cp hadoop-2.6.0.tar.gz /home/hadoop
[hadoop@db77 ~]$ tar -zxvf /home/hadoop/hadoop-2.6.0.tar.gz

[hadoop@db77 ~]$ vi .bash_profile

export JAVA_HOME=/usr/java/jdk1.8.0_66
export JAVA_BIN=$JAVA_HOME/bin
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME JAVA_BIN PATH CLASSPATH

HADOOP_HOME=/home/hadoop/hadoop-2.6.0
HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
PATH=$HADOOP_HOME/bin:$PATH
export HADOOP_HOME HADOOP_CONF_DIR PATH

[hadoop@db77 ~]$ source .bash_profile
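Confirm that the Hadoop binaries are now on the PATH:

[hadoop@db77 ~]$ hadoop version

The first line of output should read "Hadoop 2.6.0".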

6. Edit core-site.xml
[hadoop@db77 ~]$ cd hadoop-2.6.0/etc/hadoop/
[hadoop@db77 hadoop]$ vi core-site.xml

<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://db77:9000</value>
<description>NameNode URI.</description>
</property>

<property>
<name>io.file.buffer.size</name>
<value>131072</value>
<description>Size of read/write buffer used in SequenceFiles.</description>
</property>
</configuration>

7. Edit hdfs-site.xml
[hadoop@db77 hadoop-2.6.0]$ mkdir -p dfs/name
[hadoop@db77 hadoop-2.6.0]$ mkdir -p dfs/data
[hadoop@db77 hadoop-2.6.0]$ mkdir -p dfs/namesecondary

[hadoop@db77 hadoop]$ vi hdfs-site.xml

<configuration>

<property>
<name>dfs.namenode.secondary.http-address</name>
<value>db77:50090</value>
<description>The secondary namenode http server address and port.</description>
</property>

<property>
<name>dfs.namenode.name.dir</name>
<value>file:///home/hadoop/hadoop-2.6.0/dfs/name</value>
<description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
</property>

<property>
<name>dfs.datanode.data.dir</name>
<value>file:///home/hadoop/hadoop-2.6.0/dfs/data</value>
<description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
</property>

<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>file:///home/hadoop/hadoop-2.6.0/dfs/namesecondary</value>
<description>Determines where on the local filesystem the DFS secondary name node should store the temporary images to merge. If this is a comma-delimited list of directories then the image is replicated in all of the directories for redundancy.</description>
</property>

<property>
<name>dfs.replication</name>
<value>2</value>
</property>
</configuration>
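A malformed XML file is a common cause of daemon startup failures; if xmllint is installed, the edited files can be checked quickly:

[hadoop@db77 hadoop]$ xmllint --noout core-site.xml hdfs-site.xml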


8. Edit mapred-site.xml

[hadoop@db77 hadoop]$ cp mapred-site.xml.template mapred-site.xml
[hadoop@db77 hadoop]$ vim mapred-site.xml

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
<description>The runtime framework for executing MapReduce jobs. Can be one of local, classic or yarn.</description>
</property>

<property>
<name>mapreduce.jobhistory.address</name>
<value>db77:10020</value>
<description>MapReduce JobHistory Server IPC host:port</description>
</property>

<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>db77:19888</value>
<description>MapReduce JobHistory Server Web UI host:port</description>
</property>
</configuration>

*****
The property mapreduce.framework.name selects the runtime framework used to execute MapReduce jobs. It defaults to "local" and must be set to "yarn" here.
*****
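Note that start-all.sh (used in the verification step below) does not start the JobHistory Server; to use the addresses configured above, start it separately once the cluster is up:

[hadoop@db77 hadoop-2.6.0]$ sbin/mr-jobhistory-daemon.sh start historyserver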

9. Edit yarn-site.xml
[hadoop@db77 hadoop]$ vi yarn-site.xml

<configuration>

<property>
<name>yarn.resourcemanager.hostname</name>
<value>db77</value>
<description>The hostname of the RM.</description>
</property>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
<description>Shuffle service that needs to be set for MapReduce applications.</description>
</property>
</configuration>

10. Set JAVA_HOME in hadoop-env.sh
[hadoop@db77 hadoop]$ vi hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_66

11. List the worker nodes in the slaves file
[hadoop@db77 hadoop]$ vi slaves
db78
db79

12. Copy the Hadoop directory to the worker nodes
[hadoop@db77 ~]$ scp -r ./hadoop-2.6.0/ hadoop@db78:/home/hadoop/
[hadoop@db77 ~]$ scp -r ./hadoop-2.6.0/ hadoop@db79:/home/hadoop/
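The workers also need the JAVA_HOME and Hadoop PATH settings. Assuming the JDK was installed at the same path on all three machines (step 3), one option is to copy the same profile:

[hadoop@db77 ~]$ scp ~/.bash_profile hadoop@db78:/home/hadoop/
[hadoop@db77 ~]$ scp ~/.bash_profile hadoop@db79:/home/hadoop/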

Verification:

[hadoop@db77 hadoop-2.6.0]$ bin/hdfs namenode -format
[hadoop@db77 hadoop-2.6.0]$ sbin/start-all.sh

[hadoop@db77 hadoop-2.6.0]$ jps
7054 SecondaryNameNode
7844 Jps
7318 NameNode
7598 ResourceManager
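DataNode and NodeManager run on the workers, so they do not appear in the jps output above; check them remotely (using the full path to jps, since the profile is not sourced for non-interactive SSH commands):

[hadoop@db77 ~]$ ssh db78 /usr/java/jdk1.8.0_66/bin/jps
[hadoop@db77 ~]$ ssh db79 /usr/java/jdk1.8.0_66/bin/jps

Each worker should list DataNode and NodeManager.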

[hadoop@db77 hadoop-2.6.0]$ bin/hadoop fs -ls /
[hadoop@db77 hadoop-2.6.0]$ bin/hadoop fs -mkdir /aaa
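To confirm that both DataNodes registered with the NameNode:

[hadoop@db77 hadoop-2.6.0]$ bin/hdfs dfsadmin -report

The report should show 2 live datanodes. The default web UIs in Hadoop 2.6 are the NameNode at http://db77:50070 and the ResourceManager at http://db77:8088.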
