Hadoop High-Availability Cluster Setup

# Create the group and the user
groupadd -g 1000 rdato
useradd  -u 1001 -g rdato rdato
mkdir -p /u01/
chown -R rdato:rdato /u01/


# Edit /etc/hosts
cat >> /etc/hosts << EOF
192.168.168.141   sparkgc1
192.168.168.142   sparkgc2
192.168.168.143   sparkgc3
EOF


# Disable the firewall
systemctl disable firewalld
systemctl stop firewalld


# Disable SELinux: edit /etc/selinux/config and change SELINUX=enforcing to SELINUX=disabled
setenforce 0




# Set up passwordless SSH between the nodes (run the following on every node)
su - rdato
cd $HOME
mkdir .ssh
chmod 700 .ssh
cd .ssh
ssh-keygen -t rsa
# On node 1: append the public key to authorized_keys and scp it to node 2
cat  *rsa.pub >> authorized_keys
scp authorized_keys rdato@sparkgc2:/home/rdato/.ssh
# On node 2: append node 2's public key and scp the file to node 3
cat  *rsa.pub >> authorized_keys
scp authorized_keys rdato@sparkgc3:/home/rdato/.ssh
# On node 3: append node 3's public key, then scp the complete file back to nodes 1 and 2
cat  *rsa.pub >> authorized_keys
scp authorized_keys rdato@sparkgc1:/home/rdato/.ssh
scp authorized_keys rdato@sparkgc2:/home/rdato/.ssh


# On every node, restrict permissions on authorized_keys
chmod 600 /home/rdato/.ssh/authorized_keys
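
# (Optional) pre-populate known_hosts on every node to skip the interactive
# host-key prompt on the first connection:
ssh-keyscan sparkgc1 sparkgc2 sparkgc3 >> /home/rdato/.ssh/known_hosts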


# Verify passwordless SSH from each node
ssh sparkgc1 date
ssh sparkgc2 date
ssh sparkgc3 date




######################################### Install the JDK
yum install -y jdk-8u131-linux-x64.rpm 


######################################### Install Scala
yum install -y scala-2.12.2.rpm
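
# Verify the installed versions:
java -version
scala -version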


######################################### Install ZooKeeper
su - rdato
cd /u01
tar -zxvf zookeeper-3.4.9.tar.gz
mv zookeeper-3.4.9 /u01/zookeeper
cp /u01/zookeeper/conf/zoo_sample.cfg /u01/zookeeper/conf/zoo.cfg
# Set the dataDir parameter in zoo.cfg (see the sed sketch after this block), then append the server list:
cat >> /u01/zookeeper/conf/zoo.cfg << EOF
server.1=sparkgc1:42888:43888
server.2=sparkgc2:42888:43888
server.3=sparkgc3:42888:43888
EOF
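
# A non-interactive way to set dataDir (a sketch; zoo_sample.cfg ships with dataDir=/tmp/zookeeper):
sed -i 's|^dataDir=.*|dataDir=/u01/zookeeper/data|' /u01/zookeeper/conf/zoo.cfg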


# Create the dataDir directory
mkdir -p /u01/zookeeper/data/
# Assign a unique myid on each node:
echo 1 > /u01/zookeeper/data/myid   # on sparkgc1
echo 2 > /u01/zookeeper/data/myid   # on sparkgc2
echo 3 > /u01/zookeeper/data/myid   # on sparkgc3
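
# Equivalently, derive myid from the hostname so one snippet runs unchanged on every node (a sketch):
case $(hostname) in
  sparkgc1) echo 1 > /u01/zookeeper/data/myid ;;
  sparkgc2) echo 2 > /u01/zookeeper/data/myid ;;
  sparkgc3) echo 3 > /u01/zookeeper/data/myid ;;
esac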




######################################### Install Hadoop
# Download (this guide installs hadoop-2.8.0; substitute the version for XXX)
wget http://apache.mirrors.tds.net//hadoop/common/hadoop-XXX/hadoop-XXX.tar.gz
# Extract and install Hadoop
su - rdato
cd /u01
tar -zxvf hadoop-2.8.0.tar.gz
mv hadoop-2.8.0 hadoop


######################################### Configure environment variables
# Quote the heredoc delimiter so $PATH is expanded at login time, not at write time
cat >> /home/rdato/.bash_profile << 'EOF'
JAVA_HOME=/usr/java/jdk1.8.0_131; export JAVA_HOME
SCALA_HOME=/usr/share/scala; export SCALA_HOME
HADOOP_HOME=/u01/hadoop; export HADOOP_HOME
SPARK_HOME=/u01/spark; export SPARK_HOME
PATH=/usr/java/jdk1.8.0_131/bin:/usr/share/scala/bin:/u01/hadoop/bin:/u01/spark/bin:$PATH; export PATH
EOF
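
# Load the new variables into the current shell:
source /home/rdato/.bash_profile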




######################################### Configure on one node, then scp to all other nodes
# Edit /u01/hadoop/etc/hadoop/hadoop-env.sh and yarn-env.sh;
# in both files set: export JAVA_HOME=/usr/java/jdk1.8.0_131
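
# A non-interactive sketch, assuming both files already contain an uncommented
# "export JAVA_HOME=" line (stock yarn-env.sh may have it commented out; edit by hand in that case):
sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/java/jdk1.8.0_131|' \
    /u01/hadoop/etc/hadoop/hadoop-env.sh /u01/hadoop/etc/hadoop/yarn-env.sh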


# Edit /u01/hadoop/etc/hadoop/core-site.xml and add the following inside <configuration>:


<property>
    <name>fs.trash.interval</name>
    <value>1440</value>
    <description>Deleted HDFS files first go to the trash; they are kept for at most one day (1440 minutes)</description>
</property>


<property>
    <name>fs.defaultFS</name>
    <value>hdfs://sparkcluster</value>
    <description>NameNode access URI in HA mode; must match dfs.nameservices in hdfs-site.xml</description>
</property>


<property>
    <name>hadoop.tmp.dir</name>
    <value>/u01/hadoop/tmp</value>
    <description>Hadoop temporary directory</description>
</property>


<property>
    <name>ha.zookeeper.quorum</name>
    <value>sparkgc1:2181,sparkgc2:2181,sparkgc3:2181</value>
    <description>ZooKeeper quorum address</description>
</property>






# Edit /u01/hadoop/etc/hadoop/hdfs-site.xml and add the following inside <configuration>:


<property>
    <name>dfs.nameservices</name>
    <value>sparkcluster</value>
    <description>The HDFS nameservice is sparkcluster; must match fs.defaultFS in core-site.xml</description>
</property>


<property>
    <name>dfs.ha.namenodes.sparkcluster</name>
    <value>nn1,nn2</value>
    <description>sparkcluster has two NameNodes: nn1 and nn2</description>
</property>


<property>
    <name>dfs.namenode.rpc-address.sparkcluster.nn1</name>
    <value>sparkgc1:9000</value>
    <description>RPC address of nn1</description>
</property>


<property>
    <name>dfs.namenode.http-address.sparkcluster.nn1</name>
    <value>sparkgc1:50070</value>
    <description>HTTP address of nn1</description>
</property>


<property>
    <name>dfs.namenode.rpc-address.sparkcluster.nn2</name>
    <value>sparkgc2:9000</value>
    <description>RPC address of nn2</description>
</property>


<property>
    <name>dfs.namenode.http-address.sparkcluster.nn2</name>
    <value>sparkgc2:50070</value>
    <description>HTTP address of nn2</description>
</property>


<property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://sparkgc1:8485;sparkgc2:8485;sparkgc3:8485/sparkcluster</value>
    <description>Where the NameNode edit log is stored on the JournalNodes</description>
</property>


<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/u01/hadoop/journal</value>
    <description>Local directory where each JournalNode stores its data</description>
</property>


<property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>Enable automatic NameNode failover</description>
</property>


<property>
    <name>dfs.client.failover.proxy.provider.sparkcluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    <description>Failover proxy provider used by HDFS clients</description>
</property>


<property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
    <description>Fencing methods; list multiple methods one per line</description>
</property>


<property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/rdato/.ssh/id_rsa</value>
    <description>The sshfence method requires passwordless SSH with this private key</description>
</property>


<property>
    <name>dfs.replication</name>
    <value>3</value>
    <description>Number of block replicas</description>
</property>


<property>
    <name>ha.zookeeper.quorum</name>
    <value>sparkgc1:2181,sparkgc2:2181,sparkgc3:2181</value>
    <description>ZooKeeper quorum address</description>
</property>






# Copy the template file
cp /u01/hadoop/etc/hadoop/mapred-site.xml.template /u01/hadoop/etc/hadoop/mapred-site.xml
# Edit /u01/hadoop/etc/hadoop/mapred-site.xml and add the following inside <configuration>:


<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description>Run MapReduce on YARN</description>
</property>




# Edit /u01/hadoop/etc/hadoop/yarn-site.xml and add the following inside <configuration>:


<property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>sparkgc1:2181,sparkgc2:2181,sparkgc3:2181</value>
    <description>ZooKeeper connection address</description>
</property>


<property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>SparkCluster</value>
    <description>Cluster ID of the ResourceManager</description>
</property>


<property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
    <description>Enable ResourceManager HA</description>
</property>


<property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
    <description>Logical IDs of the ResourceManagers</description>
</property>


<property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>sparkgc1</value>
    <description>Host of rm1</description>
</property>


<property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>sparkgc2</value>
    <description>Host of rm2</description>
</property>


<property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>sparkgc1:8088</value>
    <description>Web UI address of rm1</description>
</property>


<property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>sparkgc2:8088</value>
    <description>Web UI address of rm2</description>
</property>


<property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>Enable automatic ResourceManager failover</description>
</property>


<property>
    <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
    <value>true</value>
    <description>Use the embedded leader elector for automatic failover</description>
</property>


<property>
    <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
    <value>/yarn-leader-election</value>
    <description>ZooKeeper base path used for ResourceManager leader election</description>
</property>


<property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
    <description>Enable ResourceManager state recovery</description>
</property>


<property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    <description>Persist ResourceManager state in ZooKeeper</description>
</property>


<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    <description>Auxiliary shuffle service run by the NodeManager</description>
</property>


<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    <description>Shuffle handler implementation</description>
</property>




# Edit /u01/hadoop/etc/hadoop/slaves: remove localhost and list the worker nodes
sparkgc1
sparkgc2
sparkgc3


# Copy the configuration to the other nodes (the same files must also go to sparkgc3; see the loop sketch after these commands)
scp /u01/hadoop/etc/hadoop/hadoop-env.sh     rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/yarn-env.sh       rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/core-site.xml     rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/hdfs-site.xml     rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/mapred-site.xml   rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/yarn-site.xml     rdato@sparkgc2:/u01/hadoop/etc/hadoop/
scp /u01/hadoop/etc/hadoop/slaves            rdato@sparkgc2:/u01/hadoop/etc/hadoop/
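
# An equivalent loop covering both remaining nodes:
for h in sparkgc2 sparkgc3; do
  scp /u01/hadoop/etc/hadoop/{hadoop-env.sh,yarn-env.sh,core-site.xml,hdfs-site.xml,mapred-site.xml,yarn-site.xml,slaves} \
      rdato@$h:/u01/hadoop/etc/hadoop/
done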




################################ The formatting steps below are needed only for the first startup
# Start ZooKeeper on each node
/u01/zookeeper/bin/zkServer.sh start
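
# Optionally confirm each node's role (one leader, two followers):
/u01/zookeeper/bin/zkServer.sh status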


# On one node, format the hadoop-ha znode in ZooKeeper
/u01/hadoop/bin/hdfs zkfc -formatZK


# Check that the hadoop-ha znode exists after formatting
/u01/zookeeper/bin/zkCli.sh -server sparkgc1:2181,sparkgc2:2181,sparkgc3:2181
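
# Inside the zkCli shell, the nameservice znode should now exist (expected output shown as a comment):
# ls /hadoop-ha
# [sparkcluster]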


# Start the DFSZKFailoverController on both NameNode hosts
ssh sparkgc1
/u01/hadoop/sbin/hadoop-daemon.sh start zkfc 
ssh sparkgc2
/u01/hadoop/sbin/hadoop-daemon.sh start zkfc
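
# Verify the process is running on each NameNode host:
jps | grep DFSZKFailoverController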




# From any one node, start the JournalNodes (hadoop-daemons.sh runs the command on every host in the slaves file)
/u01/hadoop/sbin/hadoop-daemons.sh start journalnode


# Format the first NameNode (run on sparkgc1)
/u01/hadoop/bin/hdfs namenode -format
# Start the first NameNode
/u01/hadoop/sbin/hadoop-daemon.sh start namenode
# On sparkgc2, sync the metadata from the active NameNode, then start the standby
/u01/hadoop/bin/hdfs namenode -bootstrapStandby
/u01/hadoop/sbin/hadoop-daemon.sh start namenode
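
# Verify a NameNode process is running on both hosts:
jps | grep NameNode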


# Check the NameNode states
hdfs haadmin -getServiceState nn1
# If both are standby, manually activate nn1
hdfs haadmin -transitionToActive --forcemanual nn1
# Fail over from nn1 to nn2
hdfs haadmin -failover nn1 nn2




# Start everything
/u01/hadoop/sbin/start-all.sh
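
# In Hadoop 2.x start-yarn.sh starts a ResourceManager only on the local node,
# so start the standby RM on sparkgc2 separately:
ssh sparkgc2 /u01/hadoop/sbin/yarn-daemon.sh start resourcemanager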


# Start the JobHistory server
/u01/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver




# Check the cluster status
hdfs dfsadmin -report
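
# A quick sketch to list the Java daemons on every node (full path to jps, since
# .bash_profile is not sourced for non-interactive ssh commands):
for h in sparkgc1 sparkgc2 sparkgc3; do
  echo "== $h =="
  ssh $h /usr/java/jdk1.8.0_131/bin/jps
done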




# Web access
# YARN ResourceManager web UI
http://192.168.168.141:8088/
# HDFS NameNode web UI
http://192.168.168.141:50070/




I once hit the following error on startup:
ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid 7b189d18-eb27-4bd9-80eb-218717f10793) service to sparkgc1/192.168.168.141:9000. Exiting.
After the initial format, the cluster had been started and used, and the format command (hdfs namenode -format) was later run a second time.
Re-formatting regenerates the NameNode's clusterID while each DataNode keeps its old one, so the two no longer match.
The fix is to copy the clusterID from the name directory's ./current/VERSION into the data directory's ./current/VERSION on each DataNode.
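
# A sketch of the fix, assuming the default layout under hadoop.tmp.dir
# (dfs/name on the NameNode, dfs/data on each DataNode); adjust to your actual
# dfs.namenode.name.dir / dfs.datanode.data.dir paths:
CID=$(grep '^clusterID=' /u01/hadoop/tmp/dfs/name/current/VERSION)          # on the NameNode
sed -i "s/^clusterID=.*/${CID}/" /u01/hadoop/tmp/dfs/data/current/VERSION   # on each DataNode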