hadoop2.4.0 [自动HA]搭建
来源:互联网 发布:一路向前知乎 编辑:程序博客网 时间:2024/05/17 02:59
hadoop2.4.0自动的HA
1、 core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop-2.4.0/tmp</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>itr-mastertest01:2181,itr-mastertest02:2181,itr-nodetest01:2181</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>2000</value>
</property>
</configuration>
2、hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>itr-mastertest01:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>itr-mastertest01:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>itr-mastertest02:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>itr-mastertest02:50070</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled.mycluster</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://itr-mastertest01:8485;itr-mastertest02:8485;itr-nodetest01:8485/mycluster</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/hadoop-2.4.0/tmp/journal</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
</configuration>
3、yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>itr-mastertest01</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
4、mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
5、slaves
itr-nodetest01
itr-nodetest02
6、hadoop-env.sh修改
JAVA_HOME指定为jdk安装目录,JAVA_HOME=/usr/local/jdk1.7.0_45
7、scp hadoop-2.4.0
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-mastertest02:/usr/local/
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-nodetest01:/usr/local/
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-nodetest02:/usr/local/
8、启动journalnode
[root@itr-mastertest01 local]# hadoop-daemon.sh start journalnode
[root@itr-mastertest02 local]# hadoop-daemon.sh start journalnode
[root@itr-nodetest01 local]# hadoop-daemon.sh start journalnode
9、格式化zkfc集群
[root@itr-mastertest01 local]# hdfs zkfc -formatZK
10、格式化hdfs文件系统[格式化namenode,接着启动namenode]
[root@itr-mastertest01 tmp]# hdfs namenode -format -clusterId mycluster
[root@itr-mastertest01 tmp]# hadoop-daemon.sh start namenode
[root@itr-mastertest02 tmp]# hdfs namenode -bootstrapStandby
[root@itr-mastertest02 tmp]# hadoop-daemon.sh start namenode
11、启动datanode
[root@itr-mastertest01 local]# hadoop-daemons.sh start datanode
12、启动zkfc [DFSZKFailoverController]
[root@itr-mastertest01 local]# hadoop-daemon.sh start zkfc
[root@itr-mastertest02 tmp]# hadoop-daemon.sh start zkfc
13、启动yarn
[root@itr-mastertest01 local]# yarn-daemon.sh start resourcemanager
[root@itr-mastertest01 local]# yarn-daemons.sh start nodemanager
14、测试hdfs+Yarn+MR
hadoop fs -put /hadoop-env.sh /testdata
hadoop jar hadoop-mapreduce-examples-2.4.0.jar wordcount /testdata /output
15、启动historyserver [查询作业执行的详细情况]
[root@itr-mastertest01 local]# mr-jobhistory-daemon.sh start historyserver
注明:前提是自己已经编译好的hadoop版本或者cloudrea公司的cdh版本! 大数据讨论群:288396468
1、 core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop-2.4.0/tmp</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>itr-mastertest01:2181,itr-mastertest02:2181,itr-nodetest01:2181</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>2000</value>
</property>
</configuration>
2、hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>itr-mastertest01:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>itr-mastertest01:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>itr-mastertest02:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>itr-mastertest02:50070</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled.mycluster</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://itr-mastertest01:8485;itr-mastertest02:8485;itr-nodetest01:8485/mycluster</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/hadoop-2.4.0/tmp/journal</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
</configuration>
3、yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>itr-mastertest01</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
4、mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
5、slaves
itr-nodetest01
itr-nodetest02
6、hadoop-env.sh修改
JAVA_HOME指定为jdk安装目录,JAVA_HOME=/usr/local/jdk1.7.0_45
7、scp hadoop-2.4.0
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-mastertest02:/usr/local/
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-nodetest01:/usr/local/
[root@itr-mastertest01 local]# scp -rq hadoop-2.4.0 itr-nodetest02:/usr/local/
8、启动journalnode
[root@itr-mastertest01 local]# hadoop-daemon.sh start journalnode
[root@itr-mastertest02 local]# hadoop-daemon.sh start journalnode
[root@itr-nodetest01 local]# hadoop-daemon.sh start journalnode
9、格式化zkfc集群
[root@itr-mastertest01 local]# hdfs zkfc -formatZK
10、格式化hdfs文件系统[格式化namenode,接着启动namenode]
[root@itr-mastertest01 tmp]# hdfs namenode -format -clusterId mycluster
[root@itr-mastertest01 tmp]# hadoop-daemon.sh start namenode
[root@itr-mastertest02 tmp]# hdfs namenode -bootstrapStandby
[root@itr-mastertest02 tmp]# hadoop-daemon.sh start namenode
11、启动datanode
[root@itr-mastertest01 local]# hadoop-daemons.sh start datanode
12、启动zkfc [DFSZKFailoverController]
[root@itr-mastertest01 local]# hadoop-daemon.sh start zkfc
[root@itr-mastertest02 tmp]# hadoop-daemon.sh start zkfc
13、启动yarn
[root@itr-mastertest01 local]# yarn-daemon.sh start resourcemanager
[root@itr-mastertest01 local]# yarn-daemons.sh start nodemanager
14、测试hdfs+Yarn+MR
hadoop fs -put /hadoop-env.sh /testdata
hadoop jar hadoop-mapreduce-examples-2.4.0.jar wordcount /testdata /output
15、启动historyserver [查询作业执行的详细情况]
[root@itr-mastertest01 local]# mr-jobhistory-daemon.sh start historyserver
转载请注明出处:http://blog.csdn.net/sparkjvm
0 0
- hadoop2.4.0 [自动HA]搭建
- hadoop2.4.0 ha 搭建
- 搭建hadoop2 HA
- hadoop2.6.0分布式集群搭建(手动和自动HA)
- 搭建hadoop2.6.0 HA及YARN HA
- 搭建hadoop2.6.0 HA及YARN HA
- 搭建hadoop2.6.0 HA及YARN HA
- 搭建hadoop2.6.0 HA及YARN HA
- hadoop2.2+HA 集群搭建
- Hadoop2+HA+YARN环境搭建
- hadoop2.6.0集群HA搭建
- hadoop2集群搭建+HA配置
- hadoop2.5.2HDFS HA搭建
- Hadoop2.5 HA搭建说明
- hadoop2.0 HDFS搭建和HA切换
- Hadoop2 上HDFS HA 搭建过程
- hadoop2.2+zk ha环境搭建
- Hadoop2搭建可手工配置的HA
- 为什么答案是-54
- iOS微信分享开发
- Android比DES加密更安全的算法——3DES加密算法
- 视频语音聊天室办理会员是骗人的吗?
- 无向网图的最短路径--Floyd算法
- hadoop2.4.0 [自动HA]搭建
- vim 字体是横着的解决办法
- UML类图几种关系的总结
- uva 10105 uva 10910 uva 10943(排列组合C)
- windows phone 学习资料随手记
- 新的一年,my dream!
- EBS Form开发建立一个最简单的Form并部署到到EBS中(1)
- Android我们要友好的告诉用户,程序要崩溃了
- Java图形化界面设计——布局管理器之BorderLayout(边界布局)