centos 6.6 + hadoop2.7.3+zookeeper-3.4.8+hbase-1.1.2

来源:互联网 发布:上海姑娘 知乎 编辑:程序博客网 时间:2024/05/16 13:00

master1 192.168.0.71  NameNode ResourceManager HMaster

master2 192.168.0.72  SecondaryNameNode

slave1  192.168.0.73  DataNode NodeManager QuorumPeerMain HRegionServer

slave2  192.168.0.74  DataNode NodeManager QuorumPeerMain HRegionServer

slave3  192.168.0.75  DataNode NodeManager QuorumPeerMain HRegionServer

1、软件下载地址

http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz

http://mirrors.hust.edu.cn/apache/zookeeper/zookeeper-3.4.8/zookeeper-3.4.8.tar.gz

hbase-1.1.2                 http://vdisk.weibo.com/s/zWP8qIll81I6s

 

linux通过wget直接下载jdk,避免用户验证


wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/7u71-b14/jdk-7u71-linux-x64.tar.gz

# chkconfig iptables off  # 在开机时禁用 iptables 服务(注意 iptables 与 off 之间有空格)

修改 hosts 和主机名称(/etc/sysconfig/network)

关闭selinux

2、配置profile和无密钥访问authorized_keys(authorized_keys这个很简单,我就不一一说明了)

# java — JDK used by Hadoop / ZooKeeper / HBase
export JAVA_HOME=/opt/jdk1.7.0
export JRE_HOME=$JAVA_HOME/jre
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=./:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
# hadoop — all component homes point at the same install
export HADOOP_HOME=/opt/hadoop-2.7.3
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# java.library.path must point at lib/native (where libhadoop.so lives).
# The original used $HADOOP_HOME/lib, which causes the
# "Unable to load native-hadoop library" warning at startup.
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native
# zookeeper
export ZOOKEEPER_HOME=/opt/zookeeper-3.4.8
export PATH=$ZOOKEEPER_HOME/bin:$PATH
# hbase
export HBASE_HOME=/opt/hbase-1.1.2
export PATH=$PATH:$HBASE_HOME/bin

3、配置hadoop

3.1)

# Unpack Hadoop (only if the tarball is present) and create the working
# directories referenced by core-site.xml / hdfs-site.xml.
# mkdir -p: creates parents and is safe to re-run (plain mkdir errors
# if the directory already exists).
if [ -f hadoop-2.7.3.tar.gz ]; then
  tar -zxvf hadoop-2.7.3.tar.gz
fi
mkdir -p ./hadoop-2.7.3/tmp       # hadoop.tmp.dir
mkdir -p ./hadoop-2.7.3/logs
mkdir -p ./hadoop-2.7.3/hdf/data  # dfs.datanode.data.dir
mkdir -p ./hadoop-2.7.3/hdf/name  # dfs.namenode.name.dir

3.2)vi /opt/hadoop-2.7.3/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/opt/jdk1.7.0

3.3)vi /opt/hadoop-2.7.3/etc/hadoop/yarn-env.sh

export JAVA_HOME=/opt/jdk1.7.0

3.4)修改slaves

配置内容:
删除:localhost
添加:
slave1
slave2
slave3

3.5) vi /opt/hadoop-2.7.3/etc/hadoop/core-site.xml 

<!-- HDFS entry point. fs.defaultFS replaces the deprecated fs.default.name
     key (the old key still works in 2.x but is deprecated). -->
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://master1:9000</value>
</property>
<!-- Base directory for Hadoop's temporary files (created in step 3.1) -->
<property>
  <name>hadoop.tmp.dir</name>
  <value>file:/opt/hadoop-2.7.3/tmp</value>
</property>
<!-- Seconds between two periodic SecondaryNameNode checkpoints -->
<property>
  <name>fs.checkpoint.period</name>
  <value>120</value>
  <description>The number of seconds between two periodic checkpoints.</description>
</property>
<!-- Edit-log size (bytes) that also triggers a checkpoint -->
<property>
  <name>fs.checkpoint.size</name>
  <value>67108864</value>
</property>

3.6) vi /opt/hadoop-2.7.3/etc/hadoop/hdfs-site.xml 

<!-- Local path where each DataNode stores HDFS block data -->
<property>
  <name>dfs.datanode.data.dir</name>
  <value>/opt/hadoop-2.7.3/hdf/data</value>
  <final>true</final>
</property>
<!-- Local path where the NameNode stores fsimage and edit logs -->
<property>
  <name>dfs.namenode.name.dir</name>
  <value>/opt/hadoop-2.7.3/hdf/name</value>
  <final>true</final>
</property>
<!-- SecondaryNameNode HTTP endpoint (runs on master2 per the host plan) -->
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>master2:50090</value>
</property>

3.7)vi /opt/hadoop-2.7.3/etc/hadoop/mapred-site.xml

<!-- Run MapReduce on top of YARN -->
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<!-- MapReduce JobHistory server RPC endpoint -->
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>master1:10020</value>
</property>
<!-- MapReduce JobHistory server web UI -->
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>master1:19888</value>
</property>

3.8)vi /opt/hadoop-2.7.3/etc/hadoop/yarn-site.xml 

<!-- Required: registers the MapReduce shuffle as a NodeManager auxiliary
     service. Without this property the shuffle.class setting below is never
     loaded and MapReduce jobs fail at the reduce phase. -->
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<!-- Corrected class name: "org.apache.mapred.ShuffleHandler" (as in the
     original) does not exist; the class lives under org.apache.hadoop. -->
<property>
  <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- ResourceManager endpoints, all on master1 -->
<property>
  <name>yarn.resourcemanager.address</name>
  <value>master1:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>master1:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>master1:8031</value>
</property>
<!-- Stray pasted text ("/opt/zookeeper-3.4.8/conf/") removed from this entry -->
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>master1:8033</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>master1:8088</value>
</property>

注意:把文件分发给其他服务器,进入hadoop用户格式化,hdfs namenode -format

然后启动 hadoop:执行 start-dfs.sh 和 start-yarn.sh(start-all.sh 在 2.x 中已标记为弃用,但仍可使用)

4、安装zookeeper

mkdir /opt/zookeeper-3.4.8/data

cd /opt/zookeeper-3.4.8/conf/

cpzoo_sample.cfg zoo.cfg

vi /opt/zookeeper-3.4.8/conf/zoo.cfg 

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.8/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# Ensemble members: server.N=host:quorumPort:electionPort.
# N must match the integer written to ${dataDir}/myid on that host
# (see the myid steps below: slave1 -> 1, slave2 -> 2, slave3 -> 3).
server.1=slave1:2888:3888
server.2=slave2:2888:3888
server.3=slave3:2888:3888

server.1(slave1)的 myid 内容为 1

# cd /opt/zookeeper-3.4.8/data/

# touch myid

# echo 1 >myid

server.2(slave2)的 myid 内容为 2

# cd /opt/zookeeper-3.4.8/data/

# touch myid

# echo 2 >myid

server.3(slave3)的 myid 内容为 3

# cd /opt/zookeeper-3.4.8/data/

# touch myid

# echo 3 >myid


5、配置hbase

vi /opt/hbase-1.1.2/conf/regionservers 

slave1
slave2
slave3

vi /opt/hbase-1.1.2/conf/hbase-site.xml 

<!-- HBase data root on HDFS; host:port must match fs.defaultFS in core-site.xml -->
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://master1:9000/hbase</value>
</property>
<!-- Fully distributed mode (HMaster and HRegionServers on separate JVMs/hosts) -->
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<!-- External ZooKeeper ensemble (the three slaves running QuorumPeerMain) -->
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>slave1,slave2,slave3</value>
</property>
<!-- hbase.master takes host:port, not an hdfs:// URL
     (original value was "hdfs://master1:60000") -->
<property>
  <name>hbase.master</name>
  <value>master1:60000</value>
</property>
<!-- Must match dataDir in zoo.cfg -->
<property>
  <name>hbase.zookeeper.property.dataDir</name>
  <value>/opt/zookeeper-3.4.8/data</value>
</property>
<!-- Must match clientPort in zoo.cfg -->
<property>
  <name>hbase.zookeeper.property.clientPort</name>
  <value>2181</value>
</property>
<!-- Default HMaster web-UI port -->
<property>
  <name>hbase.master.info.port</name>
  <value>16010</value>
</property>
<!-- Default HRegionServer web-UI port -->
<property>
  <name>hbase.regionserver.info.port</name>
  <value>16030</value>
</property>

常见问题:

报错:ERROR: Can't get master address from ZooKeeper

 

hadoop fs -mkdir /hbase

hadoop fs -chmod 777 /hbase

hadoop fs -chmod -R 777 /hbase

 

http://blog.csdn.net/a925907195/article/details/41126609 (hadoop namenode 分开配置)


0 0
原创粉丝点击