_00001 Hadoop-1.X Pseudo-Distributed Installation

Author: 妳那伊抹微笑
Signature: The greatest distance in the world is not the ends of the earth, but that I stand right in front of you and you cannot feel my presence
Focus areas: Flume+Kafka+Storm+Redis/Hbase+Hadoop+Hive+Mahout+Spark ... cloud computing
Reprint notice: reprinting is allowed, but you must credit the original source and author with a hyperlink and keep this copyright notice. Thanks!
QQ group: 214293307 云计算之嫣然伊笑 (looking forward to learning and improving together)


# Hadoop preface

# Deployment structure of the Hadoop test cluster

# System and component dependencies


# Deployment structure of the production environment

# Day 1: Setting up the pseudo-distributed test environment

# Software to prepare

# VMware 9.0.2

# Operating system: CentOS 6.4

# jdk-6u45-linux-i586.bin

# hadoop-1.1.2.tar.gz

# Environment setup, part 1 (base environment)

# Install CentOS 6.4 in the virtual machine

# Set the VM's network connection mode to NAT

# Create the hadoop group and user (password: hadoop)

[root@localhost home]# groupadd hadoop
[root@localhost home]# useradd -g hadoop hadoop
[root@localhost home]# passwd hadoop
Changing password for user hadoop.
New password:
BAD PASSWORD: it is based on a dictionary word
BAD PASSWORD: is too simple
Retype new password:
passwd: all authentication tokens updated successfully.
[root@localhost home]#

# Time service (clock synchronization)

Schedule ntpdate via cron; the entry below syncs the clock against cn.pool.ntp.org every day at 01:00.

[root@localhost home]# crontab -e
[root@localhost home]# crontab -l
0 1 * * * /usr/sbin/ntpdate -u cn.pool.ntp.org

# Installing jdk-6u45-linux-i586.bin (no execute permission by default; add it, then run)

[root@localhost java]# pwd
/usr/local/java
[root@localhost java]# ll
total 130600
-rwxrw-rw-. 1 root root 61927560 Jun  7  2013 hadoop-1.1.2.tar.gz
-rw-r--r--. 1 root root 71799552 Oct 14 14:33 jdk-6u45-linux-i586.bin
[root@localhost java]# chmod u+x jdk-6u45-linux-i586.bin
[root@localhost java]# ll
total 130600
-rwxrw-rw-. 1 root root 61927560 Jun  7  2013 hadoop-1.1.2.tar.gz
-rwxr--r--. 1 root root 71799552 Oct 14 14:33 jdk-6u45-linux-i586.bin
[root@localhost java]# ./jdk-6u45-linux-i586.bin

# Configure the environment variables (instead of editing profile directly, create a java.sh file under /etc/profile.d with the Java variables; profile loads it automatically)
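The post never shows what goes into java.sh; a minimal sketch, assuming the JDK unpacked to /usr/local/java/jdk1.6.0_45 as in the transcript above (the CLASSPATH line matches the fix described in problem 5 at the end of this post):

export JAVA_HOME=/usr/local/java/jdk1.6.0_45
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar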

 

[root@localhost jdk1.6.0_45]# pwd
/usr/local/java/jdk1.6.0_45
[root@localhost jdk1.6.0_45]# vi /etc/profile.d/java.sh
[root@localhost jdk1.6.0_45]#
[root@localhost jdk1.6.0_45]# java
bash: java: command not found
[root@localhost jdk1.6.0_45]# source /etc/profile    # make the java.sh configuration take effect
[root@localhost jdk1.6.0_45]# java -version
java version "1.6.0_45"
Java(TM) SE Runtime Environment (build 1.6.0_45-b06)
Java HotSpot(TM) Client VM (build 20.45-b01, mixed mode, sharing)
[root@localhost jdk1.6.0_45]# javac -version
javac 1.6.0_45
[root@localhost jdk1.6.0_45]#

# Changing the hostname

[root@localhost jdk1.6.0_45]# vi /etc/sysconfig/network
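The file contents are not shown in the post; presumably the HOSTNAME line in /etc/sysconfig/network is changed to the name used throughout the rest of this guide, roughly:

NETWORKING=yes
HOSTNAME=master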

[root@localhost jdk1.6.0_45]# hostname
localhost.localdomain
[root@localhost jdk1.6.0_45]#

You need to log out once here before the new hostname takes effect.

# IP configuration

[root@localhost Desktop]# vi /etc/sysconfig/network-scripts/ifcfg-eth0
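The edit itself is not shown either; a plausible static configuration matching the address that appears below. (Note that the following ifconfig output shows eth1 rather than eth0; on CentOS 6 this commonly happens when a cloned VM's new MAC address triggers udev's persistent-net rules.)

DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.209.100
NETMASK=255.255.255.0
# GATEWAY is a guess: VMware NAT networks usually put the gateway at .2
GATEWAY=192.168.209.2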

[root@localhost Desktop]# ifconfig
eth1      Link encap:Ethernet  HWaddr 00:50:56:38:E4:31
          inet addr:192.168.209.100  Bcast:192.168.209.255  Mask:255.255.255.0
          inet6 addr: fe80::250:56ff:fe38:e431/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:256 errors:0 dropped:0 overruns:0 frame:0
          TX packets:140 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:66995 (65.4 KiB)  TX bytes:11119 (10.8 KiB)
          Interrupt:19 Base address:0x2024

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:18 errors:0 dropped:0 overruns:0 frame:0
          TX packets:18 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:1128 (1.1 KiB)  TX bytes:1128 (1.1 KiB)

[root@localhost Desktop]#

# Editing the hosts file (if master pings, it worked)

[root@localhost Desktop]# vi /etc/hosts
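The added line is not shown; given the ping output below, it maps the static IP to the hostname:

192.168.209.100    master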

[root@localhost Desktop]# ping master
PING master (192.168.209.100) 56(84) bytes of data.
64 bytes from master (192.168.209.100): icmp_seq=1 ttl=64 time=0.488 ms
64 bytes from master (192.168.209.100): icmp_seq=2 ttl=64 time=0.083 ms
^C
--- master ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1374ms
rtt min/avg/max/mdev = 0.083/0.285/0.488/0.203 ms
[root@localhost Desktop]#

# Turning off the firewall

[root@localhost Desktop]# service iptables status
Table: filter
Chain INPUT (policy ACCEPT)
num  target    prot opt source              destination
1    ACCEPT    all  --  0.0.0.0/0            0.0.0.0/0           state RELATED,ESTABLISHED
2    ACCEPT    icmp --  0.0.0.0/0            0.0.0.0/0
3    ACCEPT    all  --  0.0.0.0/0            0.0.0.0/0
4    ACCEPT    tcp  --  0.0.0.0/0            0.0.0.0/0           state NEW tcp dpt:22
5    REJECT    all  --  0.0.0.0/0            0.0.0.0/0           reject-with icmp-host-prohibited

Chain FORWARD (policy ACCEPT)
num  target    prot opt source              destination
1    REJECT    all  --  0.0.0.0/0            0.0.0.0/0           reject-with icmp-host-prohibited

Chain OUTPUT (policy ACCEPT)
num  target    prot opt source              destination

[root@localhost Desktop]# service iptables stop
iptables: Flushing firewall rules:                         [  OK  ]
iptables: Setting chains to policy ACCEPT: filter          [  OK  ]
iptables: Unloading modules:                               [  OK  ]
[root@localhost Desktop]# chkconfig iptables --list
iptables        0:off  1:off  2:on   3:on   4:on   5:on   6:off
[root@localhost Desktop]# chkconfig iptables off
[root@localhost Desktop]# chkconfig iptables --list
iptables        0:off  1:off  2:off  3:off  4:off  5:off  6:off
[root@localhost Desktop]#
[root@localhost Desktop]# service iptables status
iptables: Firewall is not running.

# Passwordless SSH login (as the hadoop user)

Switch to the hadoop user:

[root@localhost ~]# su hadoop

Generate the key pair (you will be prompted three times; just press Enter each time):

[hadoop@localhost root]$ cd
[hadoop@localhost ~]$ pwd
/home/hadoop
[hadoop@localhost ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
33:09:0b:6d:30:f5:07:10:40:0d:be:99:cf:a9:a4:92 hadoop@localhost.localdomain
The key's randomart image is:
+--[ RSA 2048]----+
|   .*=+o.        |
|   . +.. .       |
|    o + . .      |
|     * o o       |
|    + . S        |
|     o . o       |
| .  . +          |
|E  o .           |
| .. .            |
+-----------------+
[hadoop@localhost ~]$

[hadoop@localhost ~]$ ls
[hadoop@localhost ~]$ ll -a
total 36
drwx------. 5 hadoop hadoop 4096 Feb 28 14:19 .
drwxr-xr-x. 3 root   root   4096 Feb 28 13:47 ..
-rw-------. 1 hadoop hadoop   79 Feb 28 14:23 .bash_history
-rw-r--r--. 1 hadoop hadoop   18 Feb 22  2013 .bash_logout
-rw-r--r--. 1 hadoop hadoop  176 Feb 22  2013 .bash_profile
-rw-r--r--. 1 hadoop hadoop  124 Feb 22  2013 .bashrc
drwxr-xr-x. 2 hadoop hadoop 4096 Nov 12  2010 .gnome2
drwxr-xr-x. 4 hadoop hadoop 4096 Feb 28 06:11 .mozilla
drwx------. 2 hadoop hadoop 4096 Feb 28 14:23 .ssh
[hadoop@localhost ~]$ cd .ssh/
[hadoop@localhost .ssh]$ ls
id_rsa  id_rsa.pub
[hadoop@localhost .ssh]$ ll
total 8
-rw-------. 1 hadoop hadoop 1671 Feb 28 14:23 id_rsa
-rw-r--r--. 1 hadoop hadoop  410 Feb 28 14:23 id_rsa.pub
[hadoop@localhost .ssh]$ cp id_rsa.pub authorized_keys
[hadoop@localhost .ssh]$ ll
total 12
-rw-r--r--. 1 hadoop hadoop  410 Feb 28 14:26 authorized_keys
-rw-------. 1 hadoop hadoop 1671 Feb 28 14:23 id_rsa
-rw-r--r--. 1 hadoop hadoop  410 Feb 28 14:23 id_rsa.pub
[hadoop@localhost .ssh]$

Log in over SSH:

[hadoop@localhost .ssh]$ ssh master
The authenticity of host 'master (192.168.209.100)' can't be established.
RSA key fingerprint is f0:92:0b:08:0d:9b:72:0d:ca:99:30:0a:40:7e:05:ae.

This prompt only appears on the first SSH login; answer yes, and from then on ssh master connects without a password.

Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'master,192.168.209.100' (RSA) to the list of known hosts.
[hadoop@localhost ~]$ exit
logout
Connection to master closed.
[hadoop@localhost .ssh]$ ssh master
Last login: Fri Feb 28 14:27:32 2014 from master
[hadoop@localhost ~]$

Exit the SSH session and continue with the environment setup:

[hadoop@localhost ~]$ exit
logout

# Checking the base environment

Test commands (the hadoop command and the $HADOOP_HOME check will only pass once part 2 below is done; see the sketch after this list for a way to run them all at once):

- java
- javac
- ifconfig
- ping master
- ssh master
- jps
- echo $JAVA_HOME
- echo $HADOOP_HOME
- hadoop
- hostname
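If you would rather run the checks in one shot, a small sketch (run it as the hadoop user; hadoop and $HADOOP_HOME report as missing until part 2 is finished):

#!/bin/bash
# base-environment sanity check (a sketch)
for cmd in java javac jps hadoop; do
    command -v "$cmd" >/dev/null 2>&1 && echo "OK: $cmd" || echo "MISSING: $cmd"
done
echo "JAVA_HOME=$JAVA_HOME"
echo "HADOOP_HOME=$HADOOP_HOME"
hostname
ping -c 1 master >/dev/null 2>&1 && echo "OK: ping master" || echo "FAIL: ping master"
ssh -o BatchMode=yes master true 2>/dev/null && echo "OK: ssh master" || echo "FAIL: ssh master"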

# Environment setup, part 2 (Hadoop environment)

# Installing hadoop-1.1.2.tar.gz (switch back to the root user)

[root@localhost hadoop]# pwd
/usr/local/hadoop
[root@localhost hadoop]# ls
hadoop-1.1.2.tar.gz
[root@localhost hadoop]# tar -zxvf hadoop-1.1.2.tar.gz

# Configuring the Hadoop environment variables (added straight to the same java.sh file)

[root@localhost hadoop-1.1.2]# vi /etc/profile.d/java.sh
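The additions themselves are not shown; a sketch of what gets appended to java.sh at this step, assuming the tarball was unpacked to /usr/local/hadoop/hadoop-1.1.2 as above:

export HADOOP_HOME=/usr/local/hadoop/hadoop-1.1.2
export PATH=$PATH:$HADOOP_HOME/bin

Incidentally, setting HADOOP_HOME like this is what triggers the harmless "Warning: $HADOOP_HOME is deprecated." message that shows up in the troubleshooting transcripts later in this post.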

[root@localhost hadoop-1.1.2]# source /etc/profile
[root@localhost hadoop-1.1.2]# hadoop fs -ls /

Found 24 items

dr-xr-xr-x   - root root      12288 2014-02-28 14:21 /lib

drwxr-xr-x   - root root       4096 2014-02-28 06:34 /var

drwxr-xr-x   - root root       3880 2014-02-28 14:06 /dev

drwxr-xr-x   - root root       4096 2014-02-27 23:12 /media

drwxrwxrwx   - root root       4096 2014-02-28 14:37 /tmp

drwxr-xr-x   - root root       4096 2014-02-28 13:33 /data

drwxr-xr-x   - root root       4096 2014-02-28 13:47 /home

dr-xr-xr-x   - root root          0 2014-02-28 13:13 /proc

drwxr-xr-x   - root root       4096 2014-02-28 23:05 /mnt

drwxr-xr-x   - root root          0 2014-02-28 13:13 /sys

drwxr-xr-x   - root root       4096 2011-09-23 19:47 /srv

drwxr-xr-x   - root root          0 2014-02-28 13:13 /selinux

drwxr-xr-x   - root root          0 2014-02-28 13:15 /net

drwx------   - root root       4096 2014-02-28 06:41 /.dbus

dr-xr-xr-x   - root root      12288 2014-02-28 14:22 /sbin

drwx------   - root root      16384 2014-02-28 06:03 /lost+found

dr-xr-xr-x   - root root       4096 2014-02-28 14:22 /bin

drwxr-xr-x   - root root          0 2014-02-28 13:15 /misc

drwxr-xr-x   - root root       4096 2014-02-28 06:35 /opt

dr-xr-x---   - root root       4096 2014-02-28 14:07 /root

dr-xr-xr-x   - root root       1024 2014-02-28 06:36 /boot

drwxr-xr-x   - root root      12288 2014-02-28 14:22 /etc

drwxr-xr-x   - root root       4096 2014-02-28 23:03 /usr

-rw-r--r--   1 root root          0 2014-02-28 13:14 /.autofsck

Note: fs.default.name has not been configured yet at this point, so hadoop fs -ls / falls back to the local filesystem; that is why ordinary Linux directories are listed.

# Changing ownership of the hadoop directory

[root@master local]# pwd
/usr/local
[root@master local]# ll
total 44
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 bin
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 etc
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 games
drwxr-xr-x. 3 hadoop hadoop 4096 Feb 28 14:34 hadoop
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 include
drwxr-xr-x. 3 root   root   4096 Feb 28 14:51 java
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 lib
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 libexec
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 sbin
drwxr-xr-x. 5 root   root   4096 Feb 28 06:11 share
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 src
[root@master local]# chown -R hadoop:hadoop hadoop/
[root@master local]# ll
total 44
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 bin
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 etc
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 games
drwxr-xr-x. 3 hadoop hadoop 4096 Feb 28 14:34 hadoop
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 include
drwxr-xr-x. 3 root   root   4096 Feb 28 14:51 java
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 lib
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 libexec
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 sbin
drwxr-xr-x. 5 root   root   4096 Feb 28 06:11 share
drwxr-xr-x. 2 root   root   4096 Sep 23  2011 src

# Switch to the hadoop user

# Editing the hadoop-env.sh file

 

[hadoop@master conf]$ vi hadoop-env.sh
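The actual edit is not shown; in Hadoop 1.x this step normally means uncommenting the JAVA_HOME line in conf/hadoop-env.sh and pointing it at the JDK installed above:

# conf/hadoop-env.sh (sketch) — the JAVA_HOME line ships commented out
export JAVA_HOME=/usr/local/java/jdk1.6.0_45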

# Editing the core-site.xml file

[hadoop@master conf]$ vi core-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>fs.checkpoint.dir</name>
        <value>/data/hadoop/hdfs/namesecondary</value>
    </property>
    <property>
        <name>fs.checkpoint.period</name>
        <value>1800</value>
    </property>
    <property>
        <name>fs.checkpoint.size</name>
        <value>33554432</value>
    </property>
    <property>
        <name>io.compression.codecs</name>
        <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec</value>
    </property>
    <property>
        <name>fs.trash.interval</name>
        <value>1440</value>
    </property>
</configuration>

# Editing the hdfs-site.xml file

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.name.dir</name>
        <value>/data/hadoop/hdfs/name</value>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/data/hadoop/hdfs/data</value>
    </property>
    <property>
        <name>dfs.http.address</name>
        <value>master:50070</value>
    </property>
    <property>
        <name>dfs.secondary.http.address</name>
        <value>master:50090</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.datanode.du.reserved</name>
        <value>1073741824</value>
    </property>
    <property>
        <name>dfs.block.size</name>
        <value>134217728</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>

# Editing the mapred-site.xml file

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>master:9001</value>
    </property>
    <property>
        <name>mapred.local.dir</name>
        <value>/data/hadoop/mapred/mrlocal</value>
        <final>true</final>
    </property>
    <property>
        <name>mapred.system.dir</name>
        <value>/data/hadoop/mapred/mrsystem</value>
        <final>true</final>
    </property>
    <property>
        <name>mapred.tasktracker.map.tasks.maximum</name>
        <value>2</value>
        <final>true</final>
    </property>
    <property>
        <name>mapred.tasktracker.reduce.tasks.maximum</name>
        <value>1</value>
        <final>true</final>
    </property>
    <property>
        <name>io.sort.mb</name>
        <value>32</value>
        <final>true</final>
    </property>
    <property>
        <name>mapred.child.java.opts</name>
        <value>-Xmx64M</value>
    </property>
    <property>
        <name>mapred.compress.map.output</name>
        <value>true</value>
    </property>
</configuration>

# Environment setup, part 3 (starting the daemons)

# Create the data directory and chown it to hadoop

[root@master /]# mkdir data
[root@master /]# ll
total 98
drwxr-xr-x.   2 root   root    4096 Feb 28 13:33 data
[root@master /]# chown hadoop:hadoop data/
[root@master /]# ll
total 98
drwxr-xr-x.   2 hadoop hadoop  4096 Feb 28 13:33 data

# Starting the namenode: hadoop-daemon.sh start namenode

# Before this, format the namenode first; otherwise startup fails with errors

[hadoop@master name]$ hadoop namenode -format
[hadoop@master name]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-namenode-master.out
[hadoop@master name]$ jps
4234 NameNode
4304 Jps
[hadoop@master name]$

Tail the startup log to check for exceptions (the common ones are described near the end of this post):

[hadoop@master name]$ tail -100f /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-namenode-master.log

# Starting the datanode: hadoop-daemon.sh start datanode

[hadoop@master name]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-datanode-master.out
[hadoop@master name]$ jps
4335 DataNode
4234 NameNode
4375 Jps
[hadoop@master name]$ tail -100f /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-datanode-master.log

 

# Starting the secondarynamenode: hadoop-daemon.sh start secondarynamenode

[hadoop@master name]$ hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master name]$ jps
4335 DataNode
4234 NameNode
4448 SecondaryNameNode
4486 Jps
[hadoop@master name]$ tail -100f /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-secondarynamenode-master.log

# Starting the jobtracker: hadoop-daemon.sh start jobtracker

[hadoop@master name]$ hadoop-daemon.sh start jobtracker
starting jobtracker, logging to /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-jobtracker-master.out
[hadoop@master name]$ tail -100f /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-jobtracker-master.log
[hadoop@master name]$ jps
4537 JobTracker
4335 DataNode
4612 Jps
4234 NameNode
4448 SecondaryNameNode
[hadoop@master name]$

# Starting the tasktracker: hadoop-daemon.sh start tasktracker

[hadoop@master name]$ hadoop-daemon.sh start tasktracker
starting tasktracker, logging to /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-tasktracker-master.out
[hadoop@master name]$ jps
4537 JobTracker
4689 Jps
4335 DataNode
4234 NameNode
4448 SecondaryNameNode
4652 TaskTracker
[hadoop@master name]$ tail -100f /usr/local/hadoop/hadoop-1.1.2/libexec/../logs/hadoop-hadoop-tasktracker-master.log
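With all five daemons up, you can also sanity-check the web interfaces; a sketch (dfs.http.address was bound to master:50070 in hdfs-site.xml above, and 50030 is the JobTracker UI's default port in Hadoop 1.x):

[hadoop@master ~]$ curl -s -o /dev/null http://master:50070/ && echo "NameNode web UI up"
[hadoop@master ~]$ curl -s -o /dev/null http://master:50030/ && echo "JobTracker web UI up"

Or simply open the two URLs in a browser.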

# Run the example job that ships with Hadoop; if it passes, the environment is fully working

# hadoop jar hadoop-examples-1.1.2.jar pi 10 100

[hadoop@master hadoop-1.1.2]$ hadoop jar hadoop-examples-1.1.2.jar pi 10 100

Number of Maps  = 10
Samples per Map = 100
Wrote input for Map #0
Wrote input for Map #1
Wrote input for Map #2
Wrote input for Map #3
Wrote input for Map #4
Wrote input for Map #5
Wrote input for Map #6
Wrote input for Map #7
Wrote input for Map #8
Wrote input for Map #9
Starting Job
14/02/28 16:04:00 INFO mapred.FileInputFormat: Total input paths to process : 10
14/02/28 16:04:01 INFO mapred.JobClient: Running job: job_201402281553_0001
14/02/28 16:04:02 INFO mapred.JobClient:  map 0% reduce 0%
14/02/28 16:04:22 INFO mapred.JobClient:  map 20% reduce 0%
14/02/28 16:04:38 INFO mapred.JobClient:  map 40% reduce 0%
14/02/28 16:04:50 INFO mapred.JobClient:  map 60% reduce 0%
14/02/28 16:04:51 INFO mapred.JobClient:  map 60% reduce 16%
14/02/28 16:05:01 INFO mapred.JobClient:  map 70% reduce 20%
14/02/28 16:05:02 INFO mapred.JobClient:  map 80% reduce 20%
14/02/28 16:05:07 INFO mapred.JobClient:  map 80% reduce 26%
14/02/28 16:05:11 INFO mapred.JobClient:  map 90% reduce 26%
14/02/28 16:05:12 INFO mapred.JobClient:  map 100% reduce 26%
14/02/28 16:05:16 INFO mapred.JobClient:  map 100% reduce 33%
14/02/28 16:05:18 INFO mapred.JobClient:  map 100% reduce 100%
14/02/28 16:05:22 INFO mapred.JobClient: Job complete: job_201402281553_0001
14/02/28 16:05:23 INFO mapred.JobClient: Counters: 30
14/02/28 16:05:23 INFO mapred.JobClient:   Job Counters
14/02/28 16:05:23 INFO mapred.JobClient:     Launched reduce tasks=1
14/02/28 16:05:23 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=130917
14/02/28 16:05:23 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
14/02/28 16:05:23 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
14/02/28 16:05:23 INFO mapred.JobClient:     Launched map tasks=10
14/02/28 16:05:23 INFO mapred.JobClient:     Data-local map tasks=10
14/02/28 16:05:23 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=56177
14/02/28 16:05:23 INFO mapred.JobClient:   File Input Format Counters
14/02/28 16:05:23 INFO mapred.JobClient:     Bytes Read=1180
14/02/28 16:05:23 INFO mapred.JobClient:   File Output Format Counters
14/02/28 16:05:23 INFO mapred.JobClient:     Bytes Written=97
14/02/28 16:05:23 INFO mapred.JobClient:   FileSystemCounters
14/02/28 16:05:23 INFO mapred.JobClient:     FILE_BYTES_READ=68
14/02/28 16:05:23 INFO mapred.JobClient:     HDFS_BYTES_READ=2380
14/02/28 16:05:23 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=555675
14/02/28 16:05:23 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=215
14/02/28 16:05:23 INFO mapred.JobClient:   Map-Reduce Framework
14/02/28 16:05:23 INFO mapred.JobClient:     Map output materialized bytes=260
14/02/28 16:05:23 INFO mapred.JobClient:     Map input records=10
14/02/28 16:05:23 INFO mapred.JobClient:     Reduce shuffle bytes=260
14/02/28 16:05:23 INFO mapred.JobClient:     Spilled Records=40
14/02/28 16:05:23 INFO mapred.JobClient:     Map output bytes=180
14/02/28 16:05:23 INFO mapred.JobClient:     Total committed heap usage (bytes)=495345664
14/02/28 16:05:23 INFO mapred.JobClient:     CPU time spent (ms)=10760
14/02/28 16:05:23 INFO mapred.JobClient:     Map input bytes=240
14/02/28 16:05:23 INFO mapred.JobClient:     SPLIT_RAW_BYTES=1200
14/02/28 16:05:23 INFO mapred.JobClient:     Combine input records=0
14/02/28 16:05:23 INFO mapred.JobClient:     Reduce input records=20
14/02/28 16:05:23 INFO mapred.JobClient:     Reduce input groups=20
14/02/28 16:05:23 INFO mapred.JobClient:     Combine output records=0
14/02/28 16:05:23 INFO mapred.JobClient:     Physical memory (bytes) snapshot=760815616
14/02/28 16:05:23 INFO mapred.JobClient:     Reduce output records=0
14/02/28 16:05:23 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2558492672
14/02/28 16:05:23 INFO mapred.JobClient:     Map output records=20
Job Finished in 83.369 seconds
Estimated value of Pi is 3.14800000000000000000
[hadoop@master hadoop-1.1.2]$
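Now that fs.default.name points at hdfs://master:9000 and the daemons are running, the same listing command from earlier talks to HDFS rather than the local disk; re-running it is a quick confirmation (the output will show HDFS paths such as the /data job directories, not /bin, /etc and friends):

[hadoop@master hadoop-1.1.2]$ hadoop fs -ls /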

# Problems that may come up during setup

# Problems 妳那伊抹微笑 hit while setting up Hadoop (for reference only)

# If you have reached this point, congratulations: your pseudo-distributed Hadoop environment is up


Below are problems from my earlier Hadoop setups (the JDK and Hadoop directories have changed since then, but the principles are the same; for reference only).

1. Errors when starting the NameNode with hadoop-daemon.sh start namenode

1.1 [hadoop@master modules]$ hadoop-daemon.sh start namenode
bash: /root/.bashrc: Permission denied
Warning: $HADOOP_HOME is deprecated.

starting namenode, logging to /opt/modules/hadoop/hadoop-1.0.3/bin/../logs/hadoop-hadoop-namenode-master.out
bash: /root/.bashrc: Permission denied

 

Here is what a Baidu search turned up:

(A few days earlier, errors appeared while configuring a Hadoop cluster following the "EasyHadoop" deployment and getting-started document; that setup used RedHat 4 and hadoop-1.0.3.)

The messages were:

bash: /root/.bashrc: Permission denied
Warning: $HADOOP_HOME is deprecated.
bash: /root/.bashrc: Permission denied          (the fix suggested there: change the permissions on the corresponding /root directory to 777)
bash: /root/.bashrc: Permission denied
starting namenode, logging to
/opt/modules/hadoop/hadoop-1.0.3/bin/../logs/hadoop-hadoop-namenode-master.out

This problem gave me a headache for over a week. I kept granting 777 on the hadoop-1.0.3 directory and it still failed. Then I happened to see a permissions step in a preview video on some site, typed the same commands, and it worked. The lesson: create and unpack files as the appropriate user, and use ll -a at every step to check file permissions.

[root@master hadoop-1.0.3]# chmod 777 logs/*
[root@master hadoop-1.0.3]# chmod 777 logs
[root@master hadoop-1.0.3]# chown root logs/*
[root@master hadoop-1.0.3]# chown root logs
[root@master hadoop-1.0.3]# bin/start-all.sh

After that the namenode still would not start, so I formatted it (hadoop namenode -format) and found an exception being thrown:

ERROR namenode.NameNode: java.io.IOException: Cannot create directory
/var/hadoop/hadoop-hadoop/dfs/name/current
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.clearDirectory(Storage.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1320)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1339)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1164)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1271)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1288)

The exception says a directory cannot be created; after the earlier episode I knew it was again a missing permission. So I created a hadoop/ directory under /var/ myself and granted the right permissions, re-formatted the namenode, and it started. But jps on the child nodes showed they had not fully started: the nodes had the same problem.

To spare the nodes the same trouble, it is best to finish configuring master inside the VM, copy the image over to the other machines, and then change only the IP and hostname.

 

1.2 Next comes a SAX parse error on "<!--" in an XML file.

Fix: go into the configuration files and delete the <!-- --> comments from core-site.xml and hdfs-site.xml.

1.3 Then another error:

2014-02-10 21:14:24,393 ERROR org.apache.hadoop.hdfs.server.namenode.FSNamesystem: FSNamesystem initialization failed.
java.io.IOException: NameNode is not formatted.
        at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:330)

Fix: [hadoop@master conf]$ hadoop namenode -format
Warning: $HADOOP_HOME is deprecated.

1.4 Another error: the format aborts with "Format aborted in /data/hadoop/hdfs/name". Here is the run:

14/02/10 21:30:03 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/192.168.1.100
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 1.0.3
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192; compiled by 'hortonfo' on Tue May  8 20:31:25 UTC 2012
************************************************************/
Re-format filesystem in /data/hadoop/hdfs/name ? (Y or N) y
Format aborted in /data/hadoop/hdfs/name
14/02/10 21:30:05 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.1.100
************************************************************/
[hadoop@master conf]$

Fix: before formatting the namenode, make sure the directory that dfs.name.dir points at (here /data/hadoop/hdfs/name) does not exist. Hadoop does this to keep you from accidentally formatting an existing cluster. (Note also that the re-format prompt is case-sensitive: answering a lowercase y, as in the transcript above, aborts the format; it has to be an uppercase Y.)
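A sketch of the corresponding commands; this destroys any existing HDFS metadata, so only do it on a fresh install:

[hadoop@master ~]$ rm -rf /data/hadoop/hdfs/name
[hadoop@master ~]$ hadoop namenode -format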

1.5 Yet more "errors":

/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.1.100
************************************************************/
2014-02-10 21:55:58,808 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master/192.168.1.100
STARTUP_MSG:   args = []
STARTUP_MSG:   version = 1.0.3
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192; compiled by 'hortonfo' on Tue May  8 20:31:25 UTC 2012
************************************************************/
2014-02-10 21:55:59,478 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2014-02-10 21:55:59,534 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source MetricsSystem,sub=Stats registered.
2014-02-10 21:55:59,538 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s).
2014-02-10 21:55:59,538 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system started
2014-02-10 21:55:59,926 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source ugi registered.
2014-02-10 21:55:59,951 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source jvm registered.
2014-02-10 21:55:59,954 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source NameNode registered.
2014-02-10 21:56:00,202 INFO org.apache.hadoop.hdfs.util.GSet: VM type       = 32-bit
2014-02-10 21:56:00,202 INFO org.apache.hadoop.hdfs.util.GSet: 2% max memory = 0.61875 MB
2014-02-10 21:56:00,203 INFO org.apache.hadoop.hdfs.util.GSet: capacity      = 2^17 = 131072 entries
2014-02-10 21:56:00,203 INFO org.apache.hadoop.hdfs.util.GSet: recommended=131072, actual=131072
2014-02-10 21:56:00,305 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner=hadoop
2014-02-10 21:56:00,305 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup=supergroup
2014-02-10 21:56:00,305 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled=false
2014-02-10 21:56:00,322 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.block.invalidate.limit=100
2014-02-10 21:56:00,323 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
2014-02-10 21:56:01,295 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Registered FSNamesystemStateMBean and NameNodeMXBean
2014-02-10 21:56:01,375 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Caching file names occuring more than 10 times
2014-02-10 21:56:01,406 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files = 1
2014-02-10 21:56:01,447 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files under construction = 0
2014-02-10 21:56:01,448 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 112 loaded in 0 seconds.
2014-02-10 21:56:01,450 INFO org.apache.hadoop.hdfs.server.common.Storage: Edits file /data/hadoop/hdfs/name/current/edits of size 4 edits # 0 loaded in 0 seconds.
2014-02-10 21:56:01,456 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 112 saved in 0 seconds.
2014-02-10 21:56:02,008 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 112 saved in 0 seconds.
2014-02-10 21:56:02,124 INFO org.apache.hadoop.hdfs.server.namenode.NameCache: initialized with 0 entries 0 lookups
2014-02-10 21:56:02,125 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Finished loading FSImage in 1843 msecs
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Total number of blocks = 0
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of invalid blocks = 0
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of under-replicated blocks = 0
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of over-replicated blocks = 0
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.StateChange: STATE* Safe mode termination scan for invalid, over- and under-replicated blocks completed in 53 msec
2014-02-10 21:56:02,192 INFO org.apache.hadoop.hdfs.StateChange: STATE* Leaving safe mode after 1 secs.
2014-02-10 21:56:02,194 INFO org.apache.hadoop.hdfs.StateChange: STATE* Network topology has 0 racks and 0 datanodes
2014-02-10 21:56:02,194 INFO org.apache.hadoop.hdfs.StateChange: STATE* UnderReplicatedBlocks has 0 blocks
2014-02-10 21:56:02,217 INFO org.apache.hadoop.util.HostsFileReader: Refreshing hosts (include/exclude) list
2014-02-10 21:56:02,243 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source FSNamesystemMetrics registered.
2014-02-10 21:56:02,342 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: ReplicateQueue QueueProcessingStatistics: First cycle completed 0 blocks in 122 msec
2014-02-10 21:56:02,342 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: ReplicateQueue QueueProcessingStatistics: Queue flush completed 0 blocks in 122 msec processing time, 122 msec clock time, 1 cycles
2014-02-10 21:56:02,342 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: InvalidateQueue QueueProcessingStatistics: First cycle completed 0 blocks in 0 msec
2014-02-10 21:56:02,343 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: InvalidateQueue QueueProcessingStatistics: Queue flush completed 0 blocks in 0 msec processing time, 0 msec clock time, 1 cycles
2014-02-10 21:56:02,565 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source RpcDetailedActivityForPort9000 registered.
2014-02-10 21:56:02,567 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source RpcActivityForPort9000 registered.
2014-02-10 21:56:02,575 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Namenode up at: master/192.168.1.100:9000
2014-02-10 21:56:02,595 INFO org.apache.hadoop.ipc.Server: Starting SocketReader
2014-02-10 21:56:03,125 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
2014-02-10 21:56:03,534 INFO org.apache.hadoop.http.HttpServer: Added global filter safety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
2014-02-10 21:56:03,638 INFO org.apache.hadoop.http.HttpServer: dfs.webhdfs.enabled = false
2014-02-10 21:56:03,665 INFO org.apache.hadoop.http.HttpServer: Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 50070
2014-02-10 21:56:03,689 INFO org.apache.hadoop.http.HttpServer: listener.getLocalPort() returned 50070 webServer.getConnectors()[0].getLocalPort() returned 50070
2014-02-10 21:56:03,689 INFO org.apache.hadoop.http.HttpServer: Jetty bound to port 50070
2014-02-10 21:56:03,689 INFO org.mortbay.log: jetty-6.1.26
2014-02-10 21:56:05,407 INFO org.mortbay.log: Started SelectChannelConnector@master:50070
2014-02-10 21:56:05,407 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Web-server up at: master:50070
2014-02-10 21:56:05,409 INFO org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2014-02-10 21:56:05,427 INFO org.apache.hadoop.ipc.Server: IPC Server listener on 9000: starting
2014-02-10 21:56:05,444 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: starting
2014-02-10 21:56:05,473 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: starting
2014-02-10 21:56:05,474 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: starting
2014-02-10 21:56:05,478 INFO org.apache.hadoop.ipc.Server: IPC Server handler 3 on 9000: starting
2014-02-10 21:56:05,478 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9000: starting
2014-02-10 21:56:05,509 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9000: starting
2014-02-10 21:56:05,509 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 9000: starting
2014-02-10 21:56:05,510 INFO org.apache.hadoop.ipc.Server: IPC Server handler 7 on 9000: starting
2014-02-10 21:56:05,510 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 9000: starting
2014-02-10 21:56:05,529 INFO org.apache.hadoop.ipc.Server: IPC Server handler 9 on 9000: starting

Resolution: well, this is not an error at all; it means startup succeeded - - ! Run the usual check commands and you will see everything is up.

 

1.6 SSH prints a warning

Warning: No xauth data; using fake authentication data for X11 forwarding.

Cause: this means that your authorized_keys file is configured correctly, but your SSH configuration has X11 forwarding enabled.

Fix: # vi /etc/ssh/ssh_config

  ForwardX11 no    # set this option to 'no'

Then run: # service sshd restart. (Strictly speaking, ForwardX11 in ssh_config is a client-side option, so new ssh sessions pick up the change even without restarting sshd.)

2. Errors when starting the DataNode with hadoop-daemon.sh start datanode

2.1 Permission mismatch: Incorrect permission for /data/hadoop/hdfs/data, expected: rwxr-xr-x, while actual: rwxrwxr-x

/************************************************************
STARTUP_MSG: Starting DataNode
STARTUP_MSG:   host = master/192.168.1.100
STARTUP_MSG:   args = []
STARTUP_MSG:   version = 1.0.3
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192; compiled by 'hortonfo' on Tue May  8 20:31:25 UTC 2012
************************************************************/
2014-02-10 22:10:55,197 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2014-02-10 22:10:55,287 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source MetricsSystem,sub=Stats registered.
2014-02-10 22:10:55,308 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s).
2014-02-10 22:10:55,308 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: DataNode metrics system started
2014-02-10 22:10:55,902 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source ugi registered.
2014-02-10 22:10:56,238 WARN org.apache.hadoop.hdfs.server.datanode.DataNode: Invalid directory in dfs.data.dir: Incorrect permission for /data/hadoop/hdfs/data, expected: rwxr-xr-x, while actual: rwxrwxr-x
2014-02-10 22:10:56,239 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: All directories in dfs.data.dir are invalid.
2014-02-10 22:10:56,239 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Exiting Datanode
2014-02-10 22:10:56,240 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down DataNode at master/192.168.1.100
************************************************************/

Fix: correct the permissions:

[hadoop@master hdfs]$ chmod 755 /data/hadoop/hdfs/data

Check again: the datanode now starts successfully.

 

 

3. jps shows a stale extra entry

[root@master root]# jps
3672 DataNode
3569 NameNode
14811 -- process information unavailable          (this entry should not be there)
3845 Jps

Fix:

[root@master root]# cd /tmp/hsperfdata_hadoop
[root@master hsperfdata_hadoop]# pwd
/tmp/hsperfdata_hadoop
[root@master hsperfdata_hadoop]# ll
total 32
-rw-------    1 hadoop  hadoop      32768 Feb 10 21:55 14811
[root@master hsperfdata_hadoop]# jps
3672 DataNode
3569 NameNode
14811 -- process information unavailable
3845 Jps
[root@master hsperfdata_hadoop]# rm -f 14811
[root@master hsperfdata_hadoop]# jps
3855 Jps
3672 DataNode
3569 NameNode
[root@master hsperfdata_hadoop]#

Note: the files for this user's healthy processes live in the same directory; delete the wrong one and jps loses sight of that process, so use this with care.

After that, the stale entry no longer shows up in jps.

 

 

4. Errors when starting the jobtracker

4.1

2014-02-12 23:46:21,648 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: java.io.FileNotFoundException: /data/hadoop/hdfs/name/current/VERSION (Permission denied)

Cause: the namenode was probably started as the root user at some point, which changed the ownership of the files under current/. Fix the ownership as root:

[root@master root]# ll /data/hadoop/hdfs/name/current
total 16
-rw-r--r--    1 root    root        4 Feb 12 23:01 edits
-rw-r--r--    1 root    root      927 Feb 12 23:01 fsimage
-rw-r--r--    1 root    root        8 Feb 12 23:01 fstime
-rw-r--r--    1 root    root       99 Feb 12 23:01 VERSION
[root@master root]# chown hadoop:hadoop -R /data/hadoop/hdfs/name/current/
[root@master root]# ll /data/hadoop/hdfs/name/current
total 16
-rw-r--r--    1 hadoop  hadoop      4 Feb 12 23:01 edits
-rw-r--r--    1 hadoop  hadoop    927 Feb 12 23:01 fsimage
-rw-r--r--    1 hadoop  hadoop      8 Feb 12 23:01 fstime
-rw-r--r--    1 hadoop  hadoop     99 Feb 12 23:01 VERSION

4.2 Right after that, starting the namenode fails again (hadoop-daemon.sh start namenode):

2014-02-12 23:57:46,480 ERROR org.apache.hadoop.hdfs.server.common.Storage: Unable to move last checkpoint for /data/hadoop/hdfs/name
java.io.IOException: Failed to delete /data/hadoop/hdfs/name/previous.checkpoint
2014-02-12 23:57:46,483 FATAL org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Edit streams not yet initialized
java.lang.Exception: Edit streams not yet initialized

Cause: the previous.checkpoint directory under /data/hadoop/hdfs/name is owned by root, so the hadoop user has no permission on it. Change the ownership, and the namenode starts:

[root@master root]# ll /data/hadoop/hdfs/name
total 16
drwxrwxr-x    2 hadoop  hadoop    4096 Feb 12 23:57 current
drwxrwxr-x    2 hadoop  hadoop    4096 Feb 10 21:51 image
drwxr-xr-x    2 hadoop  hadoop    4096 Feb 12 23:01 lastcheckpoint.tmp
drwxr-xr-x    2 root    root      4096 Feb 11 22:35 previous.checkpoint
[root@master root]# chown -R hadoop:hadoop /data/hadoop/hdfs/
[root@master root]# ll /data/hadoop/hdfs/name
total 16
drwxrwxr-x    2 hadoop  hadoop    4096 Feb 12 23:57 current
drwxrwxr-x    2 hadoop  hadoop    4096 Feb 10 21:51 image
drwxr-xr-x    2 hadoop  hadoop    4096 Feb 12 23:01 lastcheckpoint.tmp
drwxr-xr-x    2 hadoop  hadoop    4096 Feb 11 22:35 previous.checkpoint

 

 

5. After jdk1.6.0_45 installed successfully, the java and javac commands both worked and javac compiled Hello.class, but java Hello would not run; see the exception below:

Exception in thread "main" java.lang.NoClassDefFoundError: Hello
Caused by: java.lang.ClassNotFoundException: Hello
        at java.net.URLClassLoader$1.run(URLClassLoader.java:202)
        at java.security.AccessController.doPrivileged(Native Method)
        at java.net.URLClassLoader.findClass(URLClassLoader.java:190)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:306)
        at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:301)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
Could not find the main class: Hello.  Program will exit.

Cause: CLASSPATH in /etc/profile was misconfigured; the "." entry (the current directory) was missing.

Fix: configure CLASSPATH properly, i.e. CLASSPATH=.:/usr/java/jdk1.6.0_45/jre/lib/rt.jar

# java configuration start
JAVA_HOME=/usr/java/jdk1.6.0_45
PATH=$PATH:/usr/java/jdk1.6.0_45/bin
CLASSPATH=.:/usr/java/jdk1.6.0_45/jre/lib/rt.jar
export JAVA_HOME PATH CLASSPATH

"/etc/profile" 61L, 1140C written
[root@master root]# source /etc/profile

 

6. Starting the secondarynamenode fails

ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:hadoop cause:java.net.BindException: Cannot assign requested address

Cause: I think this is because my lab install is pseudo-distributed, with no second virtual machine, which is why this exception appears.


Today's post only covers the pseudo-distributed Hadoop install; the rest will follow next time...

If anything here is still not clear, I have put together a set of detailed Hadoop notes; contact me if you need them...

妳那伊抹微笑

The you smile until forever ...