hadoop-(2) Running wordcount

1. Create input and output folders on the local Linux filesystem.

2. Copy everything in the local input folder into the /in folder on HDFS.
First create the /in folder on HDFS: bin/hadoop fs -mkdir  /in
Then upload the files: bin/hadoop fs -put ../input/*  /in

3. List the files under the /in folder on HDFS:
bin/hadoop fs -ls   /in
Run wordcount:
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.3.jar  wordcount /in/ /output/wordcount1


View the results:
hadoop fs -cat /output/wordcount1/*
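
For reference, the result files under /output/wordcount1 are named part-r-00000 (one per reducer), and each line holds a word and its count separated by a tab. An illustrative (made-up) fragment of what the cat command prints:

hadoop	2
hello	3
world	1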


————————————————————————————————————————————
Writing and running your own MapReduce program, based on Hadoop 2.6.3.
The map phase must finish before reduce can start.
The Mapper class's map method is called once for every line of input data.

map input: key -> the byte offset at which the line starts, Long; value -> one line of data, String;
output: key -> a word, String; value -> the count 1, Long
public class WordCountMapper extends Mapper<Long, String, String, Long>
However, these plain Java types cannot be used directly here. Map and reduce input and output are passed over the network, so they go through serialization. String and Long do implement the Java serialization interface and could be sent over the wire, but the JDK serialization mechanism is not very efficient under Hadoop. Hadoop therefore reimplements serialization with its own mechanism, and String and Long do not fit that mechanism; the corresponding Writable types are used instead.
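
To make Hadoop's serialization mechanism concrete, here is a minimal sketch of a hand-rolled Writable (the class name MyLongWritable is invented for illustration; in practice you simply use the org.apache.hadoop.io.LongWritable that ships with Hadoop):

package jvm.hadoop.starter;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

// A Writable serializes itself field by field into a compact binary form,
// which is what Hadoop uses when moving data between map and reduce.
public class MyLongWritable implements Writable {

    private long value;

    public void set(long value) { this.value = value; }

    public long get() { return value; }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(value);   // write the field to the output stream
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        value = in.readLong();  // read it back in the same order
    }
}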
Long -> LongWritable
String -> Text
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable>
reduce input: key -> a word, String; values -> the 1's collected for that word, Long;
output: key -> a word, String; value -> the total count n, Long
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
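
To make the data flow concrete, here is a small worked example (the input text is invented for illustration). Suppose the input file contains the two lines:

hello world
hello hadoop

map is called once per line and emits <hello,1> <world,1> and then <hello,1> <hadoop,1>. Before reduce starts, the framework groups the pairs by key, so reduce is called once per word with <hadoop,{1}>, <hello,{1,1}>, <world,{1}> and writes:

hadoop 1
hello 2
world 1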

1. Write the program
package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    private static final LongWritable one = new LongWritable(1);
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Get the content of one line of the file
        String line = value.toString();
        // Split the line into individual words
        StringTokenizer itr = new StringTokenizer(line, " "); // line data, delimiter
        // Iterate and emit <word, 1>
        while (itr.hasMoreTokens()) {
            this.word.set(itr.nextToken());
            context.write(this.word, one); // the map output is also in <K,V> form
        }
    }
}

package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// key: hello, values: {1, 1, 1, ...}
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Define an accumulating counter
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // Emit the <word, count> pair
        context.write(key, new LongWritable(count));
    }

    public static void main(String[] args) {
        // Quick local check of how StringTokenizer splits on a set of delimiters
        StringTokenizer st = new StringTokenizer("this,is.a test", ",| ");
        while (st.hasMoreTokens()) {
            System.out.println(st.nextToken());
        }
    }
}

package jvm.hadoop.starter;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes a job (which Mapper class and Reducer class to use, where the input files
 * are, where the results go) and then submits that job to the Hadoop cluster.
 */
// jvm.hadoop.starter.WordCountRunner
public class WordCountRunner {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // Create the job instance via the static factory method
        Configuration conf = new Configuration();
        Job wordcount = Job.getInstance(conf);
        // The jar that carries this job's resources: the class containing the main method
        wordcount.setJarByClass(WordCountRunner.class);
        // Which Mapper and Reducer classes this job uses
        wordcount.setMapperClass(WordCountMapper.class);
        wordcount.setReducerClass(WordCountReduce.class);
        // The mapper's output KV types; if they are not set, they default to the
        // job output types configured below
        // wordcount.setMapOutputKeyClass(Text.class);
        // wordcount.setMapOutputValueClass(LongWritable.class);
        // The reducer's output KV types
        wordcount.setOutputKeyClass(Text.class);
        wordcount.setOutputValueClass(LongWritable.class);
        // The path holding the raw input data; paths are at directory level
        FileInputFormat.setInputPaths(wordcount, "hdfs://master:9000/wc/srcdata");
        // Where the processed results are written
        FileOutputFormat.setOutputPath(wordcount, new Path("hdfs://master:9000/wc/output"));
        boolean res = wordcount.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
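
A small variation (my own suggestion, not part of the original article): instead of hardcoding the HDFS paths, the two path-setting calls in WordCountRunner can read them from the command line, so the same jar can be pointed at any input and output directories:

// Assumes the input path is passed as args[0] and the output path as args[1]
FileInputFormat.setInputPaths(wordcount, new Path(args[0]));
FileOutputFormat.setOutputPath(wordcount, new Path(args[1]));

With that change, the run command in step 4 below would take the two paths as extra arguments, e.g. hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunner /wc/srcdata /wc/output.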

2. Package the program as a jar named wordcount.jar and put it under the /opt/workspace directory.
3. Create the /wc/srcdata directory on HDFS and upload a file to run wordcount on:
hadoop fs -mkdir -p /wc/srcdata
hadoop fs -put /opt/file.txt  /wc/srcdata
4. Run the job:
From the /opt/workspace directory, execute hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunner


A problem encountered along the way: the hadoop fs -put command failed with a "No route to host" error.
It turned out that the firewall on the slave node had not been turned off; turning the firewall off resolved it.