hadoop-(2)wordcount运行
来源:互联网 发布:淘宝助理怎么导入csv 编辑:程序博客网 时间:2024/05/17 22:39
1、linux本地创建input、output文件夹
2、将本地input文件夹中的所有内容拷贝到 Hadoop的/in文件夹下
首先hadoop创建in文件夹bin/hadoop fs -mkdir /in
bin/hadoop fs -put ../input/* /in
3、查看Hadoop文件系统/in文件夹下的文件
bin/hadoop fs -ls /in
运行wordcount:
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.3.jar wordcount /in/ /output/wordcount1
查看执行结果
hadoop fs -cat /output/wordcount1/*
————————————————————————————————————————————
自己编写mapreduce程序运行,基于hadoop2.6.3版本
reduce开始之前,map必须完成。每读一行数据,调用一次Mapper类的map方法。map 输入:key——>起始偏移量,Long;value——>一行数据,String;输出:key——>一个单词,String;value——>数量1,Long;即 public class WordCountMapper extends Mapper<Long, String, String, Long>。但是这些基本类型是不能直接用的:输入输出都是经过网络传递的,有序列化的过程。String、Long都实现了java序列化接口,可以在网上传,但是jdk的序列化机制在Hadoop下效率不是很高。Hadoop把这些序列化机制重新实现了一套,String、Long这些类型不符合Hadoop序列化机制,需要替换:Long——>LongWritable;String——>Text。因此应写成 public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable>
reduce 输入:key——>一个单词,String;value——>数量1,Long;输出:key——>一个单词,String;value——>数量n,Long;
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
1、编写程序
package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Map stage of WordCount. For every input record (byte offset, line of text)
 * it emits one (word, 1) pair per space-separated token on that line.
 * Hadoop's own Writable types (LongWritable/Text) are used instead of
 * Long/String because the framework ships keys and values over the network
 * with its own serialization mechanism.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Reused output value: every token counts as exactly one occurrence.
    private static final LongWritable ONE = new LongWritable(1);
    // Reused output key, refilled for each token to avoid re-allocation.
    private Text outputWord = new Text();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Content of the current input line.
        String lineText = value.toString();
        // Break the line into tokens, splitting on single spaces.
        StringTokenizer tokens = new StringTokenizer(lineText, " ");
        // Emit <word, 1> for each token; map output is also a <K, V> pair.
        while (tokens.hasMoreTokens()) {
            this.outputWord.set(tokens.nextToken());
            context.write(this.outputWord, ONE);
        }
    }
}
package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Reduce stage of WordCount. The framework groups map output by key, so each
 * call receives one word plus all of its counts, e.g. key: hello,
 * values: {1, 1, 1, ...}; the reducer sums them into a single total.
 */
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Accumulate the occurrence count for this word.
        long total = 0;
        for (LongWritable occurrence : values) {
            total += occurrence.get();
        }
        // Emit the final <word, count> pair.
        context.write(key, new LongWritable(total));
    }

    /** Small local demo of StringTokenizer with multiple delimiters. */
    public static void main(String[] args) {
        StringTokenizer tokenizer = new StringTokenizer("this,is.a test", ",| ");
        while (tokenizer.hasMoreTokens()) {
            System.out.println(tokenizer.nextToken());
        }
    }
}
package jvm.hadoop.starter;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes a WordCount job (which Mapper class, which Reducer class, where
 * the input files live, where the results go) and submits it to the Hadoop
 * cluster.
 *
 * Launch with: hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunner
 */
public class WordCountRunner {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // Obtain the Job instance through its static factory method.
        Configuration conf = new Configuration();
        Job wordcount = Job.getInstance(conf);

        // Jar carrying this job's resources: the class that holds main().
        wordcount.setJarByClass(WordCountRunner.class);

        // Mapper and Reducer implementations this job uses.
        wordcount.setMapperClass(WordCountMapper.class);
        wordcount.setReducerClass(WordCountReduce.class);

        // Map-output K/V types are left unset, so they default to the
        // reducer output types configured below. Explicit form would be:
        // wordcount.setMapOutputKeyClass(Text.class);
        // wordcount.setMapOutputValueClass(LongWritable.class);

        // K/V types produced by the reducer.
        wordcount.setOutputKeyClass(Text.class);
        wordcount.setOutputValueClass(LongWritable.class);

        // Raw input location — paths here are directories, not files.
        FileInputFormat.setInputPaths(wordcount, "hdfs://master:9000/wc/srcdata");
        // Directory where the job writes its results.
        FileOutputFormat.setOutputPath(wordcount, new Path("hdfs://master:9000/wc/output"));

        // Block until the job finishes, then exit with its status.
        boolean res = wordcount.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
3、hdfs新建/wc/srcdata目录,并在该目录下上传一个要进行wordcount的文件
hadoop fs -mkdir /wc/srcdata
hadoop fs -put /opt/file.txt /wc/srcdata
4、执行命令运行
在/opt/workspace目录下执行hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunner
期间遇到的问题:运行hadoop fs -put命令报 No route to host 错误
发现是从节点的防火墙没有关闭,关闭防火墙即可
0 0
- hadoop-(2)wordcount运行
- hadoop-2.7.2运行WordCount
- hadoop 运行 wordcount
- hadoop wordcount运行实例
- Hadoop WordCount 运行
- [Hadoop] WordCount运行详解
- hadoop WordCount运行详解
- Hadoop中运行WordCount
- hadoop运行WordCount程序
- Hadoop之运行wordcount
- hadoop-运行WordCount实例
- Hadoop 运行wordcount 实例
- Hadoop 运行 Wordcount程序
- Hadoop WordCount运行详解
- Hadoop--05--运行WordCount
- Hadoop 运行wordcount案例
- Hadoop运行WordCount
- hadoop运行WordCount.jar
- Boost.Bind用法详解
- 关于String的两种赋值方式
- 在mac OSX中安装启动zookeeper
- 可点击的ImageSpan
- Maven多环境配置实战 filter
- hadoop-(2)wordcount运行
- 使用Maven创建Java项目
- goal
- Android截屏浅析
- QQ提示信息的拖动效果
- 【spring-security】j_spring_security_check 404
- Hibernate中的Criteria用法
- Jboss 服务器SSL证书安装指南
- C# 本地储存文本的几种方式