Hadoop Self-Study Guide 3: WordCount Explained (2)


1. Introduction

Starting with version 0.20.2, Hadoop provides a new API. The new API lives in the org.apache.hadoop.mapreduce package, while the old API lives in org.apache.hadoop.mapred.
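For orientation, the split is visible directly in the imports you use; the classes below are just a representative sample from each package:

// Old API (org.apache.hadoop.mapred)
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobClient;

// New API (org.apache.hadoop.mapreduce)
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;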

2. WordCount with the New API

package hadoop.v3;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.hai.hdfs.utils.HDFSUtils;

/**
 * @author : chenhaipeng
 * @date : 2015-09-06 02:00:50
 */
public class WordCountNewAPI extends Configured implements Tool {

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // Split each input line into tokens and emit <word, 1> for every token.
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Sum the counts for each word and emit <word, total>.
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new WordCountNewAPI(), args);
        System.exit(ret);
    }

    // Delete the output directory if it already exists, so the job can be rerun.
    // HDFSUtils (org.hai.hdfs.utils) is the author's own helper class.
    public static void deletedir(String path) {
        try {
            HDFSUtils.DeleteHDFSFile(path);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Configure and submit the job (Tool.run); args[0] is the input path,
    // args[1] is the output path.
    @Override
    public int run(String[] args) throws Exception {
        Job job = new Job(getConf());
        job.setJarByClass(WordCountNewAPI.class);
        job.setJobName("WordCountNewAPI");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        deletedir(args[1]);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }
}
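To run the job, package the class into a jar and submit it with the input and output HDFS paths as arguments. The jar name and paths below are placeholders, not values from the original post:

hadoop jar wordcount.jar hadoop.v3.WordCountNewAPI /user/hadoop/input /user/hadoop/output

Because the driver extends Configured, implements Tool, and is launched through ToolRunner, generic options such as -D key=value can also be passed on the command line before the input and output paths.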


Differences from the old API:

1. In the new API, Mapper and Reducer are abstract classes rather than interfaces; the Map and Reduce classes no longer implement the Mapper and Reducer interfaces but instead extend the Mapper and Reducer abstract classes (see the old-vs-new sketch after this list).

2. The new API uses a Context object in place of the OutputCollector and Reporter roles.

3. Job configuration is done through Configuration (the old API used JobConf).

4. Job control is handled by the Job class (the old API submitted jobs through JobClient).
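To make points 1 and 2 concrete, here is a minimal old-vs-new Mapper sketch. The class names OldStyleMap and NewStyleMap are illustrative only and do not appear in the original code; the old-API classes are fully qualified to keep the two versions side by side:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

// Old API: implement the Mapper interface; output goes through OutputCollector,
// progress and status reporting through Reporter.
class OldStyleMap extends org.apache.hadoop.mapred.MapReduceBase
        implements org.apache.hadoop.mapred.Mapper<LongWritable, Text, Text, IntWritable> {
    public void map(LongWritable key, Text value,
                    org.apache.hadoop.mapred.OutputCollector<Text, IntWritable> output,
                    org.apache.hadoop.mapred.Reporter reporter) throws IOException {
        output.collect(new Text(value.toString()), new IntWritable(1));
    }
}

// New API: extend the Mapper abstract class; a single Context replaces both
// OutputCollector and Reporter.
class NewStyleMap extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        context.write(new Text(value.toString()), new IntWritable(1));
    }
}

Points 3 and 4 show up the same way: the old API configured and submitted a job with JobConf plus JobClient.runJob(conf), while the new API configures it from a Configuration and controls it entirely through the Job class, as in the run() method above (job.waitForCompletion(true)).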


