Hadoop in Practice: Sequential Chaining


Although jobs can sometimes be launched one at a time by hand, it is far more convenient to build an automated execution sequence. MapReduce jobs can be chained together so that the output of one job becomes the input of the next, much like a Unix pipe.
Test code. (a) The driver class, Driver:
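Before the ChainMapper example below, here is a minimal sketch of the driver-level pattern this paragraph describes: two complete jobs run back to back, with the first job's output directory reused as the second job's input. The mapper/reducer class names (StageOneMapper, StageOneReducer, and so on) and the paths are placeholders for illustration, not part of this article's example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SequentialDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // the "pipe": job 1 writes here, job 2 reads from here
        Path intermediate = new Path("hdfs://master:9000/user/output/stage1");

        Job job1 = Job.getInstance(conf, "stage1");
        job1.setJarByClass(SequentialDriver.class);
        job1.setMapperClass(StageOneMapper.class);    // placeholder class
        job1.setReducerClass(StageOneReducer.class);  // placeholder class
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job1, new Path("hdfs://master:9000/user/input"));
        FileOutputFormat.setOutputPath(job1, intermediate);
        if (!job1.waitForCompletion(true))
            System.exit(1); // stop the pipeline if stage 1 fails

        Job job2 = Job.getInstance(conf, "stage2");
        job2.setJarByClass(SequentialDriver.class);
        job2.setMapperClass(StageTwoMapper.class);    // placeholder class
        job2.setReducerClass(StageTwoReducer.class);  // placeholder class
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job2, intermediate); // job 1's output is job 2's input
        FileOutputFormat.setOutputPath(job2, new Path("hdfs://master:9000/user/output/stage2"));
        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}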

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Driver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // component configuration is carried by a Hadoop Configuration instance
        /*
         * In main we follow the usual job-setup pattern: create a Job object, set its
         * job name, configure the input and output paths, set our Mapper and Reducer
         * classes, set the InputFormat and the correct output types, and so on. Then we
         * submit the job with job.waitForCompletion() and wait for it to run and return.
         */
        Job job = Job.getInstance(conf, "JobName");
        job.setJarByClass(Driver.class);

        Configuration map1Conf = new Configuration(false);
        ChainMapper.addMapper(job, MapClass1.class, LongWritable.class, Text.class,
                Text.class, Text.class, map1Conf);
        // sequential chaining in action: MapClass2 consumes MapClass1's output
        Configuration map2Conf = new Configuration(false);
        ChainMapper.addMapper(job, MapClass2.class, Text.class, Text.class,
                Text.class, Text.class, map2Conf);

        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);   // the reducer emits Text keys and Text values
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path("hdfs://master:9000/user/input/ChainMapper.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://master:9000/user/output/test6"));
        if (!job.waitForCompletion(true))
            return;
    }
}
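A note on the design: every mapper added with ChainMapper runs inside the same map task, so records flow from one mapper to the next in memory rather than being written to HDFS between stages; the chain follows the [MAP+ / REDUCE MAP*] pattern (ChainReducer is the companion class for adding mappers after the reduce). The new Configuration(false) calls create empty per-mapper configurations (false suppresses loading of the default resources), so each chained mapper can carry its own settings without touching the job-wide configuration.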

The first mapper, MapClass1:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MapClass1 extends Mapper<LongWritable, Text, Text, Text> {
    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        String[] citation = ivalue.toString().split(" ");
        if (!citation[0].equals("100")) {
            // sequential chaining in action: this output becomes the next mapper's input
            context.write(new Text(citation[0]), ivalue);
        }
    }
}

The second mapper, MapClass2:

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// MapClass1's output is this mapper's input
public class MapClass2 extends Mapper<Text, Text, Text, Text> {
    public void map(Text ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        if (!ikey.toString().equals("101")) {
            context.write(ikey, ivalue);
        }
    }
}

The Reducer class:

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<Text, Text, Text, Text> {
    public void reduce(Text _key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // iterate over the mapper output and emit each value unchanged
        for (Text val : values) {
            context.write(_key, val);
        }
    }
}
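To trace the chain end to end, suppose ChainMapper.txt contained the following three lines (hypothetical data, purely for illustration):

100 apple
101 banana
102 cherry

MapClass1 drops the first line (its first field is "100") and emits (first field, whole line) for the rest; MapClass2 then drops the pair whose key is "101"; the reducer writes whatever survives. The job output would therefore be the single record "102<TAB>102 cherry" (TextOutputFormat separates key and value with a tab).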

The example is fairly simple and easy to follow. Keep at it!
