MR之自定义outputformat输出方式代码演示

来源:互联网 发布:hadoop和java的关系 编辑:程序博客网 时间:2024/06/05 19:53
        1.项目需求
现有一些原始日志需要做增强解析处理,流程:
  1. 从原始日志文件中读取数据
  2. 根据日志中的一个URL字段到外部知识库中获取信息增强到原始日志
  3. 如果成功增强,则输出到增强结果目录;如果增强失败,则抽取原始数据中URL字段输出到待爬清单目录
2.分析
程序的关键点是要在一个mapreduce程序中根据数据的不同输出两类结果到不同目录,这类灵活的输出需求可以通过自定义outputformat来实现
3.实现
实现要点:
- 在mapreduce中访问外部资源
- 自定义outputformat,改写其中的recordwriter,改写具体输出数据的方法write()
   1.实际案例一
   1)需求
       过滤输入的log日志中是否包含robot
       (1)包含robot的网站输出到e:/robot.log
       (2)不包含robot的网站输出到e:/other.log 

(1)自定义一个outputformat
package com.robot.mapreduce.outputformat;
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable>{
       @Override
       public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job)
                     throws IOException, InterruptedException {
              // 创建一个RecordWriter
              return newFilterRecordWriter(job);
       }
}

(2)具体的写数据RecordWriter
package com.robot.mapreduce.outputformat;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
public classFilterRecordWriterextends RecordWriter<Text, NullWritable> {
       FSDataOutputStream robotOut = null;
       FSDataOutputStream otherOut = null;
       public FilterRecordWriter(TaskAttemptContext job) {
              // 1 获取文件系统
              FileSystem fs;
              try {
                     fs = FileSystem.get(job.getConfiguration());

                     // 2 创建输出文件路径
                     Path robotPath = new Path("e:/robot.log");
                     Path otherPath = new Path("e:/other.log");

                     // 3 创建输出流
                     robotOut = fs.create(robotPath);
                     otherOut = fs.create(otherPath);
              } catch (IOException e) {
                     e.printStackTrace();
              }
       }
       @Override
       public void write(Text key, NullWritable value) throws IOException, InterruptedException {

              // 判断是否包含“robot”输出到不同文件
              if (key.toString().contains("robot")) {
                     robotOut.write(key.toString().getBytes());
              } else {
                     otherOut.write(key.toString().getBytes());
              }
       }
       @Override
       public void close(TaskAttemptContext context) throws IOException, InterruptedException {
              // 关闭资源
              if (robotOut != null) {
                     robotOut.close();
              }

              if (otherOut != null) {
                     otherOut.close();
              }
       }}

(3)编写FilterMapper
package com.robot.mapreduce.outputformat;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable>{
  @Override
       protected void reduce(Text key, Iterable<NullWritable> values, Context context)
                     throws IOException, InterruptedException {

              String k = key.toString();
              k = k + "\r\n";

              context.write(new Text(k), NullWritable.get());
       }
}

(4)编写FilterDriver
package com.robot.mapreduce.outputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FilterDriver {
       public static void main(String[] args) throws Exception {
              Configuration conf = new Configuration();
              Job job = Job.getInstance(conf);
              job.setJarByClass(FilterDriver.class);
              job.setMapperClass(FilterMapper.class);
              job.setReducerClass(FilterReducer.class);
              job.setMapOutputKeyClass(Text.class);
              job.setMapOutputValueClass(NullWritable.class);
              job.setOutputKeyClass(Text.class);
              job.setOutputValueClass(NullWritable.class);
              // 要将自定义的输出格式组件设置到job中
              job.setOutputFormatClass(FilterOutputFormat.class);

              FileInputFormat.setInputPaths(job, new Path(args[0]));

              // 虽然我们自定义了outputformat,但是因为我们的outputformat继承自fileoutputformat
              // 而fileoutputformat要输出一个_SUCCESS文件,这个文件输出必备的,是为了让MRappmaster知道程序执行成功了。所以在这还得指定一个输出目录
              FileOutputFormat.setOutputPath(job, new Path(args[1]));

              boolean result = job.waitForCompletion(true);
              System.exit(result ? 0 : 1);
       }
}