Hadoop Multi-File Output with Custom File Names


I recently needed multi-file output in a program, but everything I found online covered either the old API or the new one. Many posts claim that Hadoop 0.20.203.0 has a MultipleOutputs class, and it does, but only the old-API version. Frustrating. The MultipleOutputs those posts use lives in org.apache.hadoop.mapreduce.lib.output, and my Hadoop simply did not have that class. Only after grabbing the latest Hadoop release did I find it. After a lot more searching, it turned out that org.apache.hadoop.mapreduce.lib.output only gained MultipleOutputs in 0.21. So the only option was to write it myself.

After more searching I finally found what I wanted. Since I have not formally studied Java, there is still a lot I do not understand, though I can follow the broad strokes; please forgive any places that are explained poorly or not at all. I have also modified a few things to fit my own program.

First is the output-format class, i.e., the class passed to job.setOutputFormatClass(...):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;

public class MoreFileOutputFormat extends Multiple<Text, Text> {
    @Override
    protected String generateFileNameForKeyValue(Text key, Text value, Configuration conf) {
        return "Your name"; // return whatever output file name you want here
    }
}

Here, a subclass of Multiple must override the generateFileNameForKeyValue() method; the string this method returns is used as the output file's name. Write the method body to suit your needs, and likewise swap in whatever key and value types you require.
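As a concrete illustration, here is a minimal sketch of my own (the class name and naming scheme are not from the original post) that routes each record to a file named after its key, sanitizing characters that are unsafe in file names such as '/':

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;

// Hypothetical example: one output file per distinct key.
public class KeyedFileOutputFormat extends Multiple<Text, Text> {
    @Override
    protected String generateFileNameForKeyValue(Text key, Text value, Configuration conf) {
        // Replace characters that are unsafe in file names (e.g. '/').
        String name = key.toString().replaceAll("[^0-9A-Za-z_.-]", "_");
        // Never return an empty file name.
        return name.isEmpty() ? "default" : name;
    }
}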

Next comes the code of the generic Multiple base class:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;

// Abstract base class; subclasses implement generateFileNameForKeyValue
// to supply the output file name.
public abstract class Multiple<K extends WritableComparable<?>, V extends Writable>
        extends FileOutputFormat<K, V> {

    private MultiRecordWriter writer = null;

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {
        if (writer == null) {
            writer = new MultiRecordWriter(job, getTaskOutputPath(job));
        }
        return writer;
    }

    /** Get the task's output path. */
    private Path getTaskOutputPath(TaskAttemptContext conf) throws IOException {
        Path workPath = null;
        OutputCommitter committer = super.getOutputCommitter(conf);
        if (committer instanceof FileOutputCommitter) {
            workPath = ((FileOutputCommitter) committer).getWorkPath();
        } else {
            Path outputPath = super.getOutputPath(conf);
            if (outputPath == null) {
                throw new IOException("Undefined job output-path");
            }
            workPath = outputPath;
        }
        return workPath;
    }

    // Override in a subclass to supply the output file name.
    protected abstract String generateFileNameForKeyValue(K key, V value, Configuration conf);

    // RecordWriter implementation (inner class).
    public class MultiRecordWriter extends RecordWriter<K, V> {
        /** Cache of RecordWriters, one per output file name. */
        private HashMap<String, RecordWriter<K, V>> recordWriters = null;
        private TaskAttemptContext job = null;
        /** Output directory. */
        private Path workPath = null;

        public MultiRecordWriter(TaskAttemptContext job, Path workPath) {
            super();
            this.job = job;
            this.workPath = workPath;
            recordWriters = new HashMap<String, RecordWriter<K, V>>();
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            Iterator<RecordWriter<K, V>> values = this.recordWriters.values().iterator();
            while (values.hasNext()) {
                values.next().close(context);
            }
            this.recordWriters.clear();
        }

        @Override
        public void write(K key, V value) throws IOException, InterruptedException {
            // Compute the output file name for this record.
            String baseName = generateFileNameForKeyValue(key, value, job.getConfiguration());
            // If no writer is cached for this file name, create one; then write.
            RecordWriter<K, V> rw = this.recordWriters.get(baseName);
            if (rw == null) {
                rw = getBaseRecordWriter(job, baseName);
                this.recordWriters.put(baseName, rw);
            }
            rw.write(key, value);
        }

        // ${mapred.out.dir}/_temporary/_${taskid}/${nameWithExtension}
        private RecordWriter<K, V> getBaseRecordWriter(TaskAttemptContext job, String baseName)
                throws IOException, InterruptedException {
            Configuration conf = job.getConfiguration();
            // Check whether output compression is enabled.
            boolean isCompressed = getCompressOutput(job);
            RecordWriter<K, V> recordWriter = null;
            if (isCompressed) {
                Class<? extends CompressionCodec> codecClass =
                        getOutputCompressorClass(job, GzipCodec.class);
                CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
                Path file = new Path(workPath, baseName + codec.getDefaultExtension());
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // A custom RecordWriter is used here (see MyRecordWriter below).
                recordWriter = new MyRecordWriter<K, V>(
                        new DataOutputStream(codec.createOutputStream(fileOut)));
            } else {
                Path file = new Path(workPath, baseName);
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // A custom RecordWriter is used here (see MyRecordWriter below).
                recordWriter = new MyRecordWriter<K, V>(fileOut);
            }
            return recordWriter;
        }
    }
}

Now implement the MyRecordWriter class used by Multiple's inner MultiRecordWriter, to produce whatever output format you want:

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class MyRecordWriter<K, V> extends RecordWriter<K, V> {
    private static final String utf8 = "UTF-8"; // character encoding for non-Text values

    protected DataOutputStream out;

    public MyRecordWriter(DataOutputStream out) {
        this.out = out;
    }

    private void writeObject(Object o) throws IOException {
        if (o instanceof Text) {
            Text to = (Text) o;
            out.write(to.getBytes(), 0, to.getLength());
        } else {
            // Fall back to the object's string form as bytes;
            // change this branch for non-text types.
            out.write(o.toString().getBytes(utf8));
        }
    }

    /**
     * Write the MapReduce key/value pair to the output stream in a custom format.
     * Note: only the value is written, and no record separator is appended;
     * add one (e.g. '\n') if your format needs it.
     */
    @Override
    public synchronized void write(K key, V value) throws IOException {
        writeObject(value);
    }

    @Override
    public synchronized void close(TaskAttemptContext context) throws IOException {
        out.close();
    }
}
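If you also want the key in the output, similar to the key/value lines TextOutputFormat produces, a variant might look like the sketch below. This class is my own illustration, not part of the original post, and the tab and newline separators are my own choice; to use it, construct it in getBaseRecordWriter() in place of MyRecordWriter.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;

// Hypothetical variant that writes "key<TAB>value\n" per record.
public class TabSeparatedRecordWriter<K, V> extends MyRecordWriter<K, V> {

    public TabSeparatedRecordWriter(DataOutputStream out) {
        super(out);
    }

    // Write a single object as bytes, using Text's raw bytes when possible.
    private void writeBytes(Object o) throws IOException {
        if (o instanceof Text) {
            Text t = (Text) o;
            out.write(t.getBytes(), 0, t.getLength());
        } else {
            out.write(o.toString().getBytes("UTF-8"));
        }
    }

    @Override
    public synchronized void write(K key, V value) throws IOException {
        writeBytes(key);
        out.write('\t'); // field separator (assumption)
        writeBytes(value);
        out.write('\n'); // record separator (assumption)
    }
}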

This class originally had several other methods, but I did not need them, so I deleted them; unfortunately I also deleted the original file, so they cannot be recovered now. My apologies.

Now, simply set the job's output format to the MoreFileOutputFormat class in main() or run(), like this:

job.setOutputFormatClass(MoreFileOutputFormat.class);
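For completeness, a minimal driver sketch might look like the following. MyMapper and MyReducer are placeholders of my own for classes that emit Text/Text pairs, and the input/output paths come from the command line; none of this wiring is from the original post.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MultiFileDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "multi-file output"); // 0.20-era constructor
        job.setJarByClass(MultiFileDriver.class);

        // MyMapper / MyReducer are hypothetical placeholders for your own classes.
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Route output through the custom multi-file format defined above.
        job.setOutputFormatClass(MoreFileOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}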

For more details, see the following link:

http://blog.csdn.net/inkfish/article/details/5156651
