Implementing custom multi-file output in MapReduce


Getting the file name of the current input split in Hadoop MapReduce

I am using Hadoop 0.20.2. After searching for quite a while, here is how to get, inside the map method, the name of the file the current split belongs to:

// get the file name of the current input split
InputSplit inputSplit = (InputSplit) context.getInputSplit();
String filename = ((FileSplit) inputSplit).getPath().getName();
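For context, a minimal sketch of a Mapper using this snippet (the class name FileNameMapper and the emitted key/value are illustrative, not from the original article):
[java]
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class FileNameMapper extends Mapper<LongWritable, Text, Text, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Cast the generic InputSplit to FileSplit to reach the source file path
        InputSplit inputSplit = context.getInputSplit();
        String filename = ((FileSplit) inputSplit).getPath().getName();
        // Emit the file name as the key so records can be grouped by source file
        context.write(new Text(filename), value);
    }
}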

Implementing custom multi-file output in MapReduce


An ordinary MapReduce job has a map phase and a reduce phase. Without any extra configuration, the results are written out as multiple files named part-000*, with exactly one file per reducer, and the record format of those files cannot be freely customized. That makes downstream processing of the results inconvenient.
       In Hadoop, reduce does support multiple outputs with controllable file names: extend the MultipleTextOutputFormat class and override its generateFileNameForKeyValue method. If all you need is control over the output file names, it is enough to implement your own LogNameMultipleTextOutputFormat and set jobconf.setOutputFormat(LogNameMultipleTextOutputFormat.class), as sketched below. That approach, however, only works with the old Hadoop API. If you want to use the new API, customize the format of the output records, or cover other needs, you have to rewrite parts of the Hadoop API yourself.
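For reference, a minimal sketch of that old-API approach (the class name LogNameMultipleTextOutputFormat comes from the text above; the key-splitting logic is only illustrative):
[java]
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat;

// Old-API (org.apache.hadoop.mapred) variant: only the output file name is customized.
public class LogNameMultipleTextOutputFormat extends MultipleTextOutputFormat<Text, NullWritable> {

    @Override
    protected String generateFileNameForKeyValue(Text key, NullWritable value, String name) {
        // Route each record to a file named after the first comma-separated field of the key;
        // "name" is the default leaf file name (e.g. part-00000) and is ignored here.
        return key.toString().split(",")[0];
    }
}

// In the driver, with the old JobConf-based API:
// JobConf jobconf = new JobConf(MyJob.class);
// jobconf.setOutputFormat(LogNameMultipleTextOutputFormat.class);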
    First, build your own MultipleOutputFormat class that extends FileOutputFormat (note: the FileOutputFormat from the org.apache.hadoop.mapreduce.lib.output package).
[java]
   
 
import java.io.DataOutputStream; 
import java.io.IOException; 
import java.util.HashMap; 
import java.util.Iterator; 
 
 
import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.FSDataOutputStream; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.Writable; 
import org.apache.hadoop.io.WritableComparable; 
import org.apache.hadoop.io.compress.CompressionCodec; 
import org.apache.hadoop.io.compress.GzipCodec; 
import org.apache.hadoop.mapreduce.OutputCommitter; 
import org.apache.hadoop.mapreduce.RecordWriter; 
import org.apache.hadoop.mapreduce.TaskAttemptContext; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 
import org.apache.hadoop.util.ReflectionUtils; 
 
 
/**
 * This abstract class extends FileOutputFormat, allowing the output data to be
 * written to different output files. There are three basic use cases for
 * this class.
 * Created on 2012-07-08
 * @author zhoulongliu
 * @param <K>
 * @param <V>
 */
public abstract class MultipleOutputFormat<K extends WritableComparable<?>, V extends Writable> extends
        FileOutputFormat<K, V> {

    // Subclasses must implement generateFileNameForKeyValue to provide the output file name
    private MultiRecordWriter writer = null;

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        if (writer == null) {
            writer = new MultiRecordWriter(job, getTaskOutputPath(job));
        }
        return writer;
    }
 
 
    /**
     * Get the task output path.
     * @param conf
     * @return
     * @throws IOException
     */
    private Path getTaskOutputPath(TaskAttemptContext conf) throws IOException {
        Path workPath = null;
        OutputCommitter committer = super.getOutputCommitter(conf);
        if (committer instanceof FileOutputCommitter) {
            workPath = ((FileOutputCommitter) committer).getWorkPath();
        } else {
            Path outputPath = super.getOutputPath(conf);
            if (outputPath == null) {
                throw new IOException("Undefined job output-path");
            }
            workPath = outputPath;
        }
        return workPath;
    }
 
 
    /**
     * Determine the output file name (including extension) from the key, value
     * and configuration. Generate the output file name based on the given key;
     * the default behavior is that the file name does not depend on the key.
     *
     * @param key the key of the output data
     * @param value the value of the output data
     * @param conf the configuration object
     * @return generated file name
     */
    protected abstract String generateFileNameForKeyValue(K key, V value, Configuration conf);

    /**
     * RecordWriter implementation (inner class).
     * @author zhoulongliu
     */
    public class MultiRecordWriter extends RecordWriter<K, V> {
        /** Cache of per-file RecordWriters */
        private HashMap<String, RecordWriter<K, V>> recordWriters = null;
        private TaskAttemptContext job = null;
        /** Output directory */
        private Path workPath = null;

        public MultiRecordWriter(TaskAttemptContext job, Path workPath) {
            super();
            this.job = job;
            this.workPath = workPath;
            recordWriters = new HashMap<String, RecordWriter<K, V>>();
        }
 
 
        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            Iterator<RecordWriter<K, V>> values = this.recordWriters.values().iterator();
            while (values.hasNext()) {
                values.next().close(context);
            }
            this.recordWriters.clear();
        }

        @Override
        public void write(K key, V value) throws IOException, InterruptedException {
            // determine the output file name
            String baseName = generateFileNameForKeyValue(key, value, job.getConfiguration());
            // if there is no writer for this file name yet, create one; otherwise reuse it
            RecordWriter<K, V> rw = this.recordWriters.get(baseName);
            if (rw == null) {
                rw = getBaseRecordWriter(job, baseName);
                this.recordWriters.put(baseName, rw);
            }
            rw.write(key, value);
        }
 
 
        // ${mapred.out.dir}/_temporary/_${taskid}/${nameWithExtension}
        private RecordWriter<K, V> getBaseRecordWriter(TaskAttemptContext job, String baseName) throws IOException,
                InterruptedException {
            Configuration conf = job.getConfiguration();
            // check whether output compression is enabled
            boolean isCompressed = getCompressOutput(job);
            String keyValueSeparator = ",";
            RecordWriter<K, V> recordWriter = null;
            if (isCompressed) {
                Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
                CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
                Path file = new Path(workPath, baseName + codec.getDefaultExtension());
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // here the custom LineRecordWriter is used
                recordWriter = new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                        keyValueSeparator);
            } else {
                Path file = new Path(workPath, baseName);
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // here the custom LineRecordWriter is used
                recordWriter = new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
            }
            return recordWriter;
        }
    }
}
 
 

    Next, you also need a custom LineRecordWriter, an implementation of the RecordWriter class, to define the output record format.
[java] 
import java.io.DataOutputStream; 
import java.io.IOException; 
import java.io.UnsupportedEncodingException; 
 
import org.apache.hadoop.io.NullWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.RecordWriter; 
import org.apache.hadoop.mapreduce.TaskAttemptContext; 
 
/**
 * A RecordWriter implementation rebuilt for the new API.
 * Created on 2012-07-08
 * @author zhoulongliu
 * @param <K>
 * @param <V>
 */
public class LineRecordWriter<K, V> extends RecordWriter<K, V> {

    private static final String utf8 = "UTF-8"; // character encoding
    private static final byte[] newline;
    static {
        try {
            newline = "\n".getBytes(utf8); // line separator
        } catch (UnsupportedEncodingException uee) {
            throw new IllegalArgumentException("can't find " + utf8 + " encoding");
        }
    }
    protected DataOutputStream out;
    private final byte[] keyValueSeparator;

    // constructor: takes the output stream and the key/value separator
    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
        this.out = out;
        try {
            this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
        } catch (UnsupportedEncodingException uee) {
            throw new IllegalArgumentException("can't find " + utf8 + " encoding");
        }
    }
 
    public LineRecordWriter(DataOutputStream out) { 
        this(out, "\t"); 
    } 
 
    private void writeObject(Object o) throws IOException {
        if (o instanceof Text) { 
            Text to =(Text) o; 
           out.write(to.getBytes(), 0, to.getLength()); 
        } else { 
           out.write(o.toString().getBytes(utf8)); 
        } 
    } 
    
    /**
     * Write the MapReduce key and value to the output stream in the custom format.
     */
    public synchronized void write(K key, V value) throws IOException {
        boolean nullKey = key == null || key instanceof NullWritable;
        boolean nullValue = value == null || value instanceof NullWritable;
        if (nullKey && nullValue){ 
           return; 
        } 
        if (!nullKey) { 
           writeObject(key); 
        } 
        if (!(nullKey || nullValue)) { 
           out.write(keyValueSeparator); 
        } 
        if (!nullValue) { 
           writeObject(value); 
        } 
        out.write(newline); 
    } 
 
    public synchronized void close(TaskAttemptContext context) throws IOException {
        out.close();
    }
}
 

     Next, implement the generateFileNameForKeyValue method declared in the MultipleOutputFormat class above so that it returns the desired output file name. Here I split the key on commas and use one of its fields (sp[1] in the code below) as the output file name, so all records that share that field value end up in one file named after the value.
[java] 
public static class VVLogNameMultipleTextOutputFormat extends MultipleOutputFormat<Text, NullWritable> {

    @Override
    protected String generateFileNameForKeyValue(Text key, NullWritable value, Configuration conf) {
        String sp[] = key.toString().split(",");
        String filename = sp[1];
        try {
            // make sure the field is numeric before using it as the file name
            Long.parseLong(sp[1]);
        } catch (NumberFormatException e) {
            // fall back to a fixed name when the field is not a number
            filename = "000000000000";
        }
        return filename;
    }
}
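The file-name logic above assumes the reducer emits a comma-separated Text key whose second field is numeric. The EtlReducer referenced in the driver below is not shown in the article; a rough sketch of what such a reducer could look like (its key layout is an assumption, not the author's code):
[java]
import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical reducer: writes keys like "vv,20120708,..." so that
// VVLogNameMultipleTextOutputFormat picks sp[1] (here 20120708) as the output file name.
public class EtlReducer extends Reducer<Text, Text, Text, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            // The key is assumed to be already comma-separated; append the value as extra fields.
            context.write(new Text(key.toString() + "," + value.toString()), NullWritable.get());
        }
    }
}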


    
Finally, configure the job in the driver:
        Configuration conf = getConf();
        Job job = new Job(conf);
        job.setNumReduceTasks(12);
        ......
        job.setMapperClass(VVEtlMapper.class);
        job.setReducerClass(EtlReducer.class);
        job.setOutputFormatClass(VVLogNameMultipleTextOutputFormat.class); // set the custom multi-file output format
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        FileOutputFormat.setCompressOutput(job, true); // compress the output
        FileOutputFormat.setOutputCompressorClass(job, LzopCodec.class); // use LZO compression for the output
   OK, with that you have written a MapReduce job with custom multi-file output on the new Hadoop API.