Hadoop DistributedCache usage code snippet

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class DistributeCatcheTest {

  public static class TokenizerMapper extends Mapper<Object, Text, Text, Text> {

    private Text word = new Text("file");
    private Path[] files;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      super.setup(context);
      // Local paths of all files that were placed in the distributed cache.
      files = DistributedCache.getLocalCacheFiles(context.getConfiguration());
      // Read the cached file through its symlink name to verify that it is
      // available in the task's working directory.
      System.out.println("start ======");
      BufferedReader br = new BufferedReader(new FileReader("input.txt"));
      String line;
      while ((line = br.readLine()) != null) {
        System.out.println(line);
      }
      br.close();
      System.out.println("end ======");
    }

    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      // Emit the local path of every cache file for each input record.
      for (Path path : files) {
        context.write(word, new Text(path.toString()));
      }
    }
  }

  public static class IntSumReducer extends Reducer<Text, Text, Text, Text> {
    public void reduce(Text key, Iterable<Text> values, Context context)
        throws IOException, InterruptedException {
      // Identity reduce: pass every value through unchanged.
      for (Text val : values) {
        context.write(key, val);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: DistributeCatcheTest <in> <out>");
      System.exit(2);
    }
    // Register the cache file and enable symlinks BEFORE creating the Job;
    // see note 1 below.
    DistributedCache.createSymlink(conf);
    DistributedCache.addCacheFile(new URI("/usr/input/input.txt#input.txt"), conf);
    Job job = new Job(conf, "DistCatche");
    job.setJarByClass(DistributeCatcheTest.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
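To try the snippet, the cached file must already exist in HDFS at the path handed to addCacheFile(). Assuming the jar is named distcache.jar (the name is arbitrary), the run would look roughly like: first hadoop fs -put input.txt /usr/input/input.txt to upload the file, then hadoop jar distcache.jar DistributeCatcheTest <in> <out> to launch the job. Note that the System.out.println output from setup() goes to the individual task logs, not to the console of the submitting client.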


A few points to note here:


1.
DistributedCache.createSymlink(conf);
DistributedCache.addCacheFile(new URI("/usr/input/input.txt#input.txt"), conf);
These two calls must come before the Job is constructed. The Job copies the Configuration when it is created, so cache files registered afterwards never reach the job, and the mapper or reducer will fail to find the file.
2.
/usr/input/input.txt#input.txt
The part after the # acts as a symlink (an alias) to the cached file in the task's working directory, so the mapper or reducer can open it directly with new FileReader("input.txt"). A sketch of the same pattern on the newer Job API follows below.
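
For reference, org.apache.hadoop.filecache.DistributedCache is deprecated from Hadoop 2.x on. The same pattern can be written against the Job API, where addCacheFile() is called on the job itself and the symlink for a URI with a # fragment is created automatically. The sketch below assumes that API; the class names are placeholders and not part of the original snippet.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CacheFileSketch {

  // Map-only job: prints the cached file in setup() and passes records through.
  public static class CacheMapper extends Mapper<Object, Text, Text, Text> {
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      // The "#input.txt" fragment becomes a symlink in the task's working
      // directory, so the file can be opened under that alias.
      BufferedReader br = new BufferedReader(new FileReader("input.txt"));
      String line;
      while ((line = br.readLine()) != null) {
        System.out.println(line);
      }
      br.close();
    }

    @Override
    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      context.write(new Text("line"), value);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "CacheFileSketch");
    job.setJarByClass(CacheFileSketch.class);
    // Register the cache file on the Job itself, before submission;
    // the HDFS path here is a placeholder.
    job.addCacheFile(new URI("/usr/input/input.txt#input.txt"));
    job.setMapperClass(CacheMapper.class);
    job.setNumReduceTasks(0);  // map-only, for brevity
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

With this variant, createSymlink() is unnecessary, and the registered cache file URIs can still be retrieved in setup() with context.getCacheFiles().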


