MapReduce Programming Basics - Study Notes [2]


1. The MapReduce Programming Model

  (1) Record reader: reads the input file from HDFS and turns it into key-value records;
  (2) Map: transforms each record into intermediate key-value pairs; in the WordCount example, every word read from the text becomes a pair of the form <word, 1>;
  (3) Combiner: an important optional step; many MR jobs run without one, but performance suffers. It shrinks the intermediate data by performing a local reduce on the map side;
  (4) Partitioner: decides which of the n reduce tasks receives each of the m map outputs, typically by hashing the key (see the sketch after this list);
  (5) Shuffle and sort: the data exchange between the map and reduce sides, which also sorts and groups the intermediate data by key;
  (6) Reduce: aggregates the grouped values, and can likewise implement joins and similar operations;
  (7) Output format: writes the final output.
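
As a concrete illustration of step (4), below is a minimal sketch of a custom Partitioner for the WordCount key/value types. The class name WordPartitioner is an illustrative assumption; the hash-and-modulo logic mirrors the behaviour of Hadoop's default HashPartitioner, which WordCount itself relies on.

```java
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Minimal sketch: route each (word, count) pair to one of the reduce tasks by hashing the key.
// WordPartitioner is a hypothetical name, not part of the Hadoop examples.
public class WordPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numReduceTasks) {
        // Mask the sign bit so the result is non-negative, then take it modulo
        // the number of reduce tasks, exactly as the default HashPartitioner does.
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}
```

It would be registered in the driver with `job.setPartitionerClass(WordPartitioner.class)`; this is optional, since the default partitioner already hashes the key.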

【1】A typical WordCount execution flow is illustrated below
(figure: WordCount execution flow diagram)

**The main steps are as follows.** The record reader splits the raw data (or a portion of it) from HDFS into key-value pairs of the form (key, value). The key can take any form; for text input it is usually the byte offset of the line, so the first line starts at offset 0, and because "Hello World" plus its line break occupies 12 characters, "Bye World" starts at offset 12. The map() function then processes each line and produces pairs whose key is a word and whose value is its count of 1; the map side also sorts this output by key, here in alphabetical order. Next comes the combine step, which is equivalent to a local reduce; in the implementation it simply reuses the Reducer class. It merges the pairs that share a key, which noticeably improves efficiency on large inputs. The combined output then goes through the shuffle; note that after the shuffle the values belonging to one key arrive at the reducer as a list exposed through an iterator. Finally, the reduce step produces the result.
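
To make this data flow concrete, here is a minimal local sketch in plain Java (no Hadoop involved) that imitates the map, shuffle-and-sort, and reduce stages on the two sample lines. The class name WordCountTrace and the in-memory collections are illustrative assumptions, not part of the Hadoop example.

```java
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Local, single-process imitation of the WordCount pipeline described above.
public class WordCountTrace {
    public static void main(String[] args) {
        List<String> lines = Arrays.asList("Hello World", "Bye World");

        // "map": emit a (word, 1) pair for every token
        List<Map.Entry<String, Integer>> mapped = new ArrayList<>();
        for (String line : lines) {
            for (String word : line.split("\\s+")) {
                mapped.add(new AbstractMap.SimpleEntry<>(word, 1));
            }
        }

        // "shuffle and sort": group the 1s by key; TreeMap keeps the keys in alphabetical order
        Map<String, List<Integer>> grouped = new TreeMap<>();
        for (Map.Entry<String, Integer> kv : mapped) {
            grouped.computeIfAbsent(kv.getKey(), k -> new ArrayList<>()).add(kv.getValue());
        }

        // "reduce": sum the value list of each key
        for (Map.Entry<String, List<Integer>> entry : grouped.entrySet()) {
            int sum = 0;
            for (int one : entry.getValue()) {
                sum += one;
            }
            System.out.println(entry.getKey() + "\t" + sum);
        }
        // prints: Bye 1, Hello 1, World 2
    }
}
```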

【2】The annotated WordCount example code

```java
package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount
{
    // The Mapper and Reducer are inner classes that extend their respective base classes.
    // The input key is an Object and can be of any type (here the byte offset); the input
    // value is the first Text, one line of text. The output key is the second Text, and
    // the output value is an IntWritable.
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>
    {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // Context is the job context; it gives access to information about the parallel
        // execution environment, such as the job id. It is not important here.
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString()); // splits on whitespace by default
            while (itr.hasMoreTokens())
            {
                word.set(itr.nextToken());
                context.write(word, one); // emit (word, 1)
            }
        }
    }

    // The Combiner is a local reduce, so this Reducer class can be reused for it directly.
    // Its first two type parameters correspond to the last two type parameters of the Mapper.
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>
    {
        private IntWritable result = new IntWritable(); // number of occurrences

        // Note that values is not a single IntWritable but an Iterable;
        // keep this in mind when writing your own reducer.
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            for (IntWritable val : values)
            {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    // The number of map tasks is normally determined by the input size: one task per block.
    // The number of reduce tasks defaults to 1, and is set to 0 for a map-only job.
    public static void main(String[] args) throws Exception
    {
        // generic job setup
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2)
        {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class); // tells Hadoop which jar contains this class
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1); // submit the job and wait for it to complete
    }
}
```
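
A note on the comment before main() about task counts: the number of map tasks follows from the number of input splits (roughly one per HDFS block), while the number of reduce tasks is a job setting. Below is a hedged sketch of the relevant call, with values that are illustrative only; the example above keeps the default of one reducer.

```java
// Illustrative only; not part of the original example.
job.setNumReduceTasks(4);   // four reducers -> output files part-r-00000 .. part-r-00003
// job.setNumReduceTasks(0); // map-only job: no shuffle or reduce, map output is written directly
```

Once packaged, the job would typically be submitted with something like `hadoop jar wordcount.jar org.apache.hadoop.examples.WordCount <in> <out>`, where the jar name is an assumption.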

【3】The annotated WordMean example code

```java
package org.apache.hadoop.examples;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

// Computes the length of every word in the input and the mean word length over the whole input.
// Approach: take the length of each word, sum the lengths, and divide by the number of words.
public class WordMean extends Configured implements Tool
{
    private double mean = 0;

    private final static Text COUNT = new Text("count");
    private final static Text LENGTH = new Text("length");
    private final static LongWritable ONE = new LongWritable(1);

    /**
     * Maps words from line of text into 2 key-value pairs; one key-value pair for
     * counting the word, another for counting its length.
     */
    // The type parameters are largely the same as in WordCount; the one difference is that the
    // output value is a LongWritable, because the total length of all words could overflow an int.
    public static class WordMeanMapper extends Mapper<Object, Text, Text, LongWritable>
    {
        private LongWritable wordLen = new LongWritable();

        /**
         * Emits 2 key-value pairs for counting the word and its length. Outputs are
         * (Text, LongWritable).
         *
         * @param value
         *          This will be a line of text coming in from our input file.
         */
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            // split the line read from the input file into tokens
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens())
            {
                String string = itr.nextToken();
                this.wordLen.set(string.length());
                context.write(LENGTH, this.wordLen); // emit one length per word; the word itself does not matter
                context.write(COUNT, ONE);           // count the word once
            }
        }
    }

    /**
     * Performs integer summation of all the values for each key.
     */
    public static class WordMeanReducer extends Reducer<Text, LongWritable, Text, LongWritable>
    {
        private LongWritable sum = new LongWritable();

        /**
         * Sums all the individual values within the iterator and writes them to the
         * same key.
         *
         * @param key
         *          This will be one of 2 constants: LENGTH_STR or COUNT_STR.
         * @param values
         *          This will be an iterator of all the values associated with that
         *          key.
         */
        public void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException
        {
            int theSum = 0;
            for (LongWritable val : values)
            {
                theSum += val.get();
            }
            sum.set(theSum);
            context.write(key, sum); // emit the totals: the overall count and the overall word length
        }
    }

    /**
     * Reads the output file and parses the summation of lengths, and the word
     * count, to perform a quick calculation of the mean.
     *
     * @param path
     *          The path to find the output file in. Set in main to the output
     *          directory.
     * @throws IOException
     *           If it cannot access the output directory, we throw an exception.
     */
    private double readAndCalcMean(Path path, Configuration conf) throws IOException
    {
        FileSystem fs = FileSystem.get(conf); // the configured file system, e.g. HDFS
        Path file = new Path(path, "part-r-00000");

        if (!fs.exists(file))
        {
            throw new IOException("Output not found!");
        }

        BufferedReader br = null;

        // average = total sum / number of elements;
        try
        {
            br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));

            long count = 0;
            long length = 0;

            String line;
            while ((line = br.readLine()) != null)
            {
                StringTokenizer st = new StringTokenizer(line);

                // grab type
                String type = st.nextToken();

                // differentiate
                if (type.equals(COUNT.toString()))
                {
                    String countLit = st.nextToken();
                    count = Long.parseLong(countLit);
                }
                else if (type.equals(LENGTH.toString()))
                {
                    String lengthLit = st.nextToken();
                    length = Long.parseLong(lengthLit);
                }
            }

            double theMean = (((double) length) / ((double) count));
            System.out.println("The mean is: " + theMean);
            return theMean;
        }
        finally
        {
            if (br != null)
            {
                br.close();
            }
        }
    }

    public static void main(String[] args) throws Exception
    {
        // For a class that implements Tool, ToolRunner parses the generic options
        // and then invokes its run() method.
        ToolRunner.run(new Configuration(), new WordMean(), args);
    }

    @Override
    public int run(String[] args) throws Exception
    {
        if (args.length != 2)
        {
            System.err.println("Usage: wordmean <in> <out>");
            return 0;
        }

        Configuration conf = getConf();

        @SuppressWarnings("deprecation")
        Job job = new Job(conf, "word mean");
        job.setJarByClass(WordMean.class);
        job.setMapperClass(WordMeanMapper.class);
        job.setCombinerClass(WordMeanReducer.class);
        job.setReducerClass(WordMeanReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        Path outputpath = new Path(args[1]);
        FileOutputFormat.setOutputPath(job, outputpath);
        boolean result = job.waitForCompletion(true);
        mean = readAndCalcMean(outputpath, conf);

        return (result ? 0 : 1);
    }

    /**
     * Only valuable after run() called.
     *
     * @return Returns the mean value.
     */
    public double getMean()
    {
        return mean;
    }
}
```
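
A quick worked example, assuming the same two sample lines as before ("Hello World" and "Bye World"): the mapper emits the `length` values 5, 5, 3, 5 plus four `count` records, so the reducer output file part-r-00000 would contain the two lines `count 4` and `length 18`, and readAndCalcMean prints a mean of 18 / 4 = 4.5.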

【4】The annotated WordMedian example code

```java
package org.apache.hadoop.examples;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.base.Charsets;

// Computes the median word length; the median is used because the mean is sensitive to extreme values.
public class WordMedian extends Configured implements Tool
{
    private double median = 0;
    private final static IntWritable ONE = new IntWritable(1);

    /**
     * Maps words from line of text into a key-value pair; the length of the word
     * as the key, and 1 as the value.
     */
    public static class WordMedianMapper extends Mapper<Object, Text, IntWritable, IntWritable>
    {
        private IntWritable length = new IntWritable();

        /**
         * Emits a key-value pair for counting the word. Outputs are (IntWritable,
         * IntWritable).
         *
         * @param value
         *          This will be a line of text coming in from our input file.
         */
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens())
            {
                String string = itr.nextToken();
                length.set(string.length());
                context.write(length, ONE); // emit the word's length as the key, with a count of one occurrence
            }
        }
    }

    /**
     * Performs integer summation of all the values for each key.
     */
    public static class WordMedianReducer extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable>
    {
        private IntWritable val = new IntWritable();

        /**
         * Sums all the individual values within the iterator and writes them to the
         * same key.
         *
         * @param key
         *          This will be a length of a word that was read.
         * @param values
         *          This will be an iterator of all the values associated with that
         *          key.
         */
        public void reduce(IntWritable key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            for (IntWritable value : values)
            {
                sum += value.get();
            }
            val.set(sum);
            context.write(key, val); // key is the word length emitted by the map; val is the total number of words of that length
        }
    }

    /**
     * This is a standard program to read and find a median value based on a file
     * of word counts such as: 1 456, 2 132, 3 56... Where the first values are
     * the word lengths and the following values are the number of times that
     * words of that length appear.
     *
     * @param path
     *          The path to read the HDFS file from (part-r-00000...00001...etc).
     * @param medianIndex1
     *          The first length value to look for.
     * @param medianIndex2
     *          The second length value to look for (will be the same as the first
     *          if there are an even number of words total).
     * @throws IOException
     *           If file cannot be found, we throw an exception.
     */
    // helper method: scans the reducer output and locates the median from the cumulative counts
    private double readAndFindMedian(String path, int medianIndex1, int medianIndex2, Configuration conf) throws IOException
    {
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path(path, "part-r-00000");

        if (!fs.exists(file))
            throw new IOException("Output not found!");

        BufferedReader br = null;

        try
        {
            br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8));
            int num = 0;

            String line;
            while ((line = br.readLine()) != null)
            {
                StringTokenizer st = new StringTokenizer(line);

                // grab length
                String currLen = st.nextToken();

                // grab count
                String lengthFreq = st.nextToken();

                int prevNum = num;
                num += Integer.parseInt(lengthFreq);

                if (medianIndex2 >= prevNum && medianIndex1 <= num)
                {
                    System.out.println("The median is: " + currLen);
                    br.close();
                    return Double.parseDouble(currLen);
                }
                else if (medianIndex2 >= prevNum && medianIndex1 < num)
                {
                    String nextCurrLen = st.nextToken();
                    double theMedian = (Integer.parseInt(currLen) + Integer.parseInt(nextCurrLen)) / 2.0;
                    System.out.println("The median is: " + theMedian);
                    br.close();
                    return theMedian;
                }
            }
        }
        finally
        {
            if (br != null)
            {
                br.close();
            }
        }
        // error, no median found
        return -1;
    }

    public static void main(String[] args) throws Exception
    {
        ToolRunner.run(new Configuration(), new WordMedian(), args);
    }

    @Override
    public int run(String[] args) throws Exception
    {
        if (args.length != 2)
        {
            System.err.println("Usage: wordmedian <in> <out>");
            return 0;
        }

        setConf(new Configuration());
        Configuration conf = getConf();

        @SuppressWarnings("deprecation")
        Job job = new Job(conf, "word median");
        job.setJarByClass(WordMedian.class);
        job.setMapperClass(WordMedianMapper.class);
        job.setCombinerClass(WordMedianReducer.class);
        job.setReducerClass(WordMedianReducer.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean result = job.waitForCompletion(true);

        // Wait for JOB 1 -- get middle value to check for Median
        long totalWords = job.getCounters().getGroup(TaskCounter.class.getCanonicalName())
                .findCounter("MAP_OUTPUT_RECORDS", "Map output records").getValue();
        int medianIndex1 = (int) Math.ceil((totalWords / 2.0));  // upper middle position
        int medianIndex2 = (int) Math.floor((totalWords / 2.0)); // lower middle position (equals medianIndex1 when totalWords is even)

        median = readAndFindMedian(args[1], medianIndex1, medianIndex2, conf);

        return (result ? 0 : 1);
    }

    public double getMedian()
    {
        return median;
    }
}
```
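
A quick worked example, again with the same sample input: the mapper emits the lengths 5, 5, 3, 5, so the reducer output would be the two lines `3 1` and `5 3`. The MAP_OUTPUT_RECORDS counter gives totalWords = 4, hence medianIndex1 = ceil(2.0) = 2 and medianIndex2 = floor(2.0) = 2. While scanning the output, the running total is 1 after the first line (position 2 not yet reached) and 4 after the second, so readAndFindMedian returns 5, which matches the median of the length multiset {3, 5, 5, 5}.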