Hadoop Example: A Single-Pass MapReduce Matrix Multiplication Demo


Matrix multiplication is defined only when the number of columns of the first matrix equals the number of rows of the second. When people speak of "the matrix product" without qualification, they mean this ordinary product. If A is an i×r matrix and B is an r×j matrix, their product AB (sometimes written A · B) is an i×j matrix whose element in row x and column y is:

(AB)[x,y] = A[x,1]*B[1,y] + A[x,2]*B[2,y] + … + A[x,r]*B[r,y]
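As a point of reference (not part of the original demo), a minimal in-memory Java sketch of this definition follows; it is handy for spot-checking the MapReduce output on small matrices:

public class NaiveMultiply {
    // Reference implementation: C = A * B, where A is iDim x rDim and
    // B is rDim x jDim, so C is iDim x jDim.
    public static int[][] multiply(int[][] a, int[][] b) {
        int iDim = a.length;
        int rDim = b.length;
        int jDim = b[0].length;
        int[][] c = new int[iDim][jDim];
        for (int x = 0; x < iDim; x++) {
            for (int y = 0; y < jDim; y++) {
                // C[x][y] is the dot product of row x of A and column y of B.
                int sum = 0;
                for (int t = 0; t < rDim; t++) {
                    sum += a[x][t] * b[t][y];
                }
                c[x][y] = sum;
            }
        }
        return c;
    }

    public static void main(String[] args) {
        int[][] m = {{1, 2}, {3, 4}};          // 2 x 2
        int[][] n = {{5, 6, 7}, {8, 9, 10}};   // 2 x 3
        System.out.println(java.util.Arrays.deepToString(multiply(m, n)));
        // prints [[21, 24, 27], [47, 54, 61]]
    }
}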

The MapReduce implementation of matrix multiplication described in the book is:

Map function: for each element M[i,j] of matrix M, emit a series of key-value pairs (i,k) -> (M, j, M[i,j]), where k = 1, 2, …, up to the number of columns of N. Likewise, for each element N[j,k] of matrix N, emit key-value pairs (i,k) -> (N, j, N[j,k]), where i = 1, 2, …, up to the number of rows of M.

Reduce function: by the shuffle mechanics of MapReduce, all records that share the same key (i,k) are sent to the same reducer. If M is a 2×2 matrix and N is a 2×3 matrix, the reduce function receives the following data:

(1,1)->[(M,1, M[1,1]), (M,2, M[1,2]), (N,1, N[1,1]), (N,2, N[2,1])],
(1,2)->[(M,1, M[1,1]), (M,2, M[1,2]), (N,1, N[1,2]), (N,2, N[2,2])],
(1,3)->[(M,1, M[1,1]), (M,2, M[1,2]), (N,1, N[1,3]), (N,2, N[2,3])],
(2,1)->[(M,1, M[2,1]), (M,2, M[2,2]), (N,1, N[1,1]), (N,2, N[2,1])],
(2,2)->[(M,1, M[2,1]), (M,2, M[2,2]), (N,1, N[1,2]), (N,2, N[2,2])],
(2,3)->[(M,1, M[2,1]), (M,2, M[2,2]), (N,1, N[1,3]), (N,2, N[2,3])].




The reducer then only needs to sort all (M, j, M[i,j]) values and all (N, j, N[j,k]) values by j into two separate lists, multiply the j-th elements M[i,j] and N[j,k] of the two lists, and add up the products; the sum, paired with the key (i,k), is the output of the reduce function. For the example above, the reduce output is:

(1,1)->(M[1,1]* N[1,1]+ M[1,2]* N[2,1])
(1,2)->(M[1,1]* N[1,2]+ M[1,2]* N[2,2])
(1,3)->(M[1,1]* N[1,3]+ M[1,2]* N[2,3])
(2,1)->(M[2,1]* N[1,1]+ M[2,2]* N[2,1])
(2,2)->(M[2,1]* N[1,2]+ M[2,2]* N[2,2])
(2,3)->(M[2,1]* N[1,3]+ M[2,2]* N[2,3])

The MapReduce implementation consists of the following steps:

(1) Generate matrix M (30×15) and matrix N (15×50). The values of both matrices are written to a single Matrix.data file, in which each line has the format: matrix tag#row index#column index#value.


package com.cuijh.matrix;

import java.io.IOException;
import java.net.URI;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Generate matrix M (30 x 15) and matrix N (15 x 50). Both matrices go into a
// single Matrix.data file; each line has the format tag#row#column#value.
public class MatrixGenerate {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        String dst = "hdfs://127.0.0.1:9000/user/cuijh/testdata/Matrix.data";
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        Path path = new Path(dst);
        FSDataOutputStream fdos = fs.create(path, true);
        Random random = new Random();
        int i = 30; // rows of M
        int j = 15; // columns of M == rows of N
        int k = 50; // columns of N
        for (int r = 1; r <= i; r++) {
            for (int c = 1; c <= j; c++) {
                // writeBytes keeps the file as plain text; writeUTF would
                // prefix every record with a 2-byte length and break the
                // line-oriented format the mapper expects.
                fdos.writeBytes("M#" + r + "#" + c + "#" + random.nextInt(100) + "\n");
            }
            fdos.flush();
        }
        for (int r = 1; r <= j; r++) {
            for (int c = 1; c <= k; c++) {
                fdos.writeBytes("N#" + r + "#" + c + "#" + random.nextInt(100) + "\n");
            }
            fdos.flush();
        }
        fdos.close();
        fs.close();
    }
}
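For reference, the first few lines of a generated Matrix.data file look like the following; the values are drawn at random, so the particular numbers shown here are made up:

M#1#1#42
M#1#2#7
M#1#3#93
…
N#1#1#63
N#1#2#18
…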

(2) Implement the Map and Reduce functions based on the approach described above.

package com.cuijh.matrix;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

// Matrix multiplication in a single MapReduce pass.
public class MartrixMultiplication {

    public static class MartrixMapper extends Mapper<Object, Text, Text, Text> {
        private Text map_key = new Text();
        private Text map_value = new Text();
        int rNumber = 30; // number of rows of M
        int cNumber = 50; // number of columns of N
        String fileTarget;
        String i, j, k, ij, jk;

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String eachterm[] = value.toString().split("#");
            fileTarget = eachterm[0];
            if (fileTarget.equals("M")) {
                // M[i,j] contributes to every column of the result row i,
                // so emit it once for each key (i, c), c = 1..cNumber.
                i = eachterm[1];
                j = eachterm[2];
                ij = eachterm[3];
                for (int c = 1; c <= cNumber; c++) {
                    map_key.set(i + "#" + String.valueOf(c));
                    map_value.set("M" + "#" + j + "#" + ij);
                    context.write(map_key, map_value);
                }
            } else if (fileTarget.equals("N")) {
                // N[j,k] contributes to every row of the result column k,
                // so emit it once for each key (r, k), r = 1..rNumber.
                j = eachterm[1];
                k = eachterm[2];
                jk = eachterm[3];
                for (int r = 1; r <= rNumber; r++) {
                    map_key.set(String.valueOf(r) + "#" + k);
                    map_value.set("N" + "#" + j + "#" + jk);
                    context.write(map_key, map_value);
                }
            }
        }
    }

    public static class MartrixReducer extends Reducer<Text, Text, Text, Text> {
        private Text reduce_value = new Text();
        int jNumber = 15; // shared dimension: columns of M == rows of N
        int M_ij[] = new int[jNumber + 1];
        int N_jk[] = new int[jNumber + 1];
        int j, ij, jk;
        String fileTarget;

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            int jsum = 0;
            // Sort the M and N values for this key into two arrays indexed by j.
            for (Text val : values) {
                String eachterm[] = val.toString().split("#");
                fileTarget = eachterm[0];
                j = Integer.parseInt(eachterm[1]);
                if ("M".equals(fileTarget)) {
                    ij = Integer.parseInt(eachterm[2]);
                    M_ij[j] = ij;
                } else if ("N".equals(fileTarget)) {
                    jk = Integer.parseInt(eachterm[2]);
                    N_jk[j] = jk;
                }
            }
            // The result entry for key (i,k) is the dot product of
            // row i of M and column k of N.
            for (int d = 1; d <= jNumber; d++) {
                jsum += M_ij[d] * N_jk[d];
            }
            reduce_value.set(String.valueOf(jsum));
            context.write(key, reduce_value);
        }
    }

    public static void main(String[] args)
            throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.out.println("Usage: MartrixMultiplication <input> <output>");
            System.exit(2);
        }
        Job job = new Job(conf, "MartrixMultiplication");
        job.setJarByClass(MartrixMultiplication.class);
        job.setMapperClass(MartrixMapper.class);
        job.setReducerClass(MartrixReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
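Assuming the classes above are packaged into a jar (the name matrix.jar below is just a placeholder), the job can be launched against the generated data along these lines, with the generator's HDFS directory as input and a not-yet-existing directory as output:

hadoop jar matrix.jar com.cuijh.matrix.MartrixMultiplication hdfs://127.0.0.1:9000/user/cuijh/testdata hdfs://127.0.0.1:9000/user/cuijh/matrixout

Each line of the resulting part files holds a key i#k followed by the value of the product matrix at row i, column k.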

