hadoop对专利引用数据进行处理,输出被引用的专利

来源:互联网 发布:中保险网络大学ii 编辑:程序博客网 时间:2024/06/01 10:43

对有如下专利数据


进行处理,输出每个专利被哪些专利引用(即反转引用关系),代码如下

package com.hadoop.test;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Processes the patent citation data set (cite75_99.txt): for every patent,
 * emits the comma-separated list of patents that cite it, i.e. inverts the
 * "citing,cited" relation of the input.
 *
 * @author root 2014-12-28
 */
public class MyJob extends Configured implements Tool {

    public static class MapClass extends MapReduceBase implements
            Mapper<Text, Text, Text, Text> {

        /**
         * Inverts one citation record. With KeyValueTextInputFormat and ","
         * as separator, {@code key} is the citing patent and {@code value}
         * the cited patent; emitting (value, key) groups citing patents
         * under the patent they cite.
         */
        @Override
        public void map(Text key, Text value,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            output.collect(value, key);
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, Text, Text, Text> {

        /**
         * Joins all citing patents of one cited patent into a single
         * comma-separated string. A StringBuilder is used instead of
         * repeated String concatenation, which would be O(n^2) over the
         * number of citations.
         */
        @Override
        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {
            StringBuilder csv = new StringBuilder();
            while (values.hasNext()) {
                if (csv.length() > 0) {
                    csv.append(',');
                }
                csv.append(values.next().toString());
            }
            output.collect(key, new Text(csv.toString()));
        }
    }

    /**
     * Configures and submits the job.
     *
     * @param args args[0] = input path, args[1] = output path
     * @return 0 on successful submission
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        JobConf job = new JobConf(conf, MyJob.class);

        // Input and output locations come from the command-line arguments.
        Path in = new Path(args[0]);
        Path out = new Path(args[1]);
        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("MyJob");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(Reduce.class);
        // K1 and V1 are both Text: each line is split into key/value.
        job.setInputFormat(KeyValueTextInputFormat.class);
        // Plain-text output.
        job.setOutputFormat(TextOutputFormat.class);
        // K2 type.
        job.setOutputKeyClass(Text.class);
        // V2 type.
        job.setOutputValueClass(Text.class);
        // Split each input line on the comma into key and value.
        job.set("key.value.separator.in.input.line", ",");

        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        // Local input file path.
        String inputPath = "/home/znb/test/cite75_99.txt";
        // HDFS output directory.
        String outputPath = "hdfs://znb:9000/output/MyJob/";
        String[] jobArgs = { inputPath, outputPath };
        int res = ToolRunner.run(new Configuration(), new MyJob(), jobArgs);
        System.exit(res);
    }
}

输出的结果为


0 0