A MapReduce Example Based on Hadoop 0.20.2
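The example below reads a plain-text input in which each line holds two space-separated fields (called anum and bnum in the code). The mapper inverts each pair and emits bnum as the key with anum as the value; the reducer then concatenates, for every bnum, all the anums mapped to it, joined by "|". Lines that do not contain both fields are skipped and tallied in a LINESKIP counter.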
package com.founder.hadoop.mapreduce;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class TestMapReduce2 extends Configured implements Tool {

    // Counter for input lines skipped because they are malformed
    enum Counter {
        LINESKIP
    }

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            try {
                // Each input line holds two space-separated fields: "anum bnum"
                String line = value.toString();
                String[] fields = line.split(" ");
                String anum = fields[0];
                String bnum = fields[1];
                // Invert the pair: emit bnum as the key and anum as the value,
                // so the reducer receives all anums grouped by bnum
                context.write(new Text(bnum), new Text(anum));
            } catch (ArrayIndexOutOfBoundsException e) {
                // Malformed line (fewer than two fields): count it and skip
                context.getCounter(Counter.LINESKIP).increment(1);
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Concatenate all values for this key, appending "|" after each one
            // (note: this leaves a trailing "|" at the end of the list)
            StringBuilder out = new StringBuilder();
            for (Text value : values) {
                out.append(value.toString()).append("|");
            }
            context.write(key, new Text(out.toString()));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        Job job = new Job(conf, "TestMapReduce2");               // job name
        job.setJarByClass(TestMapReduce2.class);                 // jar containing this class
        FileInputFormat.addInputPath(job, new Path(args[0]));    // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));  // output path
        job.setMapperClass(Map.class);       // use the Map class above as the map task
        job.setReducerClass(Reduce.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);   // output key type
        job.setOutputValueClass(Text.class); // output value type
        job.waitForCompletion(true);
        return job.isSuccessful() ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new TestMapReduce2(), args);
        System.exit(res);
    }
}
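To run the job, package the class into a jar and pass the HDFS input and output paths as the two arguments. A minimal invocation sketch (the jar name and paths here are illustrative, not from the original article):

hadoop jar testmr2.jar com.founder.hadoop.mapreduce.TestMapReduce2 input output

Given an input file such as the following (sample data for illustration, assuming the two fields are phone-number-like strings):

13599990000 13600000001
13599990001 13600000001
13599990002 13600000002

the job would produce output like this (TextOutputFormat separates key and value with a tab):

13600000001	13599990000|13599990001|
13600000002	13599990002|

Each key's value list keeps a trailing "|" because the reducer appends the separator after every value, and the order of the concatenated values within a key is not guaranteed.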