HBase MapReduce Cheat Sheet

package cn.itcast.bigdata.hbase;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * @ClassName: HbaseRunner
 * @Description: an HBase MapReduce job
 * @author mpc
 * @date 2016-11-02 13:47:02
 */
public class HbaseRunner extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // create the Configuration
        conf.set("hbase.zookeeper.quorum", "mpc5,mpc6,mpc7");  // tell HBase where to find ZooKeeper
        Job job = Job.getInstance(conf);                       // instantiate a Job with this conf
        job.setJarByClass(HbaseRunner.class);

        Scan scan = new Scan();
        scan.setCaching(500);        // fetch 500 rows per RPC
        scan.setCacheBlocks(false);  // each row is read only once, so skip the block cache

        // initialize the map phase; the source table is mpc_log
        TableMapReduceUtil.initTableMapperJob("mpc_log", scan,
                HbaseMapper.class, Text.class, LongWritable.class, job);
        // initialize the reduce phase; the target table is mpc_count
        TableMapReduceUtil.initTableReducerJob("mpc_count", HbaseReducer.class, job);

        // wait for the job to complete
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static class HbaseMapper extends TableMapper<Text, LongWritable> {
        /*
         * TableMapper already fixes the input key as the row key and the input
         * value as a Result, so only the output key and value types are declared.
         */
        @Override
        protected void map(ImmutableBytesWritable key, Result value,
                Context context) throws IOException, InterruptedException {
            if (value.containsNonEmptyColumn(Bytes.toBytes("cf"),
                    Bytes.toBytes("test"))) {          // only rows whose test column is non-empty
                byte[] val = value.getValue(Bytes.toBytes("cf"),
                        Bytes.toBytes("test"));        // read the test column
                // emit the value of the test column as the key, with a count of 1
                context.write(new Text(val), new LongWritable(1));
            }
        }
    }

    public static class HbaseReducer extends
            TableReducer<Text, LongWritable, NullWritable> {
        /*
         * TableReducer already fixes the output value type as Put or Delete, so
         * only the input key type, the input value type and the output key type
         * are declared.
         */
        @Override
        protected void reduce(Text key, Iterable<LongWritable> vals,
                Context context) throws IOException, InterruptedException {
            long count = 0;
            for (LongWritable lo : vals) {
                count += lo.get();
            }
            // write the total back to HBase as a Put keyed by the mapper's output key
            Put put = new Put(Bytes.toBytes(key.toString()));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("sum"),
                    Bytes.toBytes(count));
            context.write(NullWritable.get(), put);
        }
    }

    public static void main(String[] args) throws Exception {
        // launch the job through ToolRunner
        int res = ToolRunner.run(new HbaseRunner(), args);
        System.exit(res);
    }
}
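Both tables must exist before the job is submitted: mpc_log, whose cf:test column holds the values to be counted, and mpc_count, which receives the cf:sum totals. A minimal setup sketch in the HBase shell, with the table and family names taken from the code above:

    create 'mpc_log', 'cf'
    create 'mpc_count', 'cf'

The job is then launched like any other MapReduce driver; the jar name below is a placeholder for whatever your own build produces:

    hadoop jar hbase-mr.jar cn.itcast.bigdata.hbase.HbaseRunner

After the job finishes, scan 'mpc_count' in the HBase shell should show one row per distinct cf:test value, each with a cf:sum column holding the 8-byte count produced by Bytes.toBytes(long).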
