Sample Code for Reading and Writing HBase with Spark

At minimum, the following four HBase jars must be on the classpath (an sbt sketch follows the list):
  • hbase-client
  • hbase-common
  • hbase-protocol
  • hbase-server
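
For example, with sbt the dependencies could be declared as follows (a minimal sketch; the version number "1.2.0" is a placeholder, match it to your cluster):

// build.sbt -- hbaseVersion is a placeholder; use the version your cluster runs
val hbaseVersion = "1.2.0"
libraryDependencies ++= Seq(
  "org.apache.hbase" % "hbase-client"   % hbaseVersion,
  "org.apache.hbase" % "hbase-common"   % hbaseVersion,
  "org.apache.hbase" % "hbase-protocol" % hbaseVersion,
  "org.apache.hbase" % "hbase-server"   % hbaseVersion
)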

Writing to HBase
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.io.Writable
import org.apache.spark.rdd.RDD

def writeHbaseFromRDD(tableName: String, columnFamily: String, column: String,
                      rdd: RDD[(String, String)]): Unit = {
  val hbaseConf = HBaseConfiguration.create()

  // Either the old (mapred) or the new (mapreduce) Hadoop API works;
  // most Hadoop versions ship both.
  /** Old mapred API version: */
  // val jobConf = new org.apache.hadoop.mapred.JobConf(hbaseConf)
  // jobConf.setOutputFormat(classOf[org.apache.hadoop.hbase.mapred.TableOutputFormat])
  // jobConf.set(org.apache.hadoop.hbase.mapred.TableOutputFormat.OUTPUT_TABLE, tableName)
  // rdd.map {
  //   case (key, value) =>
  //     val p = new Put(Bytes.toBytes(key))
  //     p.add(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(value))
  //     // p.setWriteToWAL(false)  // optionally skip the WAL: faster, less durable
  //     (new ImmutableBytesWritable, p)
  // }.saveAsHadoopDataset(jobConf)

  /** New mapreduce API version: */
  val job = new org.apache.hadoop.mapreduce.Job(hbaseConf)
  job.setOutputFormatClass(
    classOf[org.apache.hadoop.hbase.mapreduce.TableOutputFormat[ImmutableBytesWritable]])
  job.setOutputKeyClass(classOf[ImmutableBytesWritable])
  job.setOutputValueClass(classOf[Writable])
  job.getConfiguration.set(
    org.apache.hadoop.hbase.mapreduce.TableOutputFormat.OUTPUT_TABLE, tableName)
  rdd.map {
    case (key, value) =>
      val p = new Put(Bytes.toBytes(key))
      p.add(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(value))
      // p.setWriteToWAL(false)  // optionally skip the WAL: faster, less durable
      (new ImmutableBytesWritable, p)
  }.saveAsNewAPIHadoopDataset(job.getConfiguration)
}
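
A minimal sketch of calling the function above (it assumes a table named spark_hbase_test with column family cf already exists, since TableOutputFormat does not create tables; both names are placeholders):

val sc = new SparkContext(
  new SparkConf().setAppName("write-hbase-test").setMaster("local"))
// rowkey -> value pairs go to column cf:col1 of table spark_hbase_test
val data = sc.parallelize(Seq(("row-1", "value-1"), ("row-2", "value-2")))
writeHbaseFromRDD("spark_hbase_test", "cf", "col1", data)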

Reading from HBase
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.{SparkConf, SparkContext}

def readAsRDD(): Unit = {
  val sparkConf = new SparkConf().setAppName("read-hbase-test").setMaster("local")
  sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  val sc = new SparkContext(sparkConf)
  // HandleHbase.conf is this project's own helper; HBaseConfiguration.create()
  // works the same way
  val hbaseConf = HandleHbase.conf

  /** An HBase Scan can be passed in here as a parameter. */
  val scan = new Scan()
  scan.setStartRow(Bytes.toBytes("row-1"))
  scan.setStopRow(Bytes.toBytes("row-2"))

  // TableInputFormat only accepts the scan as a Base64-encoded protobuf string
  def convertScanToString(scan: Scan) = {
    val proto: ClientProtos.Scan = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }

  /** TableInputFormat has several parameters usable for filtering;
    * see its static constants for the full list. */
  hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.SCAN,
    convertScanToString(scan))
  hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.INPUT_TABLE,
    "spark_hbase_test")

  val rdd = sc.newAPIHadoopRDD(hbaseConf,
    classOf[org.apache.hadoop.hbase.mapreduce.TableInputFormat],
    classOf[ImmutableBytesWritable],
    classOf[org.apache.hadoop.hbase.client.Result])
  rdd.foreach(println)
}
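
rdd.foreach(println) only prints the raw (ImmutableBytesWritable, Result) pairs. A sketch of pulling readable values out of each Result, assuming the hypothetical column cf:col1 written in the earlier example:

rdd.map { case (rowKey, result) =>
  val key = Bytes.toString(rowKey.copyBytes())  // row key bytes -> String
  val value = Bytes.toString(                   // cell value stored under cf:col1
    result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("col1")))
  (key, value)
}.foreach(println)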
