Operating HBase from Scala with the New API
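To compile the example you need the HBase client jars plus Spark on the classpath. A minimal sbt sketch follows; the version numbers are assumptions (the post does not state them, but any HBase 1.x client and Spark 1.x should match the API used here):

libraryDependencies ++= Seq(
  // Connection / Table / Admin live in hbase-client
  "org.apache.hbase" % "hbase-client" % "1.2.6",
  // TableInputFormat lives in hbase-server in HBase 1.x
  "org.apache.hbase" % "hbase-server" % "1.2.6",
  "org.apache.spark" %% "spark-core"  % "1.6.3"
)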
package com.sncfi.hbase

import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Delete, Get, Put, Result, ResultScanner, Scan, Table}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes

/**
 * Created by admin on 2016/12/14.
 * Adapted from the HBase example bundled with Spark, using the new
 * (Connection-based) client API. The helper methods are defined ahead of main.
 */
object HBaseLocalTest {

  // Create a table with two column families
  def createHTable(connection: Connection, tablename: String): Unit = {
    // The Admin interface manages HBase table schemas
    val admin = connection.getAdmin
    val tableName = TableName.valueOf(tablename)
    // Only create the table if it does not exist yet
    if (!admin.tableExists(tableName)) {
      val tableDescriptor = new HTableDescriptor(tableName)
      // Column family 1: article
      tableDescriptor.addFamily(new HColumnDescriptor("article".getBytes()))
      // Column family 2: author
      tableDescriptor.addFamily(new HColumnDescriptor("author".getBytes()))
      admin.createTable(tableDescriptor)
      println("create done.")
    }
  }

  // Delete a table; it must be disabled before it can be dropped
  def deleteHTable(connection: Connection, tablename: String): Unit = {
    val tableName = TableName.valueOf(tablename)
    val admin = connection.getAdmin
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName)
      admin.deleteTable(tableName)
    }
  }

  // Insert one cell; re-running overwrites the previous value
  def insertHTable(connection: Connection, tablename: String, family: String,
                   column: String, key: String, value: String): Unit = {
    val table = connection.getTable(TableName.valueOf(tablename))
    try {
      // A Put is keyed by the row key
      val p = new Put(key.getBytes())
      // Specify the column family, qualifier and value for the Put
      p.addColumn(family.getBytes(), column.getBytes(), value.getBytes())
      // A single Put can in fact carry several columns, e.g.:
      // p.addColumn(family.getBytes(), "china".getBytes(), "JAVA for china".getBytes())
      table.put(p)
    } finally {
      table.close()
    }
  }

  // Fetch a single cell by row key
  def getAResult(connection: Connection, tablename: String, family: String,
                 column: String, key: String): Unit = {
    var table: Table = null
    try {
      table = connection.getTable(TableName.valueOf(tablename))
      val g = new Get(key.getBytes())
      val result = table.get(g)
      val value = Bytes.toString(result.getValue(family.getBytes(), column.getBytes()))
      println("value: " + value)
    } finally {
      if (table != null) table.close()
    }
  }

  // Delete one cell of a row
  def deleteRecord(connection: Connection, tablename: String, family: String,
                   column: String, key: String): Unit = {
    var table: Table = null
    try {
      table = connection.getTable(TableName.valueOf(tablename))
      val d = new Delete(key.getBytes())
      d.addColumn(family.getBytes(), column.getBytes())
      table.delete(d)
      println("delete record done.")
    } finally {
      if (table != null) table.close()
    }
  }

  // Scan one column across the whole table
  def scanRecord(connection: Connection, tablename: String, family: String, column: String): Unit = {
    var table: Table = null
    var scanner: ResultScanner = null
    try {
      table = connection.getTable(TableName.valueOf(tablename))
      val s = new Scan()
      s.addColumn(family.getBytes(), column.getBytes())
      scanner = table.getScanner(s)
      println("scan...for...")
      var result: Result = scanner.next()
      while (result != null) {
        println("Found row: " + result)
        println("Found value: " + Bytes.toString(result.getValue(family.getBytes(), column.getBytes())))
        result = scanner.next()
      }
    } finally {
      // Close the scanner before the table, guarding against nulls
      if (scanner != null) scanner.close()
      if (table != null) table.close()
    }
  }

  def main(args: Array[String]): Unit = {
    // A SparkContext would drive Spark's parallel computation; the plain
    // client calls below do not need it, so it stays commented out.
    // val sparkConf = new SparkConf().setAppName("HBaseTest")
    // val sc = new SparkContext(sparkConf)

    // Create a configuration via the factory method
    val conf = HBaseConfiguration.create()
    val tablename = "blog"
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    conf.set("zookeeper.znode.parent", "/hbase-unsecure")
    conf.set("hbase.zookeeper.quorum", "hadoop36.newqd.com,hadoop37.newqd.com,hadoop38.newqd.com")
    // conf.set("hbase.zookeeper.quorum", "hadoop1.snnu.edu.cn,hadoop3.snnu.edu.cn")
    // Only needed when the table is read through TableInputFormat (see the sketch below)
    conf.set(TableInputFormat.INPUT_TABLE, tablename)

    // Creating a Connection is heavyweight; it is thread-safe and is the
    // entry point for all HBase operations
    val connection = ConnectionFactory.createConnection(conf)
    try {
      // Create the table
      createHTable(connection, "blog")
      // Insert data; re-running overwrites the existing cells
      insertHTable(connection, "blog", "article", "english", "002", "c++ for me")
      insertHTable(connection, "blog", "article", "english", "003", "python for me")
      insertHTable(connection, "blog", "article", "chinese", "002", "C++ for china")
      // Delete a record
      // deleteRecord(connection, "blog", "article", "chinese", "002")
      // Scan the whole table
      scanRecord(connection, "blog", "article", "english")
      // Drop the table
      // deleteHTable(connection, "blog")
    } finally {
      connection.close()
      // sc.stop()
    }
  }
}
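The main method sets TableInputFormat.INPUT_TABLE but never actually reads the table through Spark; that setting only matters once the table is handed to Spark as an RDD. The sketch below shows one way to do that with sc.newAPIHadoopRDD, in the spirit of the Spark-bundled HBaseTest example this post adapts. It is not part of the original post: the object name is made up, and the quorum, table, and article/english column simply mirror the values above.

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}

object HBaseSparkReadSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("HBaseTest"))
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", "hadoop36.newqd.com,hadoop37.newqd.com,hadoop38.newqd.com")
    conf.set(TableInputFormat.INPUT_TABLE, "blog")
    // Each record is a (row key, Result) pair; the scan is split by region
    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[ImmutableBytesWritable], classOf[Result])
    // Result is not serializable, so convert to Strings before collecting
    val rows = hBaseRDD.map { case (rowKey, result) =>
      Bytes.toString(rowKey.copyBytes()) + " -> " +
        Bytes.toString(result.getValue("article".getBytes(), "english".getBytes()))
    }
    rows.collect().foreach(println)
    sc.stop()
  }
}

A plain hBaseRDD.count() is the usual smoke test if you only want to confirm connectivity before processing rows.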