hadoop hbase 问题 及 简单 案例
来源:互联网 发布:java重载的内部调用 编辑:程序博客网 时间:2024/06/05 19:30
问题 1
regionserver.HRegionServer: error telling master we are up com.google.protobuf.ServiceException: java.net.SocketException: Invalid argumentat org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:240)
解决方法
修改 slave3 文件
vi /etc/hosts
127.0.0.1 localhost slave3
改为
127.0.0.1 localhost
192.168.18.200 slave3
问题2
org.apache.hadoop.hbase.client.HTablePool$PooledHTable cannot be cast to org.apache.hadoop.hbase.client.HTable
解决办法
在最新版本的 API 中，pool.getTable 返回的类型是 HTableInterface（实际为 PooledHTable），无法强制转换为 HTable
代码
HTablePool hp=new HTablePool(con, 1000);
HTable ht=(HTable)hp.getTable(tName);
Put put = new Put("firstrow".getBytes());//
put.add("m_id".getBytes(), null, "333".getBytes());//
put.add("name".getBytes(), null, "333".getBytes());//
put.add("info".getBytes(), null, "33".getBytes());//
ht.put(put);
因此
ht.put(put);
改为 hp.getTable(tName).put(put);
测试代码
根据网上资料 修改 可以运行
package com.hadoop.main;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue;
/**
 * Minimal HBase client demo: creates/drops tables, inserts rows, scans a
 * whole table, and deletes rows by key.  Uses the pre-1.0 HBase client API
 * (HBaseAdmin, HTablePool, HTable), which is deprecated in current releases.
 */
public class WordCount {

    /** Shared client configuration, built once when the class is loaded. */
    public static Configuration configuration;

    static {
        configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.property.clientPort", "2181");
        configuration.set("hbase.zookeeper.quorum", "slave3");
        configuration.set("hbase.master", "master:60000");
    }

    public static void main(String[] args) {
        System.out.println(configuration.get("hbase.master"));
        createTable("finn");
        // insertData("member");
        // insertDataFinn("finn");
        // dropTable("table2");
        // QueryAll("finn");
        // deleteByCondition("finn","firstrow");
    }

    /**
     * Creates {@code tableName} with column families column1..column3,
     * first dropping any existing table of the same name.
     */
    public static void createTable(String tableName) {
        System.out.println("start create table ......");
        try {
            HBaseAdmin hBaseAdmin = new HBaseAdmin(configuration);
            try {
                if (hBaseAdmin.tableExists(tableName)) {
                    // A table must be disabled before HBase will delete it.
                    hBaseAdmin.disableTable(tableName);
                    hBaseAdmin.deleteTable(tableName);
                    System.out.println(tableName + " is exist,delete....");
                }
                HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
                tableDescriptor.addFamily(new HColumnDescriptor("column1"));
                tableDescriptor.addFamily(new HColumnDescriptor("column2"));
                tableDescriptor.addFamily(new HColumnDescriptor("column3"));
                hBaseAdmin.createTable(tableDescriptor);
            } finally {
                hBaseAdmin.close(); // FIX: admin connection was never released
            }
        } catch (IOException e) {
            // MasterNotRunningException and ZooKeeperConnectionException are
            // both IOException subclasses, so a single catch covers all three.
            e.printStackTrace();
        }
        System.out.println("end create table ......");
    }

    /** Inserts one demo row ("firstrow") into families m_id/name/info. */
    @SuppressWarnings("deprecation")
    public static void insertData(String tableName) {
        System.out.println("start insert data ......");
        System.out.println("configuration:" + configuration);
        putRow(tableName, "firstrow",
               new String[] { "m_id", "name", "info" },
               new String[] { "333", "333", "33" });
        System.out.println("end insert data ......");
    }

    /** Inserts one demo row ("firstrow2") into families column1..column3. */
    @SuppressWarnings("deprecation")
    public static void insertDataFinn(String tableName) {
        System.out.println("start insert data ......");
        System.out.println("configuration:" + configuration);
        putRow(tableName, "firstrow2",
               new String[] { "column1", "column2", "column3" },
               new String[] { "333", "333", "33" });
        System.out.println("end insert data ......");
    }

    /**
     * Writes a single row: families[i] receives values[i] (null qualifier).
     * Shared by insertData/insertDataFinn, which previously duplicated this.
     */
    @SuppressWarnings("deprecation")
    private static void putRow(String tableName, String rowKey,
                               String[] families, String[] values) {
        HTablePool pool = new HTablePool(configuration, 1000);
        Put put = new Put(rowKey.getBytes());
        for (int i = 0; i < families.length; i++) {
            put.add(families[i].getBytes(), null, values[i].getBytes());
        }
        try {
            // Do NOT cast getTable(..) to HTable: the pool returns a
            // PooledHTable that only implements HTableInterface.
            pool.getTable(tableName).put(put);
            pool.close(); // FIX: release pooled connections
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Disables and deletes {@code tableName}. */
    public static void dropTable(String tableName) {
        try {
            HBaseAdmin admin = new HBaseAdmin(configuration);
            try {
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
            } finally {
                admin.close(); // FIX: admin connection was never released
            }
        } catch (IOException e) {
            // Covers MasterNotRunningException / ZooKeeperConnectionException.
            e.printStackTrace();
        }
    }

    /** Deletes the whole row identified by {@code rowkey}. */
    public static void deleteByCondition(String tablename, String rowkey) {
        try {
            HTable table = new HTable(configuration, tablename);
            try {
                List<Delete> deletes = new ArrayList<Delete>(); // FIX: raw type
                deletes.add(new Delete(rowkey.getBytes()));
                table.delete(deletes);
                System.out.println("删除行成功!");
            } finally {
                table.close(); // FIX: table handle was leaked
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Full-table scan of {@code tableName}, printing every cell. */
    public static void QueryAll(String tableName) {
        HTablePool pool = new HTablePool(configuration, 1000);
        try {
            ResultScanner rs = pool.getTable(tableName).getScanner(new Scan());
            try {
                for (Result r : rs) {
                    System.out.println("rowkey:" + new String(r.getRow()));
                    for (org.apache.hadoop.hbase.KeyValue keyValue : r.raw()) {
                        System.out.println("family:" + new String(keyValue.getFamily())
                                + "====?:" + new String(keyValue.getValue()));
                    }
                }
            } finally {
                rs.close(); // FIX: an open scanner holds server-side resources
            }
            pool.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
regionserver.HRegionServer: error telling master we are up com.google.protobuf.ServiceException: java.net.SocketException: Invalid argumentat org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:240)
解决方法
修改 slave3 文件
vi /etc/hosts
127.0.0.1 localhost slave3
改为
127.0.0.1 localhost
192.168.18.200 slave3
问题2
org.apache.hadoop.hbase.client.HTablePool$PooledHTable cannot be cast to org.apache.hadoop.hbase.client.HTable
解决办法
在最新版本的 API 中，pool.getTable 返回的类型是 HTableInterface（实际为 PooledHTable），无法强制转换为 HTable
代码
HTablePool hp=new HTablePool(con, 1000);
HTable ht=(HTable)hp.getTable(tName);
Put put = new Put("firstrow".getBytes());//
put.add("m_id".getBytes(), null, "333".getBytes());//
put.add("name".getBytes(), null, "333".getBytes());//
put.add("info".getBytes(), null, "33".getBytes());//
ht.put(put);
因此
ht.put(put);
改为 hp.getTable(tName).put(put);
测试代码
根据网上资料 修改 可以运行
package com.hadoop.main;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue;
/**
 * Minimal HBase client demo: creates/drops tables, inserts rows, scans a
 * whole table, and deletes rows by key.  Uses the pre-1.0 HBase client API
 * (HBaseAdmin, HTablePool, HTable), which is deprecated in current releases.
 */
public class WordCount {

    /** Shared client configuration, built once when the class is loaded. */
    public static Configuration configuration;

    static {
        configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.property.clientPort", "2181");
        configuration.set("hbase.zookeeper.quorum", "slave3");
        configuration.set("hbase.master", "master:60000");
    }

    public static void main(String[] args) {
        System.out.println(configuration.get("hbase.master"));
        createTable("finn");
        // insertData("member");
        // insertDataFinn("finn");
        // dropTable("table2");
        // QueryAll("finn");
        // deleteByCondition("finn","firstrow");
    }

    /**
     * Creates {@code tableName} with column families column1..column3,
     * first dropping any existing table of the same name.
     */
    public static void createTable(String tableName) {
        System.out.println("start create table ......");
        try {
            HBaseAdmin hBaseAdmin = new HBaseAdmin(configuration);
            try {
                if (hBaseAdmin.tableExists(tableName)) {
                    // A table must be disabled before HBase will delete it.
                    hBaseAdmin.disableTable(tableName);
                    hBaseAdmin.deleteTable(tableName);
                    System.out.println(tableName + " is exist,delete....");
                }
                HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
                tableDescriptor.addFamily(new HColumnDescriptor("column1"));
                tableDescriptor.addFamily(new HColumnDescriptor("column2"));
                tableDescriptor.addFamily(new HColumnDescriptor("column3"));
                hBaseAdmin.createTable(tableDescriptor);
            } finally {
                hBaseAdmin.close(); // FIX: admin connection was never released
            }
        } catch (IOException e) {
            // MasterNotRunningException and ZooKeeperConnectionException are
            // both IOException subclasses, so a single catch covers all three.
            e.printStackTrace();
        }
        System.out.println("end create table ......");
    }

    /** Inserts one demo row ("firstrow") into families m_id/name/info. */
    @SuppressWarnings("deprecation")
    public static void insertData(String tableName) {
        System.out.println("start insert data ......");
        System.out.println("configuration:" + configuration);
        putRow(tableName, "firstrow",
               new String[] { "m_id", "name", "info" },
               new String[] { "333", "333", "33" });
        System.out.println("end insert data ......");
    }

    /** Inserts one demo row ("firstrow2") into families column1..column3. */
    @SuppressWarnings("deprecation")
    public static void insertDataFinn(String tableName) {
        System.out.println("start insert data ......");
        System.out.println("configuration:" + configuration);
        putRow(tableName, "firstrow2",
               new String[] { "column1", "column2", "column3" },
               new String[] { "333", "333", "33" });
        System.out.println("end insert data ......");
    }

    /**
     * Writes a single row: families[i] receives values[i] (null qualifier).
     * Shared by insertData/insertDataFinn, which previously duplicated this.
     */
    @SuppressWarnings("deprecation")
    private static void putRow(String tableName, String rowKey,
                               String[] families, String[] values) {
        HTablePool pool = new HTablePool(configuration, 1000);
        Put put = new Put(rowKey.getBytes());
        for (int i = 0; i < families.length; i++) {
            put.add(families[i].getBytes(), null, values[i].getBytes());
        }
        try {
            // Do NOT cast getTable(..) to HTable: the pool returns a
            // PooledHTable that only implements HTableInterface.
            pool.getTable(tableName).put(put);
            pool.close(); // FIX: release pooled connections
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Disables and deletes {@code tableName}. */
    public static void dropTable(String tableName) {
        try {
            HBaseAdmin admin = new HBaseAdmin(configuration);
            try {
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
            } finally {
                admin.close(); // FIX: admin connection was never released
            }
        } catch (IOException e) {
            // Covers MasterNotRunningException / ZooKeeperConnectionException.
            e.printStackTrace();
        }
    }

    /** Deletes the whole row identified by {@code rowkey}. */
    public static void deleteByCondition(String tablename, String rowkey) {
        try {
            HTable table = new HTable(configuration, tablename);
            try {
                List<Delete> deletes = new ArrayList<Delete>(); // FIX: raw type
                deletes.add(new Delete(rowkey.getBytes()));
                table.delete(deletes);
                System.out.println("删除行成功!");
            } finally {
                table.close(); // FIX: table handle was leaked
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Full-table scan of {@code tableName}, printing every cell. */
    public static void QueryAll(String tableName) {
        HTablePool pool = new HTablePool(configuration, 1000);
        try {
            ResultScanner rs = pool.getTable(tableName).getScanner(new Scan());
            try {
                for (Result r : rs) {
                    System.out.println("rowkey:" + new String(r.getRow()));
                    for (org.apache.hadoop.hbase.KeyValue keyValue : r.raw()) {
                        System.out.println("family:" + new String(keyValue.getFamily())
                                + "====?:" + new String(keyValue.getValue()));
                    }
                }
            } finally {
                rs.close(); // FIX: an open scanner holds server-side resources
            }
            pool.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
0 0
- hadoop hbase 问题 及 简单 案例
- Hadoop渐进九:HBase配置及简单应用
- hadoop,hbase安装过程中遇到问题及解决过程!
- hbase、zookeeper及hadoop部署
- Hadoop及Hbase集群搭建
- hbase、zookeeper及hadoop部署
- Hadoop-简单的MapReduce案例
- Hadoop Hbase 升级的问题
- Hadoop、HBase集群问题记录
- hadoop,hbase集群一些问题
- hadoop&hbase学习01--hadoop启动问题
- Hadoop学习笔记(9)-搭建Hbase伪分布式及简单操作
- HBase完全分布式安装及案例设计
- NoSQL选型及HBase案例详解
- Hadoop HBase 单机环境简单配置教程
- Hadoop HBase 单机环境简单配置教程
- Hadoop HBase 单机环境简单配置教程
- hadoop学习第十节:HBase介绍、安装与应用案例
- [JAVA]数组转换成字符串
- Linux gcc编译器
- Macos系统下PATH环境变量的配置方法
- 蓝桥杯 ALGO-98 算法训练 数位分离
- String,StringBuffer与StringBuilder之间区别
- hadoop hbase 问题 及 简单 案例
- python基础学习-网络学习
- python搜索包的路径
- netstat命令
- 蓝桥杯 ALGO-97 算法训练 排序
- AI之用行为树来实现逻辑
- 使用python脚本执行系统命令
- C/C++中va_list,va_start,va_arg,va_end的使用和原理
- hive安装