java hbase

来源:互联网 发布:强制破解wifi密码软件 编辑:程序博客网 时间:2024/05/23 17:53
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;


public class HTableUtils {
private static Configuration conf = null;
/**
* 初始化配置
*/
static {
conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.property.clientPort", "2181");
}
/**
 * 创建表操作
 * @throws IOException
*/
public void createTable(String tablename, String[] cfs) throws IOException {
    HBaseAdmin admin =new HBaseAdmin(conf);
if (admin.tableExists(tablename)) {
        System.out.println("表已经存在!");
    }
else {
        HTableDescriptor tableDesc =new HTableDescriptor(tablename);
for (int i =0; i < cfs.length; i++) {
            tableDesc.addFamily(new HColumnDescriptor(cfs[i]));
        }
        admin.createTable(tableDesc);
        System.out.println("表创建成功!");
    }
}
/**
 * 删除表操作
 * @param tablename
 * @throws IOException
*/
public void deleteTable(String tablename) throws IOException {
try {
        HBaseAdmin admin =new HBaseAdmin(conf);
        admin.disableTable(tablename);
        admin.deleteTable(tablename);
        System.out.println("表删除成功!");
    } catch (MasterNotRunningException e) {
        e.printStackTrace();
    } catch (ZooKeeperConnectionException e) {
        e.printStackTrace();
    }
}
/**
 * 插入一行记录
 * @param tablename
 * @param cfs
*/
public void writeRow(String tablename, String[] cfs) {
try {
        HTable table =new HTable(conf, tablename);
        Put put =new Put(Bytes.toBytes("rows1"));
for (int j =0; j < cfs.length; j++) {
            put.add(Bytes.toBytes(cfs[j]),
                    Bytes.toBytes(String.valueOf(1)),
                    Bytes.toBytes("value_1"));
            table.put(put);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
/**
 * 删除一行记录
 * @param tablename
 * @param rowkey
 * @throws IOException
*/
public void deleteRow(String tablename, String rowkey) throws IOException {
    HTable table =new HTable(conf, tablename);
    List list =new ArrayList();
    Delete d1 =new Delete(rowkey.getBytes());
    list.add(d1);
    table.delete(list);
    System.out.println("删除行成功!");
}
/**
 * 查找一行记录
 * @param tablename
 * @param rowkey
*/
public static void selectRow(String tablename, String rowKey)
throws IOException {
    HTable table =new HTable(conf, tablename);
    Get g =new Get(rowKey.getBytes());
    Result rs = table.get(g);
for (KeyValue kv : rs.raw()) {
        System.out.print(new String(kv.getRow()) +"");
        System.out.print(new String(kv.getFamily()) +":");
        System.out.print(new String(kv.getQualifier()) +"");
        System.out.print(kv.getTimestamp() +"");
        System.out.println(new String(kv.getValue()));
    }
}
public static void main(String args[]){
try {
HTableUtils.selectRow("hperson", "1");
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
/**
 * 查询表中所有行
 * @param tablename
*/
public void scaner(String tablename) {
try {
        HTable table =new HTable(conf, tablename);
        Scan s =new Scan();
        ResultScanner rs = table.getScanner(s);
for (Result r : rs) {
            KeyValue[] kv = r.raw();
for (int i =0; i < kv.length; i++) {
                System.out.print(new String(kv[i].getRow()) +"");
                System.out.print(new String(kv[i].getFamily()) +":");
                System.out.print(new String(kv[i].getQualifier()) +"");
                System.out.print(kv[i].getTimestamp() +"");
                System.out.println(new String(kv[i].getValue()));
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}

}


<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>


<groupId>asem_analysis</groupId>
<artifactId>asem_analysis</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>jar</packaging>


<name>asem_analysis</name>
<url>http://maven.apache.org</url>


<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<repositories>
<repository>
<id>central</id>
<url>https://repo1.maven.org/maven2</url>
</repository>
<repository>
<id>internal</id>
<url>http://10.200.34.190:8081/archiva/repository/internal/</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.8.2</version>
<scope>test</scope>
</dependency>
<!--<dependency> <groupId>hadoop-core</groupId> <artifactId>hadoop-core</artifactId> 
<version>0.20.203</version> </dependency> -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>0.20.203.0</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.1</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpasyncclient</artifactId>
<version>4.0-beta3</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>0.9.0</version>
</dependency>
<dependency>
<groupId>javax.jdo</groupId>
<artifactId>jdo2-api</artifactId>
<version>2.3-eb</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase</artifactId>
<version>0.90.4</version>
<scope>provided</scope>
</dependency>
</dependencies>
</project>


hbase-site.xml放到resources目录下:

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
        <property>
          <name>hbase.rootdir</name>
          <value>hdfs://namenode:9000/hbase</value>
        </property>
        <property>
          <name>hbase.cluster.distributed</name>
          <value>true</value>
        </property>
        <property>
          <name>zookeeper.session.timeout</name>
          <value>60000</value>
        </property>
        <property>
          <name>hbase.zookeeper.quorum</name>
          <value>namenode,datanode1,datanode2</value>
        </property>
        <property> 
                <name>hbase.master</name> 
                <value>hdfs://namenode:60000</value> 
        </property>         
        <property>  
          <name>hbase.zookeeper.property.dataDir</name>  
          <value>/opt/zookeeper</value>  
          <description>Property from ZooKeeper's config zoo.cfg.  
          The directory where the snapshot is stored.  
        </description>  
    </property> 
</configuration>

如果是在windows环境下运行程序需要修改hosts里的IP映射,datanode1 datanode2 namenode与IP的映射