Serialization in Hadoop


1 Introduction

Serialization and deserialization are the conversion between structured objects and byte streams. They are used mainly for interprocess communication and for persistent storage.

Requirements of a communication format
Hadoop uses RPC for communication between its nodes. The RPC protocol serializes a message into a binary byte stream and sends it to the remote node, which deserializes the stream back into the original message. RPC serialization should satisfy the following:
1. Compact: a compact format makes the best use of network bandwidth.
2. Fast: interprocess communication forms the high-speed links of a distributed system, so serialization and deserialization must be fast enough not to become the transfer bottleneck.
3. Extensible: a new server can add a parameter for new clients while old clients continue to work.
4. Interoperable: clients written in different languages should be supported.

Requirements of a storage format
On the surface a serialization framework might seem to need different properties for persistent storage, but in fact the same four points apply:
1. Compact: the data takes up less space.
2. Fast: the data can be read and written quickly.
3. Extensible: data written in an old format can still be read.
4. Interoperable: the data can be read and written from multiple languages.

Hadoop's serialization format
Hadoop's own serialization format is the set of classes that implement the Writable interface. It delivers only the first two properties, compactness and speed; it is not easy to extend and it is not cross-language.
Let's first look at the Writable interface. It defines two methods:

1. write the object's fields to a binary stream

2. read the object's fields from a binary stream

package org.apache.hadoop.io;

public interface Writable {
    void write(java.io.DataOutput out) throws java.io.IOException;
    void readFields(java.io.DataInput in) throws java.io.IOException;
}
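The examples in the next two sections also rely on the comparison side of this framework: keys that need to be sorted implement WritableComparable, and comparators that can work on serialized data implement RawComparator. For reference, these two interfaces are not shown in the original post but live in org.apache.hadoop.io next to Writable (condensed here from two separate source files):

package org.apache.hadoop.io;

import java.util.Comparator;

// A WritableComparable is a Writable that can also be sorted.
// MapReduce key types implement this interface.
public interface WritableComparable<T> extends Writable, Comparable<T> {
}

// A RawComparator can compare two serialized records directly on their bytes,
// without deserializing them into objects first.
public interface RawComparator<T> extends Comparator<T> {
    int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2);
}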

2 Examples

The test below serializes an IntWritable holding the value 163, checks the serialized bytes, and then deserializes them back into an object:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class TestWritable {

    byte[] bytes = null;

    @Before
    public void init() throws IOException {
        IntWritable writable = new IntWritable(163);
        bytes = serialize(writable);
    }

    @Test
    public void testSerialize() throws IOException {
        // the serialized form is a four-byte stream
        Assert.assertEquals(4, bytes.length);
        // the bytes are written in big-endian order
        Assert.assertEquals("000000a3", StringUtils.byteToHexString(bytes));
    }

    @Test
    public void testDeserialize() throws IOException {
        // deserialize the bytes back into a fresh object
        IntWritable newWritable = new IntWritable();
        deserialize(newWritable, bytes);
        // get() returns the original value, 163
        Assert.assertEquals(163, newWritable.get());
    }

    /**
     * Serialize a Writable into a byte array.
     *
     * @param writable the object to serialize
     */
    public static byte[] serialize(Writable writable) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(out);
        writable.write(dataOut);
        dataOut.close();
        return out.toByteArray();
    }

    /**
     * Deserialize a byte array into a Writable.
     *
     * @param writable the object that receives the deserialized data
     * @param bytes    the serialized data
     */
    public static byte[] deserialize(Writable writable, byte[] bytes)
            throws IOException {
        ByteArrayInputStream in = new ByteArrayInputStream(bytes);
        DataInputStream dataIn = new DataInputStream(in);
        writable.readFields(dataIn);
        dataIn.close();
        return bytes;
    }
}
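To see where the expected string "000000a3" comes from: 163 in hexadecimal is 0xA3, and IntWritable.write() simply delegates to DataOutput.writeInt(), which emits the value as four big-endian bytes. A minimal plain-Java sketch of the same encoding, with no Hadoop classes involved (the class name HexDemo is only illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class HexDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(out);
        // writeInt() produces four big-endian bytes: 163 = 0xA3 -> 00 00 00 a3
        dataOut.writeInt(163);
        dataOut.close();
        StringBuilder hex = new StringBuilder();
        for (byte b : out.toByteArray()) {
            hex.append(String.format("%02x", b));
        }
        System.out.println(hex);   // prints 000000a3
    }
}

The second test class below looks at the comparator side of the framework: it fetches the registered comparator for IntWritable and compares two values both as objects and as serialized byte streams.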

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class TestComparator {

    // comparator that orders IntWritable keys
    RawComparator<IntWritable> comparator;
    IntWritable w1;
    IntWritable w2;

    /**
     * Obtain the comparator registered for IntWritable and initialize two values.
     */
    @SuppressWarnings("unchecked")
    @Before
    public void init() {
        comparator = WritableComparator.get(IntWritable.class);
        w1 = new IntWritable(163);
        w2 = new IntWritable(76);
    }

    /**
     * Compare the two objects directly.
     */
    @Test
    public void testComparator() {
        Assert.assertTrue(comparator.compare(w1, w2) > 0);
    }

    /**
     * Compare the serialized representations directly.
     */
    @Test
    public void testCompare() throws IOException {
        byte[] b1 = serialize(w1);
        byte[] b2 = serialize(w2);
        Assert.assertTrue(comparator.compare(b1, 0, b1.length, b2, 0, b2.length) > 0);
    }

    /**
     * Serialize an object that implements Writable into a byte array.
     */
    public static byte[] serialize(Writable writable) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(out);
        writable.write(dataOut);
        dataOut.close();
        return out.toByteArray();
    }
}
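The byte-level test is the important one: a RawComparator can order records by looking at the serialized bytes directly, without deserializing them first, which is what keeps sorting cheap during the MapReduce shuffle. Hadoop's built-in comparator for IntWritable works along these lines; the sketch below is a simplified illustration (the class name IntRawComparator is only illustrative):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparator;

public class IntRawComparator extends WritableComparator {

    public IntRawComparator() {
        super(IntWritable.class);
    }

    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
        // WritableComparator.readInt() decodes four big-endian bytes,
        // so no IntWritable objects are created at all
        int v1 = readInt(b1, s1);
        int v2 = readInt(b2, s2);
        return (v1 < v2) ? -1 : ((v1 == v2) ? 0 : 1);
    }
}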

3 A Custom Writable

To define your own type, implement WritableComparable: write() and readFields() handle serialization, and compareTo() defines the sort order. InfoBean below does exactly that, and TestInfoBean exercises serialization, deserialization, and comparison:

InfoBean.java:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {

    private String account;
    private double income;
    private double expenses;
    private double surplus;

    public void set(String account, double income, double expenses) {
        this.account = account;
        this.income = income;
        this.expenses = expenses;
        this.surplus = income - expenses;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(account);
        out.writeDouble(income);
        out.writeDouble(expenses);
        out.writeDouble(surplus);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expenses = in.readDouble();
        this.surplus = in.readDouble();
    }

    @Override
    public int compareTo(InfoBean o) {
        if (this.income == o.getIncome()) {
            return this.expenses > o.getExpenses() ? 1 : -1;
        }
        return this.income > o.getIncome() ? 1 : -1;
    }

    @Override
    public String toString() {
        return income + "\t" + expenses + "\t" + surplus;
    }

    public String getAccount() {
        return account;
    }

    public void setAccount(String account) {
        this.account = account;
    }

    public double getIncome() {
        return income;
    }

    public void setIncome(double income) {
        this.income = income;
    }

    public double getExpenses() {
        return expenses;
    }

    public void setExpenses(double expenses) {
        this.expenses = expenses;
    }

    public double getSurplus() {
        return surplus;
    }

    public void setSurplus(double surplus) {
        this.surplus = surplus;
    }
}

TestInfoBean.java:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;

public class TestInfoBean {

    public static void main(String[] args) throws IOException {
        // serialize
        InfoBean infoBean = new InfoBean();
        infoBean.set("abc", 100, 10);
        byte[] bytes = serialize(infoBean);
        System.out.println(bytes.length);

        // deserialize
        InfoBean infoBeanRes = new InfoBean();
        deserialize(infoBeanRes, bytes);
        System.out.println(infoBeanRes);

        // compare
        @SuppressWarnings("unchecked")
        RawComparator<InfoBean> comparator = WritableComparator.get(InfoBean.class);
        InfoBean infoBean1 = new InfoBean();
        infoBean1.set("abc", 110, 10);
        InfoBean infoBean2 = new InfoBean();
        infoBean2.set("abc", 100, 10);
        System.out.println(comparator.compare(infoBean1, infoBean2));
    }

    /**
     * Serialize a Writable into a byte array.
     *
     * @param writable the object to serialize
     */
    public static byte[] serialize(Writable writable) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(out);
        writable.write(dataOut);
        dataOut.close();
        return out.toByteArray();
    }

    /**
     * Deserialize a byte array into a Writable.
     *
     * @param writable the object that receives the deserialized data
     * @param bytes    the serialized data
     */
    public static byte[] deserialize(Writable writable, byte[] bytes)
            throws IOException {
        ByteArrayInputStream in = new ByteArrayInputStream(bytes);
        DataInputStream dataIn = new DataInputStream(in);
        writable.readFields(dataIn);
        dataIn.close();
        return bytes;
    }
}
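Running TestInfoBean should print 29 first: writeUTF("abc") takes 2 + 3 = 5 bytes (a two-byte length prefix plus the three characters), and the three doubles take 8 bytes each. It then prints 100.0, 10.0 and 90.0 separated by tabs, and finally 1, because infoBean1's income (110) is larger than infoBean2's (100). Since InfoBean registers no raw comparator, WritableComparator.get(InfoBean.class) returns a generic comparator whose byte-level compare() first deserializes both streams back into InfoBean objects and then calls compareTo(). For large MapReduce jobs it is usually worth writing a dedicated raw comparator that mirrors compareTo() on the bytes themselves. A hypothetical sketch, not part of the original post (InfoBeanComparator is an invented name):

import org.apache.hadoop.io.WritableComparator;

public class InfoBeanComparator extends WritableComparator {

    public InfoBeanComparator() {
        super(InfoBean.class);
    }

    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
        // account is written with writeUTF(): a two-byte length prefix
        // followed by that many bytes; income starts right after it
        int off1 = s1 + 2 + readUnsignedShort(b1, s1);
        int off2 = s2 + 2 + readUnsignedShort(b2, s2);
        double income1 = readDouble(b1, off1);
        double income2 = readDouble(b2, off2);
        if (income1 == income2) {
            // mirror InfoBean.compareTo(): fall back to expenses
            double expenses1 = readDouble(b1, off1 + 8);
            double expenses2 = readDouble(b2, off2 + 8);
            return expenses1 > expenses2 ? 1 : -1;
        }
        return income1 > income2 ? 1 : -1;
    }
}

To make Hadoop pick such a comparator up, the key class usually registers it in a static initializer, for example inside InfoBean: static { WritableComparator.define(InfoBean.class, new InfoBeanComparator()); }.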

Original post: http://blog.csdn.net/lastsweetop/article/details/9193907

