Hadoop源码分析_DatanodeInfo
来源:互联网 发布:虚拟社交网络定义 编辑:程序博客网 时间:2024/06/05 05:25
封装了一个数据节点(DataNode)的状态信息,是 Datanode 协议和 Client 协议中用于通信的载体。
package org.apache.hadoop.hdfs.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;

/**
 * DatanodeInfo represents the status of a DataNode.
 * This object is used for communication in the
 * Datanode Protocol and the Client Protocol.
 */
public class DatanodeInfo extends DatanodeID implements Node {
  protected long capacity;       // raw configured capacity, in bytes
  protected long dfsUsed;        // bytes used by DFS
  protected long remaining;      // raw free space, in bytes
  protected long lastUpdate;     // millis timestamp of the last heartbeat/report
  protected int xceiverCount;    // number of active transfer connections
  protected String location = NetworkTopology.DEFAULT_RACK;

  /** HostName as supplied by the datanode during registration as its
   * name. Namenode uses datanode IP address as the name.
   */
  protected String hostName = null;

  /**
   * Administrative states of a datanode.
   * NOTE(review): the scraped original read "public AdminStates {...}" —
   * the "enum" keyword was lost in transcription and is restored here,
   * otherwise the class does not compile.
   */
  public enum AdminStates { NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED }

  // null is used internally to mean NORMAL; see getAdminState()/setAdminState().
  protected AdminStates adminState;

  public DatanodeInfo() {
    super();
    adminState = null;
  }

  /** Copy constructor. */
  public DatanodeInfo(DatanodeInfo from) {
    super(from);
    this.capacity = from.getCapacity();
    this.dfsUsed = from.getDfsUsed();
    this.remaining = from.getRemaining();
    this.lastUpdate = from.getLastUpdate();
    this.xceiverCount = from.getXceiverCount();
    this.location = from.getNetworkLocation();
    this.adminState = from.adminState;
    this.hostName = from.hostName;
  }

  /** Construct from a bare DatanodeID; all usage counters start at zero. */
  public DatanodeInfo(DatanodeID nodeID) {
    super(nodeID);
    this.capacity = 0L;
    this.dfsUsed = 0L;
    this.remaining = 0L;
    this.lastUpdate = 0L;
    this.xceiverCount = 0;
    this.adminState = null;
  }

  protected DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
    this(nodeID);
    this.location = location;
    this.hostName = hostName;
  }

  /** Constructor */
  public DatanodeInfo(final String name, final String storageID,
      final int infoPort, final int ipcPort,
      final long capacity, final long dfsUsed, final long remaining,
      final long lastUpdate, final int xceiverCount,
      final String networkLocation, final String hostName,
      final AdminStates adminState) {
    super(name, storageID, infoPort, ipcPort);
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
    this.remaining = remaining;
    this.lastUpdate = lastUpdate;
    this.xceiverCount = xceiverCount;
    this.location = networkLocation;
    this.hostName = hostName;
    this.adminState = adminState;
  }

  /** The raw capacity. */
  public long getCapacity() { return capacity; }

  /** The used space by the data node. */
  public long getDfsUsed() { return dfsUsed; }

  /** The space used for non-DFS purposes (capacity - dfsUsed - remaining),
   *  clamped to zero so stale counters never report a negative value. */
  public long getNonDfsUsed() {
    long nonDFSUsed = capacity - dfsUsed - remaining;
    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
  }

  /** The used space by the data node as percentage of present capacity.
   *  Reports 100% when capacity is unknown (&lt;= 0). */
  public float getDfsUsedPercent() {
    if (capacity <= 0) {
      return 100;
    }
    return ((float)dfsUsed * 100.0f)/(float)capacity;
  }

  /** The raw free space. */
  public long getRemaining() { return remaining; }

  /** The remaining space as percentage of configured capacity.
   *  Reports 0% when capacity is unknown (&lt;= 0). */
  public float getRemainingPercent() {
    if (capacity <= 0) {
      return 0;
    }
    return ((float)remaining * 100.0f)/(float)capacity;
  }

  /** The time when this information was accurate. */
  public long getLastUpdate() { return lastUpdate; }

  /** number of active connections */
  public int getXceiverCount() { return xceiverCount; }

  /** Sets raw capacity. */
  public void setCapacity(long capacity) {
    this.capacity = capacity;
  }

  /** Sets raw free space. */
  public void setRemaining(long remaining) {
    this.remaining = remaining;
  }

  /** Sets time when this information was accurate. */
  public void setLastUpdate(long lastUpdate) {
    this.lastUpdate = lastUpdate;
  }

  /** Sets number of active connections */
  public void setXceiverCount(int xceiverCount) {
    this.xceiverCount = xceiverCount;
  }

  /** rack name **/
  public synchronized String getNetworkLocation() { return location; }

  /** Sets the rack name */
  public synchronized void setNetworkLocation(String location) {
    this.location = NodeBase.normalize(location);
  }

  /** Registered host name, falling back to the DatanodeID host when unset. */
  public String getHostName() {
    return (hostName == null || hostName.length() == 0) ? getHost() : hostName;
  }

  public void setHostName(String host) {
    hostName = host;
  }

  /** A formatted string for reporting the status of the DataNode. */
  public String getDatanodeReport() {
    StringBuilder buffer = new StringBuilder();
    long c = getCapacity();
    long r = getRemaining();
    long u = getDfsUsed();
    long nonDFSUsed = getNonDfsUsed();
    float usedPercent = getDfsUsedPercent();
    float remainingPercent = getRemainingPercent();
    buffer.append("Name: "+name+"\n");
    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
      buffer.append("Rack: "+location+"\n");
    }
    buffer.append("Decommission Status : ");
    if (isDecommissioned()) {
      buffer.append("Decommissioned\n");
    } else if (isDecommissionInProgress()) {
      buffer.append("Decommission in progress\n");
    } else {
      buffer.append("Normal\n");
    }
    buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n");
    buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
    buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
    buffer.append("DFS Remaining: " +r+ "("+StringUtils.byteDesc(r)+")"+"\n");
    buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n");
    buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n");
    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
    return buffer.toString();
  }

  /** A formatted string for printing the status of the DataNode. */
  public String dumpDatanode() {
    StringBuilder buffer = new StringBuilder();
    long c = getCapacity();
    long r = getRemaining();
    long u = getDfsUsed();
    buffer.append(name);
    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
      buffer.append(" "+location);
    }
    if (isDecommissioned()) {
      buffer.append(" DD");
    } else if (isDecommissionInProgress()) {
      buffer.append(" DP");
    } else {
      buffer.append(" IN");
    }
    buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
    buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
    // NOTE(review): when c == 0 this prints NaN%; preserved as-is from the
    // original, since callers may depend on the exact report format.
    buffer.append(" " + StringUtils.limitDecimalTo2(((1.0*u)/c)*100)+"%");
    buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
    buffer.append(" " + new Date(lastUpdate));
    return buffer.toString();
  }

  /**
   * Start decommissioning a node.
   * old state.
   */
  public void startDecommission() {
    adminState = AdminStates.DECOMMISSION_INPROGRESS;
  }

  /**
   * Stop decommissioning a node.
   * old state.
   */
  public void stopDecommission() {
    adminState = null;
  }

  /**
   * Returns true if the node is in the process of being decommissioned
   */
  public boolean isDecommissionInProgress() {
    return adminState == AdminStates.DECOMMISSION_INPROGRESS;
  }

  /**
   * Returns true if the node has been decommissioned.
   */
  public boolean isDecommissioned() {
    return adminState == AdminStates.DECOMMISSIONED;
  }

  /**
   * Sets the admin state to indicate that decommission is complete.
   */
  public void setDecommissioned() {
    adminState = AdminStates.DECOMMISSIONED;
  }

  /**
   * Retrieves the admin state of this node. Never returns null:
   * the internal null sentinel is reported as NORMAL.
   */
  public AdminStates getAdminState() {
    if (adminState == null) {
      return AdminStates.NORMAL;
    }
    return adminState;
  }

  /**
   * Sets the admin state of this node. NORMAL is stored as null so that
   * getAdminState() and the serialized form stay consistent.
   */
  protected void setAdminState(AdminStates newState) {
    if (newState == AdminStates.NORMAL) {
      adminState = null;
    } else {
      adminState = newState;
    }
  }

  private int level;   // which level of the tree the node resides
  private Node parent; // its parent

  /** Return this node's parent */
  public Node getParent() { return parent; }
  public void setParent(Node parent) { this.parent = parent; }

  /** Return this node's level in the tree.
   * E.g. the root of a tree returns 0 and its children return 1
   */
  public int getLevel() { return level; }
  public void setLevel(int level) { this.level = level; }

  /////////////////////////////////////////////////
  // Writable
  /////////////////////////////////////////////////
  static {  // register a ctor
    WritableFactories.setFactory
      (DatanodeInfo.class,
       new WritableFactory() {
         public Writable newInstance() { return new DatanodeInfo(); }
       });
  }

  /** {@inheritDoc} */
  public void write(DataOutput out) throws IOException {
    super.write(out);
    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
    out.writeShort(ipcPort);
    out.writeLong(capacity);
    out.writeLong(dfsUsed);
    out.writeLong(remaining);
    out.writeLong(lastUpdate);
    out.writeInt(xceiverCount);
    Text.writeString(out, location);
    Text.writeString(out, hostName == null ? "" : hostName);
    WritableUtils.writeEnum(out, getAdminState());
  }

  /** {@inheritDoc} */
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
    // mask to reinterpret the signed short as an unsigned 16-bit port number
    this.ipcPort = in.readShort() & 0x0000ffff;
    this.capacity = in.readLong();
    this.dfsUsed = in.readLong();
    this.remaining = in.readLong();
    this.lastUpdate = in.readLong();
    this.xceiverCount = in.readInt();
    this.location = Text.readString(in);
    this.hostName = Text.readString(in);
    setAdminState(WritableUtils.readEnum(in, AdminStates.class));
  }
}
- Hadoop源码分析_DatanodeInfo
- Hadoop源码分析-HDFS
- Hadoop RPC源码分析
- hadoop datanode源码分析
- hadoop datanode源码分析
- Hadoop RPC源码分析
- hadoop datanode源码分析
- hadoop 源码分析一
- Hadoop源码分析_DatanodeDescriptor
- hadoop源码分析 jobsplit
- Hadoop源码分析
- Hadoop TextInputFormat源码分析
- Hadoop InputFormat源码分析
- Hadoop源码分析-Text
- Hadoop源码分析-Context
- Hadoop源码分析1
- Hadoop源码分析
- Hadoop RPC源码分析
- R语言与数据挖掘学习笔记
- 使用游标for偱环和嵌套游标
- linux PHP-FPM
- Codeforces Round #208 (Div. 2) A_ Dima and Continuous Line
- 手机内存解读以及android刷机原理
- Hadoop源码分析_DatanodeInfo
- TOGAF培训讲义(周金根)
- 博客2
- 百度百发引热议 传统金融业亟需变革
- NFC技术,爱就贴一起,异地恋去死
- uva 11029 - Leading and Trailing(快速幂)
- Hadoop源码学习_Host2NodesMap
- 传谷歌10月31日发布Nexus 5及Android 4.4
- 第一篇