hadoop 2.6 HdfsServerConstants Source Code Analysis


The org.apache.hadoop.hdfs.server.common.HdfsServerConstants class defines a number of constants used internally by the HDFS server.

Node type: there are only three kinds of node, NAME_NODE, DATA_NODE, and JOURNAL_NODE.

  /**
   * Type of the node
   */
  static public enum NodeType {
    NAME_NODE,
    DATA_NODE,
    JOURNAL_NODE;
  }

Startup options for a rolling upgrade:

  /** Startup options for rolling upgrade. */
  public static enum RollingUpgradeStartupOption{
    ROLLBACK, DOWNGRADE, STARTED;

    public String getOptionString() {
      return StartupOption.ROLLINGUPGRADE.getName() + " "
          + name().toLowerCase();
    }

    public boolean matches(StartupOption option) {
      return option == StartupOption.ROLLINGUPGRADE
          && option.getRollingUpgradeStartupOption() == this;
    }

    private static final RollingUpgradeStartupOption[] VALUES = values();

    static RollingUpgradeStartupOption fromString(String s) {
      for(RollingUpgradeStartupOption opt : VALUES) {
        if (opt.name().equalsIgnoreCase(s)) {
          return opt;
        }
      }
      throw new IllegalArgumentException("Failed to convert \"" + s
          + "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
    }

    public static String getAllOptionString() {
      final StringBuilder b = new StringBuilder("<");
      for(RollingUpgradeStartupOption opt : VALUES) {
        b.append(opt.name().toLowerCase()).append("|");
      }
      b.setCharAt(b.length() - 1, '>');
      return b.toString();
    }
  }
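As a quick sanity check, here is a minimal sketch of how these helpers behave. The demo class is my own, not part of Hadoop; it sits in org.apache.hadoop.hdfs.server.common because fromString() is package-private, and it assumes hadoop-hdfs 2.6 on the classpath.

// Illustrative sketch only; not part of HdfsServerConstants.
package org.apache.hadoop.hdfs.server.common;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;

public class RollingUpgradeOptionDemo {
  public static void main(String[] args) {
    // getOptionString() prepends the -rollingUpgrade flag: prints "-rollingUpgrade rollback"
    System.out.println(RollingUpgradeStartupOption.ROLLBACK.getOptionString());

    // fromString() matches case-insensitively: prints "STARTED"
    System.out.println(RollingUpgradeStartupOption.fromString("Started"));

    // getAllOptionString() builds usage text: prints "<rollback|downgrade|started>"
    System.out.println(RollingUpgradeStartupOption.getAllOptionString());
  }
}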

Startup options:

  /** Startup options */
  static public enum StartupOption{
    FORMAT  ("-format"),
    CLUSTERID ("-clusterid"),
    GENCLUSTERID ("-genclusterid"),
    REGULAR ("-regular"),
    BACKUP  ("-backup"),
    CHECKPOINT("-checkpoint"),
    UPGRADE ("-upgrade"),
    ROLLBACK("-rollback"),
    FINALIZE("-finalize"),
    ROLLINGUPGRADE("-rollingUpgrade"),
    IMPORT  ("-importCheckpoint"),
    BOOTSTRAPSTANDBY("-bootstrapStandby"),
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER  ("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive"),
    RENAMERESERVED("-renameReserved"),
    METADATAVERSION("-metadataVersion"),
    UPGRADEONLY("-upgradeOnly"),
    // The -hotswap constant should not be used as a startup option, it is
    // only used for StorageDirectory.analyzeStorage() in hot swap drive scenario.
    // TODO refactor StorageDirectory.analyzeStorage() so that we can do away with
    // this in StartupOption.
    HOTSWAP("-hotswap");

    private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
        "(\\w+)\\((\\w+)\\)");

    private final String name;

    // Used only with format and upgrade options
    private String clusterId = null;

    // Used only by rolling upgrade
    private RollingUpgradeStartupOption rollingUpgradeStartupOption;

    // Used only with format option
    private boolean isForceFormat = false;
    private boolean isInteractiveFormat = true;

    // Used only with recovery option
    private int force = 0;

    private StartupOption(String arg) {this.name = arg;}

    public String getName() {return name;}

    public NamenodeRole toNodeRole() {
      switch(this) {
      case BACKUP:
        return NamenodeRole.BACKUP;
      case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
      default:
        return NamenodeRole.NAMENODE;
      }
    }

    public void setClusterId(String cid) {
      clusterId = cid;
    }

    public String getClusterId() {
      return clusterId;
    }

    public void setRollingUpgradeStartupOption(String opt) {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      rollingUpgradeStartupOption = RollingUpgradeStartupOption.fromString(opt);
    }

    public RollingUpgradeStartupOption getRollingUpgradeStartupOption() {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      return rollingUpgradeStartupOption;
    }

    public MetaRecoveryContext createRecoveryContext() {
      if (!name.equals(RECOVER.name))
        return null;
      return new MetaRecoveryContext(force);
    }

    public void setForce(int force) {
      this.force = force;
    }

    public int getForce() {
      return this.force;
    }

    public boolean getForceFormat() {
      return isForceFormat;
    }

    public void setForceFormat(boolean force) {
      isForceFormat = force;
    }

    public boolean getInteractiveFormat() {
      return isInteractiveFormat;
    }

    public void setInteractiveFormat(boolean interactive) {
      isInteractiveFormat = interactive;
    }

    @Override
    public String toString() {
      if (this == ROLLINGUPGRADE) {
        return new StringBuilder(super.toString())
            .append("(").append(getRollingUpgradeStartupOption()).append(")")
            .toString();
      }
      return super.toString();
    }

    static public StartupOption getEnum(String value) {
      Matcher matcher = ENUM_WITH_ROLLING_UPGRADE_OPTION.matcher(value);
      if (matcher.matches()) {
        StartupOption option = StartupOption.valueOf(matcher.group(1));
        option.setRollingUpgradeStartupOption(matcher.group(2));
        return option;
      } else {
        return StartupOption.valueOf(value);
      }
    }
  }
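The interesting part is getEnum(), which accepts either a plain enum name or the "ROLLINGUPGRADE(sub-option)" form produced by toString(). A small demo of my own (assuming hadoop-hdfs 2.6 on the classpath):

// Illustrative sketch only; the demo class is not part of Hadoop.
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class StartupOptionDemo {
  public static void main(String[] args) {
    // Plain names parse via valueOf(); each option carries its command-line flag.
    StartupOption fmt = StartupOption.getEnum("FORMAT");
    System.out.println(fmt.getName());                       // -format

    // The ENUM_WITH_ROLLING_UPGRADE_OPTION pattern lets getEnum() reverse
    // what toString() produces for ROLLINGUPGRADE.
    StartupOption ru = StartupOption.getEnum("ROLLINGUPGRADE(ROLLBACK)");
    System.out.println(ru);                                  // ROLLINGUPGRADE(ROLLBACK)

    // toNodeRole() maps BACKUP/CHECKPOINT to their NamenodeRole;
    // everything else defaults to NamenodeRole.NAMENODE.
    System.out.println(StartupOption.BACKUP.toNodeRole() == NamenodeRole.BACKUP); // true
    System.out.println(StartupOption.UPGRADE.toNodeRole());                       // NameNode
  }
}

Note that the rolling-upgrade sub-option is stored as mutable state on the shared ROLLINGUPGRADE constant, so getEnum() mutates that constant globally rather than returning an independent value.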
The following HdfsServerConstants constants define the timeouts for streaming reads and writes to a DataNode: the read timeout is 1 minute and the write timeout is 8 minutes.

  // Timeouts for communicating with DataNode for streaming writes/reads
  public static final int READ_TIMEOUT = 60 * 1000;
  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
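The *_EXTENSION values are meant to stretch the base timeout for each extra node involved in a pipeline. The helper below is my own illustration of that idea, not Hadoop's actual API:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Illustrative only: the effective timeout grows by one EXTENSION per
// DataNode in the pipeline. Method names are made up for this sketch.
public class TimeoutSketch {
  static int pipelineWriteTimeout(int numNodes) {
    return HdfsServerConstants.WRITE_TIMEOUT
        + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes;
  }

  static int pipelineReadTimeout(int numNodes) {
    return HdfsServerConstants.READ_TIMEOUT
        + HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes;
  }

  public static void main(String[] args) {
    // A 3-replica write pipeline: 480 000 ms base + 3 * 5 000 ms = 495 000 ms
    System.out.println(pipelineWriteTimeout(3));
  }
}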


NamenodeRole has only the following three values:

  /**
   * Defines the NameNode role.
   */
  static public enum NamenodeRole {
    NAMENODE  ("NameNode"),
    BACKUP    ("Backup Node"),
    CHECKPOINT("Checkpoint Node");

    private String description = null;
    private NamenodeRole(String arg) {this.description = arg;}

    @Override
    public String toString() {
      return description;
    }
  }

Replica states: the states a block replica can move through while it is being constructed. FINALIZED means the replica is finished and will not be modified; RBW means the replica is being written to; RWR means the replica is waiting to be recovered; RUR means the replica is under recovery; TEMPORARY is a temporary replica created only for replication and relocation.

  /**
   * Block replica states, which it can go through while being constructed.
   */
  static public enum ReplicaState {
    /** Replica is finalized. The state when replica is not modified. */
    FINALIZED(0),
    /** Replica is being written to. */
    RBW(1),
    /** Replica is waiting to be recovered. */
    RWR(2),
    /** Replica is under recovery. */
    RUR(3),
    /** Temporary replica: created for replication and relocation only. */
    TEMPORARY(4);

    private final int value;

    private ReplicaState(int v) {
      value = v;
    }

    public int getValue() {
      return value;
    }

    public static ReplicaState getState(int v) {
      return ReplicaState.values()[v];
    }

    /** Read from in */
    public static ReplicaState read(DataInput in) throws IOException {
      return values()[in.readByte()];
    }

    /** Write to out */
    public void write(DataOutput out) throws IOException {
      out.writeByte(ordinal());
    }
  }
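Since read() and write() just move the ordinal through a single byte, a replica state can be round-tripped through any DataOutput/DataInput pair. A self-contained sketch (the demo class is mine, assuming hadoop-hdfs 2.6 on the classpath):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

// Illustrative sketch: round-trips a ReplicaState through its write()/read()
// methods, which serialize the state as a single byte (the ordinal value).
public class ReplicaStateDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ReplicaState.RBW.write(new DataOutputStream(bytes));   // writes the byte 0x01

    ReplicaState restored = ReplicaState.read(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(restored);                          // RBW
    System.out.println(restored.getValue());               // 1
  }
}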

BlockUCState describes the states a block can go through while it is under construction. COMPLETE: block construction has finished; the block has at least one replica in the FINALIZED state and will not be modified further. UNDER_CONSTRUCTION: the block has recently been allocated for write or append and is being written to. UNDER_RECOVERY: the block is under recovery; when a file lease expires, its last block may not yet be COMPLETE and must go through a recovery procedure that synchronizes the contents of the existing replicas. COMMITTED: the client has reported that all bytes were written to the DataNodes with the given generation stamp and block length, but no FINALIZED replica has yet been reported by the DataNodes themselves.

  /**
   * States, which a block can go through while it is under construction.
   */
  static public enum BlockUCState {
    /**
     * Block construction completed.<br>
     * The block has at least one {@link ReplicaState#FINALIZED} replica,
     * and is not going to be modified.
     */
    COMPLETE,
    /**
     * The block is under construction.<br>
     * It has been recently allocated for write or append.
     */
    UNDER_CONSTRUCTION,
    /**
     * The block is under recovery.<br>
     * When a file lease expires its last block may not be {@link #COMPLETE}
     * and needs to go through a recovery procedure,
     * which synchronizes the existing replicas contents.
     */
    UNDER_RECOVERY,
    /**
     * The block is committed.<br>
     * The client reported that all bytes are written to data-nodes
     * with the given generation stamp and block length, but no
     * {@link ReplicaState#FINALIZED}
     * replicas has yet been reported by data-nodes themselves.
     */
    COMMITTED;
  }
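Reading the Javadoc, the usual lifecycle is UNDER_CONSTRUCTION, then COMMITTED, then COMPLETE, with UNDER_RECOVERY entered when a lease expires on an unfinished last block. The deliberately simplified transition table below captures that reading; it is my own sketch, and the real state machine lives in the NameNode's block-management code and is more involved.

import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;

// Hedged sketch of the lifecycle described in the Javadoc above; not the
// NameNode's actual transition logic.
public class BlockLifecycleSketch {
  static final Map<BlockUCState, EnumSet<BlockUCState>> NEXT =
      new EnumMap<BlockUCState, EnumSet<BlockUCState>>(BlockUCState.class);
  static {
    NEXT.put(BlockUCState.UNDER_CONSTRUCTION,
        EnumSet.of(BlockUCState.COMMITTED, BlockUCState.UNDER_RECOVERY));
    NEXT.put(BlockUCState.COMMITTED, EnumSet.of(BlockUCState.COMPLETE));
    NEXT.put(BlockUCState.UNDER_RECOVERY, EnumSet.of(BlockUCState.COMPLETE));
    NEXT.put(BlockUCState.COMPLETE, EnumSet.noneOf(BlockUCState.class));
  }

  public static void main(String[] args) {
    // The common write path: allocated -> all bytes reported -> replica finalized
    System.out.println(NEXT.get(BlockUCState.UNDER_CONSTRUCTION)
        .contains(BlockUCState.COMMITTED));   // true
  }
}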


Some other constants are as follows:


  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;

  public static final String CRYPTO_XATTR_ENCRYPTION_ZONE =
      "raw.hdfs.crypto.encryption.zone";
  public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
      "raw.hdfs.crypto.file.encryption.info";
  public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
      "security.hdfs.unreadable.by.superuser";

