【集合源码】HashMap源码解析(基于JDK 1.8)
来源:互联网 发布:傲剑八卦升级数据大全 编辑:程序博客网 时间:2024/06/08 17:40
- HashMap简介
- 源码解析
- 小结
HashMap简介
1.基于JDK 1.8的HashMap有三种数据结构:数组、链表、红黑树。
2.HashMap是非线程安全的。多线程环境下可以采用concurrent并发包下的concurrentHashMap。
3.HashMap存储的内容是键值对(key-value)映射,key、value都可以为null。
4.HashMap中的映射不是有序的。
5.实现了Cloneable接口,能被克隆。
6.实现了Serializable接口,支持序列化。
源码解析
比较重要的方法都加了详细的注解:
package java.util;import java.io.IOException;import java.io.InvalidObjectException;import java.io.Serializable;import java.lang.reflect.ParameterizedType;import java.lang.reflect.Type;import java.util.function.BiConsumer;import java.util.function.BiFunction;import java.util.function.Consumer;import java.util.function.Function;

/**
 * Hash-table based implementation of the Map interface (JDK 1.8 source,
 * reproduced by the article). Buckets hold either a singly linked list of
 * Node entries or, once a bin grows past TREEIFY_THRESHOLD and the table is
 * large enough, a red-black tree of TreeNode entries.
 *
 * NOTE(review): the article omits the TreeNode/iterator/spliterator inner
 * classes and several hooks (newNode, reinitialize, afterNode* callbacks),
 * so this listing does not compile standalone.
 */
public class HashMap<K,V> extends AbstractMap<K,V>
        implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    /**
     * Default initial capacity (number of buckets) = 16; the actual
     * capacity is always a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4;

    /**
     * Maximum capacity (a power of two <= 2^30); larger requested
     * capacities are clamped to this value.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /** Default load factor: 0.75. */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /** Bin count at which a linked-list bin is converted to a red-black tree. */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * Bin count below which a tree bin is converted back to a linked list
     * (used during resize/removal).
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * Smallest table capacity for which bins may be treeified; below this,
     * treeifyBin() resizes the table instead of building a tree.
     * (The article's original comment called this a "resize threshold",
     * which is inaccurate.)
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Basic singly-linked list node used for most entries.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;      // cached hash of the key
        final K key;
        V value;
        Node<K,V> next;      // next node in the same bucket

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        // Entry hash is key-hash XOR value-hash (Map.Entry contract).
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        // Two entries are equal iff both key and value are equal.
        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }

    /**
     * Spreads key.hashCode() by XORing the high 16 bits into the low 16,
     * so that high bits still influence the bucket index. A null key
     * hashes to 0 (which is why HashMap permits one null key).
     */
    static final int hash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null. Used by tree bins to order keys.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /**
     * Returns the smallest power of two >= cap (capped at
     * MAXIMUM_CAPACITY). The shift cascade smears the highest set bit of
     * (cap - 1) into all lower positions, then +1 yields the power of two.
     */
    static final int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /** The bucket array; allocated lazily on first insert, length is a power of two. */
    transient Node<K,V>[] table;

    /** Holds cached entrySet(). */
    transient Set<Map.Entry<K,V>> entrySet;

    /** The number of key-value mappings in this map. */
    transient int size;

    /** Structural modification count, used for fail-fast iteration. */
    transient int modCount;

    /**
     * Next size value at which to resize (capacity * load factor); also
     * temporarily holds the requested initial capacity before the table
     * is allocated.
     */
    int threshold;

    /** The load factor for the hash table. */
    final float loadFactor;

    /**
     * Constructs an empty map with the specified initial capacity and
     * load factor. The table itself is not allocated yet; the rounded
     * capacity is stashed in {@code threshold}.
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        this.threshold = tableSizeFor(initialCapacity);
    }

    /** Constructs an empty map with the given initial capacity and the default load factor. */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /** Constructs an empty map with default capacity (16) and load factor (0.75). */
    public HashMap() {
        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
    }

    /** Constructs a map with the same mappings as the given map. */
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        putMapEntries(m, false);
    }

    /**
     * Implements Map.putAll and the Map constructor: pre-sizes the table
     * for m.size() entries, then inserts them one by one.
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            }
            else if (s > threshold)
                resize();
            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }

    /** Returns the number of key-value mappings in this map. */
    public int size() {
        return size;
    }

    /** Returns true if this map contains no mappings. */
    public boolean isEmpty() {
        return size == 0;
    }

    /**
     * Returns the value mapped to key, or null if absent. Note that null
     * may also mean the key is mapped to null.
     */
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods: locates the bucket via
     * (n - 1) & hash, checks the first node, then searches the tree or
     * walks the list.
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            // First node in the bucket matches
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            // More than one node in the bucket
            if ((e = first.next) != null) {
                // Search within the red-black tree
                if (first instanceof TreeNode)
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Search along the linked list
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    /** Returns true if this map contains a mapping for the given key. */
    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }

    /**
     * Associates value with key; replaces the previous value if the key
     * already exists, otherwise creates a new entry.
     */
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }

    /**
     * Implements Map.put and related methods.
     *
     * @param hash         spread hash of the key
     * @param onlyIfAbsent if true, do not overwrite an existing non-null value
     * @param evict        false when called during map construction
     * @return the previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        if ((tab = table) == null || (n = tab.length) == 0) // lazy-allocate table
            n = (tab = resize()).length;
        // Compute the index; empty bucket -> place a new node directly
        if ((p = tab[i = (n - 1) & hash]) == null)
            tab[i] = newNode(hash, key, value, null);
        else {
            Node<K,V> e; K k;
            // First node already holds this key
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            // Bucket is a tree
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                // Bucket is a list: append, treeifying if it grows too long
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            // Key already present: overwrite unless onlyIfAbsent forbids it
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);
                return oldValue;
            }
        }
        ++modCount;
        // Grew past load-factor * capacity: resize
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict);
        return null;
    }

    /**
     * Initializes or doubles table size. Because capacities are powers of
     * two, every entry either stays at its index or moves to
     * index + oldCap, decided by the bit (hash & oldCap).
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            // Already at maximum capacity: stop growing and let collisions happen
            if (oldCap >= MAXIMUM_CAPACITY) {
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            // Otherwise double the capacity
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        // Compute the new resize threshold
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            // Move every bucket into the new table
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            // (hash & oldCap) == 0 -> stays at the old index
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            // otherwise -> moves to old index + oldCap
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        // Low list keeps the original index
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        // High list goes to original index + oldCap
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }

    /**
     * Replaces all linked nodes in the bin at the index for the given
     * hash with TreeNodes, unless the table is too small
     * (< MIN_TREEIFY_CAPACITY), in which case it resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            // Rebuild the bin as a doubly linked list of TreeNodes, then treeify
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }

    /** Copies all of the mappings from m into this map. */
    public void putAll(Map<? extends K, ? extends V> m) {
        putMapEntries(m, true);
    }

    /**
     * Removes the mapping for the given key if present; returns the
     * previous value or null.
     */
    public V remove(Object key) {
        Node<K,V> e;
        return (e = removeNode(hash(key), key, null, false, true)) == null ?
            null : e.value;
    }

    /**
     * Implements Map.remove and related methods.
     *
     * @param matchValue if true, remove only when the value is equal too
     * @param movable    if false, do not move other nodes while removing
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // Locate the node: first entry, tree lookup, or list walk
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            // Unlink it (from tree, bucket head, or list)
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);
                return node;
            }
        }
        return null;
    }

    /** Removes all mappings; the table keeps its capacity but every bucket is nulled. */
    public void clear() {
        Node<K,V>[] tab;
        modCount++;
        if ((tab = table) != null && size > 0) {
            size = 0;
            for (int i = 0; i < tab.length; ++i)
                tab[i] = null;
        }
    }

    /**
     * Returns true if one or more keys map to the given value (linear
     * scan over all buckets and nodes).
     */
    public boolean containsValue(Object value) {
        Node<K,V>[] tab; V v;
        if ((tab = table) != null && size > 0) {
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next) {
                    if ((v = e.value) == value ||
                        (value != null && value.equals(v)))
                        return true;
                }
            }
        }
        return false;
    }

    /**
     * Returns a Set view of the keys — a lazily created, cached KeySet
     * object backed by this map.
     */
    public Set<K> keySet() {
        Set<K> ks = keySet;
        if (ks == null) {
            ks = new KeySet();
            keySet = ks;
        }
        return ks;
    }

    /**
     * View of the keys. Extends AbstractSet, reflecting that keys are
     * unique; all operations delegate to the enclosing map.
     */
    final class KeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<K> iterator()     { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super K> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.key);
                }
                // Fail fast if the map was structurally modified during iteration
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns a Collection view of the values — a lazily created, cached
     * Values object backed by this map.
     */
    public Collection<V> values() {
        Collection<V> vs = values;
        if (vs == null) {
            vs = new Values();
            values = vs;
        }
        return vs;
    }

    /**
     * View of the values. Extends AbstractCollection (not AbstractSet as
     * KeySet does) because distinct keys may map to the same value, so
     * duplicates are possible.
     */
    final class Values extends AbstractCollection<V> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<V> iterator()     { return new ValueIterator(); }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super V> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.value);
                }
                // Fail fast if the map was structurally modified during iteration
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns a shallow copy of this map: the entries themselves are not
     * cloned, they are re-inserted into a fresh table.
     */
    @SuppressWarnings("unchecked")
    @Override
    public Object clone() {
        HashMap<K,V> result;
        try {
            result = (HashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // This cannot happen, since we are Cloneable
            throw new InternalError(e);
        }
        result.reinitialize();
        result.putMapEntries(this, false);
        return result;
    }

    // These methods are also used when serializing HashSets
    final float loadFactor() { return loadFactor; }
    final int capacity() {
        return (table != null) ? table.length :
            (threshold > 0) ? threshold :
            DEFAULT_INITIAL_CAPACITY;
    }

    /**
     * java.io.Serializable write hook: saves the bucket count, the size,
     * and then every key/value pair.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);
        s.writeInt(size);
        internalWriteEntries(s);
    }

    /**
     * java.io.Serializable read hook: mirror of writeObject. Validates the
     * load factor and mapping count, re-sizes the table (clamping the
     * load factor into [0.25, 4.0] for sizing purposes only), and
     * re-inserts each key/value pair via putVal.
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                putVal(hash(key), key, value, false, false);
            }
        }
    }

    /**
     * The red-black tree (TreeNode), iterator, spliterator, and other
     * related code are omitted by the article.
     */
}
小结
HashMap有三种数据结构,分别是数组,链表,红黑树。在JDK1.8之前是没有红黑树的。这里加上红黑树是因为仅仅用链表法解决哈希冲突时,链表的长度过长,查找的时间复杂度为O(n),性能没有红黑树好(查找的时间复杂度为O(logn))。
当链表中冲突节点数达到8时(且数组容量不小于64,否则只做扩容),就把链表转换成红黑树。为什么不直接用红黑树彻底代替链表呢?这里我猜测是因为当链表的长度只是个位数时,查找的时间复杂度只是常数级别的,性能完全够了。而且红黑树结构实现复杂。
如果元素个数超过阈值(load factor * current capacity),就要resize。
resize的过程,就是把bucket数组扩充为原来的2倍,之后根据 (hash &amp; oldCap) 判断每个节点是留在原索引位置,还是移动到"原索引+oldCap"的位置。
get过程中如果出现哈希冲突,则通过key.equals(k)去查找对应的entry:
若桶中为红黑树,则在树中通过key.equals(k)查找;若为链表,则遍历链表并通过key.equals(k)查找。
1 0
- 【集合源码】HashMap源码解析(基于JDK 1.8)
- 【集合】HashMap源码解析
- 源码解析-集合-HashMap
- JDK源码解析集合篇--HashMap无敌全解析
- HashMap 源码分析 基于 JDK 1.8
- jdk集合源码之HashMap
- JDK源码【集合框架】HashMap
- JDK之HashMap源码解析
- JDK源码解析之HashMap
- Java集合源码解析(二)HashMap源码解析
- Java集合源码解析(三)HashMap源码解析
- 【源码解析】JDK源码之HashMap
- 【集合详解】HashMap源码解析
- Java8集合源码解析-HashMap
- 【java集合】HashMap源码解析
- HashMap源码解析(基于JDK1.7)
- jdk源码解析--集合类
- JDK集合源码解析剖析
- Ex2:改写Canny算法(只用CImg库)【code1】
- c语言总结小笔记——小包包的成长记
- Linux上安装JDK
- Linux 下的 Redis 安装 && 启动 && 关闭 && 卸载
- 更改Ubuntu默认python版本的两种方法
- 【集合源码】HashMap源码解析(基于JDK 1.8)
- swift方法
- Linux系统调用
- Vue 学习笔记
- 阿里Java开发手册之编程规约
- jQuery插件推荐
- 工具类网站
- 从0开始搭建hadoop2.X.X集群环境
- Position beyond number of declared ordinal parameters. Remember that ordinal parameters are 1-based!