ConcurrentHashMap Source Code
java.util.concurrent.ConcurrentHashMap: class ConcurrentHashMap<K,V> extends AbstractMap<K,V> implements ConcurrentMap<K,V>, Serializable
- Primary design goal: maintain concurrent readability (get and iteration); secondary goal: keep space consumption the same as or better than HashMap, and support efficient initial insertion from many threads into an empty table.
Hashtable is also thread-safe, but it relies on synchronized methods, which performs poorly under concurrency: while thread 1 is executing put, thread 2 can neither put nor get.
CAS (Compare And Swap): unsafe.compareAndSwapInt(this, valueOffset, expect, update) means that if the value at valueOffset equals expect, the value at valueOffset is updated to update and true is returned; otherwise nothing is updated and false is returned. (A minimal retry-loop sketch follows the list below.)
- As in the Java 8 HashMap, the underlying structure is still an array + linked lists + red-black trees;
- Tree bins store a TreeBin object in the table slot, rather than a TreeNode;
- CAS is a well-known lock-free technique, so does ConcurrentHashMap avoid locks entirely? Of course not: the head node of a bin (the list of nodes hashing to the same slot) is still locked with synchronized.
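To make the CAS idea concrete, here is a minimal sketch of a lock-free counter built on java.util.concurrent.atomic.AtomicInteger, which delegates to the same Unsafe CAS primitive; the class name CasCounterDemo is ours, not from the JDK:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class CasCounterDemo {
    private final AtomicInteger value = new AtomicInteger(0);

    // Classic CAS retry loop: read the current value, compute the next one,
    // and only commit if no other thread changed it in the meantime.
    public int increment() {
        for (;;) {
            int expect = value.get();
            int update = expect + 1;
            if (value.compareAndSet(expect, update))
                return update; // CAS succeeded, no lock was taken
            // CAS failed: another thread won the race; retry with a fresh read
        }
    }

    public static void main(String[] args) {
        CasCounterDemo c = new CasCounterDemo();
        System.out.println(c.increment()); // prints 1
    }
}
```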
1. Class Variables & Constants
```java
private static final int MAXIMUM_CAPACITY = 1 << 30; // maximum capacity
private static final int DEFAULT_CAPACITY = 16;      // default capacity
// maximum array length, used by toArray and related methods
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// default concurrency level; only ensures the initial capacity is no smaller
// than concurrencyLevel at construction time. Kept for compatibility with
// older versions; unused in JDK 8
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
private static final float LOAD_FACTOR = 0.75f;      // default load factor
// bin length at which a list is converted to a red-black tree; the
// conversion only happens when the table capacity is at least 64
static final int TREEIFY_THRESHOLD = 8;
static final int UNTREEIFY_THRESHOLD = 6; // a tree degenerates back to a list
// minimum number of slots claimed per transfer step; at least DEFAULT_CAPACITY
private static final int MIN_TRANSFER_STRIDE = 16;
private static int RESIZE_STAMP_BITS = 16; // bits used for the resize stamp in sizeCtl
// maximum number of threads that can help resize
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
// bit shift for recording the resize stamp in sizeCtl
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// special hash values
static final int MOVED     = -1; // hash for forwarding nodes
static final int TREEBIN   = -2; // hash for roots of trees
static final int RESERVED  = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
static final int NCPU = Runtime.getRuntime().availableProcessors(); // number of available processors

// the table of bins; its size is always a power of two, and it is
// initialized lazily, on the first insertion
transient volatile Node<K,V>[] table;
// the next table to use; non-null only during a resize
private transient volatile Node<K,V>[] nextTable;
// base of the element count, updated via CAS; by itself it does not
// equal the map's element count (see counterCells and sumCount)
private transient volatile long baseCount;
/*
 * Table initialization and resizing control. Negative values mean the table
 * is being initialized or resized: -1 for initialization, and -N roughly
 * indicating N - 1 threads resizing. Zero or a positive value means the
 * table has not been initialized yet, and the value is the initial size or
 * the next resize threshold. The threshold stays at 0.75 times the current
 * capacity, matching the load factor: once the actual size reaches sizeCtl,
 * a resize is triggered.
 */
private transient volatile int sizeCtl;
private transient volatile int transferIndex; // next table index (plus one) to split while resizing
// spinlock (locked via CAS) used when resizing and/or creating CounterCells
private transient volatile int cellsBusy;
// array of CounterCells; its size is a power of two
private transient volatile CounterCell[] counterCells;
// view classes
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;

// Unsafe mechanics: Unsafe is a collection of low-level, unsafe operations;
// the static block below records the field offsets it needs.
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;
static {
    try {
        U = sun.misc.Unsafe.getUnsafe();
        Class<?> k = ConcurrentHashMap.class;
        SIZECTL = U.objectFieldOffset(k.getDeclaredField("sizeCtl"));
        TRANSFERINDEX = U.objectFieldOffset(k.getDeclaredField("transferIndex"));
        BASECOUNT = U.objectFieldOffset(k.getDeclaredField("baseCount"));
        CELLSBUSY = U.objectFieldOffset(k.getDeclaredField("cellsBusy"));
        Class<?> ck = CounterCell.class;
        CELLVALUE = U.objectFieldOffset(ck.getDeclaredField("value"));
        Class<?> ak = Node[].class;
        ABASE = U.arrayBaseOffset(ak);
        int scale = U.arrayIndexScale(ak);
        if ((scale & (scale - 1)) != 0)
            throw new Error("data type scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
    } catch (Exception e) {
        throw new Error(e);
    }
}
```
2. Constructors
```java
// No-arg constructor: a new map with the default initial capacity (16),
// load factor (0.75), and concurrencyLevel (16)
public ConcurrentHashMap() {
}

// Constructor with an initial capacity
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
               MAXIMUM_CAPACITY :
               // smallest power of two accommodating the capacity
               tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
    this.sizeCtl = cap;
}

// Constructor taking another map: puts all of its entries into this map
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}

// Constructor with initial capacity and load factor; concurrencyLevel is 1
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, 1);
}

// Constructor with initial capacity, load factor, and concurrency level (the
// estimated number of threads updating the map concurrently without
// contention). The final capacity is the nearest power of two.
public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)  // Use at least as many bins
        initialCapacity = concurrencyLevel;  // as estimated threads
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap = (size >= (long)MAXIMUM_CAPACITY) ?
        MAXIMUM_CAPACITY : tableSizeFor((int)size);
    this.sizeCtl = cap;
}
```
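The constructors rely on tableSizeFor to round a requested capacity up to a power of two. For reference, this is the JDK 8 implementation (the same bit-smearing trick HashMap uses):

```java
// Returns a power-of-two table size for the given desired capacity:
// smear the highest set bit of c-1 into every lower bit, then add 1.
private static final int tableSizeFor(int c) {
    int n = c - 1;
    n |= n >>> 1;
    n |= n >>> 2;
    n |= n >>> 4;
    n |= n >>> 8;
    n |= n >>> 16;
    return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
```

Note the 1.5x padding in the single-argument constructor above: new ConcurrentHashMap<>(16) sets sizeCtl = tableSizeFor(16 + 8 + 1) = 32, unlike HashMap, which would use 16.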
3. Inner Classes
Node<K,V> implements Map.Entry<K,V>: the basic key-value entry. It exposes only getters; there is no usable set method. Its fields and core methods:

```java
final int hash;
final K key;
volatile V val;          // volatile, guarantees visibility across threads
volatile Node<K,V> next; // volatile next pointer

Node(int hash, K key, V val, Node<K,V> next) {
    this.hash = hash;
    this.key = key;
    this.val = val;
    this.next = next;
}

// Supports map.get(): given a key and its hash, finds the matching node,
// starting from this node and following next pointers
Node<K,V> find(int h, Object k) {
    Node<K,V> e = this;
    if (k != null) {
        do {
            K ek;
            if (e.hash == h &&
                ((ek = e.key) == k || (ek != null && k.equals(ek))))
                return e;
        } while ((e = e.next) != null);
    }
    return null;
}
```

It also defines equals, hashCode, and the getter methods.
MapEntry<K,V> implements Map.Entry<K,V>: the entry type exported to callers (e.g. by the entry iterator), so operations on it write through to the map. Fields: final K key (non-null), V val (non-null), final ConcurrentHashMap<K,V> map. It provides equals, hashCode, toString, a constructor taking those fields, and getKey/getValue/setValue.
Segment<K,V> extends ReentrantLock implements Serializable: compared with earlier versions, this class is now kept only for serialization and deserialization compatibility:

```java
private static final long serialVersionUID = 2249069246763182397L;
final float loadFactor;
Segment(float lf) { this.loadFactor = lf; }
```
Traverser: the base class for traversal. Its subclasses are BaseIterator, KeySpliterator, ValueSpliterator, and EntrySpliterator; BaseIterator supports iteration, while the other three split keys, values, and entries for parallel traversal. BaseIterator in turn has three subclasses, KeyIterator, ValueIterator, and EntryIterator, which iterate over keys, values, and entries respectively.
ForwardingNode: a node that links the old table to the new one during a resize. It holds a nextTable pointer to the next table; its key, value, and next fields are all null, and its hash is MOVED (-1). Its find method searches in nextTable instead of treating itself as the head of a bin.
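For completeness, here is the JDK 8 definition (lightly reformatted); note how find restarts the lookup in nextTable and follows chained forwarding nodes iteratively rather than recursively:

```java
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;
    ForwardingNode(Node<K,V>[] tab) {
        super(MOVED, null, null, null); // hash = MOVED; key/val/next = null
        this.nextTable = tab;
    }

    Node<K,V> find(int h, Object k) {
        // loop to avoid arbitrarily deep recursion on forwarding nodes
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e; int n;
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null)
                return null;
            for (;;) {
                int eh; K ek;
                if ((eh = e.hash) == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e;
                if (eh < 0) {
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        continue outer; // chase into the next table
                    }
                    else
                        return e.find(h, k); // e.g. a TreeBin
                }
                if ((e = e.next) == null)
                    return null;
            }
        }
    }
}
```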
CollectionView<K,V,E> implements Collection<E>, java.io.Serializable: an abstract class defining the view operations. Its subclasses KeySetView, ValuesView, and EntrySetView are the key, value, and entry views; all of them are live views that support operations on the backing map.
CounterCell: a cell used to distribute the element count across threads under contention.
TreeBin and TreeNode: helper classes for building the red-black trees. Nodes are wrapped as TreeNodes and placed inside a TreeBin object; the TreeBin encapsulates the red-black tree itself.
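For orientation, the field layout of TreeBin in JDK 8 (abridged): it sits in the table slot with hash TREEBIN (-2) and carries its own read-write spin lock so that readers can proceed while a writer rebalances the tree:

```java
static final class TreeBin<K,V> extends Node<K,V> {
    TreeNode<K,V> root;              // root of the red-black tree
    volatile TreeNode<K,V> first;    // head of the linked node list
    volatile Thread waiter;          // thread waiting for the write lock
    volatile int lockState;
    // values for lockState
    static final int WRITER = 1;     // set while holding write lock
    static final int WAITER = 2;     // set when waiting for write lock
    static final int READER = 4;     // increment value for setting read lock
    // ... tree construction, putTreeVal, find, etc.
}
```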
4. Key Methods
1. The important atomic table accesses:

```java
// Volatile read of the node at index i of table tab
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
    return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}

// CAS: if tab[i] == c, set tab[i] = v and return true
static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                    Node<K,V> c, Node<K,V> v) {
    return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
}

// Volatile write tab[i] = v; only called inside locked regions
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
    U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
}
```
2. spread, equivalent to HashMap's hash() method, except that the argument passed in is already the key's hashCode:

```java
static final int spread(int h) {
    return (h ^ (h >>> 16)) & HASH_BITS;
}
```
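A quick worked example (ours, not JDK source) of how spread plus masking picks a bin: XORing in the high 16 bits lets them influence the index even for small tables, and the HASH_BITS mask keeps the result non-negative so it never collides with the special MOVED/TREEBIN/RESERVED hashes:

```java
public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int n = 16;                 // table length, a power of two
        int h = "hello".hashCode(); // 99162322
        int hash = spread(h);
        int index = (n - 1) & hash; // the same indexing putVal uses
        System.out.printf("h=%d spread=%d index=%d%n", h, hash, index);
    }
}
```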
3. initTable: the table size is derived from sizeCtl, and the table is only initialized when the first element is added. If sizeCtl was never set, the default table size of 16 is used; otherwise the table is sized by sizeCtl.

```java
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        // sizeCtl < 0: another thread is initializing, so yield and retry
        if ((sc = sizeCtl) < 0)
            Thread.yield(); // lost initialization race; just spin
        // CAS sizeCtl to -1, marking this thread as the initializer
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                if ((tab = table) == null || tab.length == 0) {
                    // use sizeCtl as the capacity if it is positive,
                    // otherwise fall back to the default capacity of 16
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    sc = n - (n >>> 2); // sc = 0.75 * n
                }
            } finally {
                sizeCtl = sc; // sizeCtl becomes the resize threshold, 0.75 * n
            }
            break;
        }
    }
    return tab;
}
```
4. treeifyBin: converts a bin's linked list into a red-black tree (or resizes instead while the table is small):

```java
private final void treeifyBin(Node<K,V>[] tab, int index) {
    Node<K,V> b; int n, sc;
    if (tab != null) {
        // if the table capacity is below 64 (MIN_TREEIFY_CAPACITY),
        // just resize instead of treeifying
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            tryPresize(n << 1);
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
            synchronized (b) { // lock the bin's head node
                if (tabAt(tab, index) == b) {
                    // copy the list into a doubly linked list of TreeNodes
                    TreeNode<K,V> hd = null, tl = null;
                    for (Node<K,V> e = b; e != null; e = e.next) {
                        TreeNode<K,V> p =
                            new TreeNode<K,V>(e.hash, e.key, e.val,
                                              null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    // wrap it in a TreeBin, which builds the red-black tree
                    setTabAt(tab, index, new TreeBin<K,V>(hd));
                }
            }
        }
    }
}
```
5. put: unlike HashMap, neither the key nor the value may be null.

```java
public V put(K key, V value) {
    return putVal(key, value, false);
}

public V putIfAbsent(K key, V value) {
    return putVal(key, value, true);
}

public void putAll(Map<? extends K, ? extends V> m) {
    tryPresize(m.size()); // pre-size the table
    for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
        putVal(e.getKey(), e.getValue(), false);
}

// implementation of put and putIfAbsent
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    // loop until the insertion succeeds
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // table is null or empty (lazy initialization): initialize it
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // (n - 1) & hash computes the element's slot in tab
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // empty bin: CAS in a new node, no lock needed
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;
        }
        else if ((fh = f.hash) == MOVED) // MOVED (-1): resize in progress
            tab = helpTransfer(tab, f);  // help with the resize
        else {
            // insert into a non-empty bin
            V oldVal = null;
            synchronized (f) { // lock the bin's head node
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) { // head of a linked list
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent) // replace unless putIfAbsent
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                // append at the tail of the list
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) { // a tree bin
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                // if the bin now holds at least 8 nodes, call treeifyBin
                // (which treeifies only once capacity reaches 64)
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    addCount(1L, binCount); // increment the element count by 1
    return null;
}
```
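A small contrast with HashMap (our demo, not JDK source): ConcurrentHashMap rejects null keys and values because, in a concurrent map, get returning null could not be distinguished from "key absent" without an extra, racy containsKey call:

```java
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;

public class NullKeyDemo {
    public static void main(String[] args) {
        HashMap<String, String> hm = new HashMap<>();
        hm.put(null, "ok");               // HashMap allows one null key
        System.out.println(hm.get(null)); // prints "ok"

        ConcurrentHashMap<String, String> chm = new ConcurrentHashMap<>();
        try {
            chm.put(null, "boom");        // throws NullPointerException
        } catch (NullPointerException e) {
            System.out.println("ConcurrentHashMap: null key rejected");
        }
    }
}
```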
6. addCount: adds x (1 for a put) to the element count; the check argument (binCount) decides whether to also test for resizing.

```java
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // try to CAS-update baseCount; on contention fall back to CounterCells
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    // check whether a resize is needed
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                // another thread is already resizing: join in
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            // this thread is the first to start the resize; nextTable is
            // still null at this point
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
            s = sumCount();
        }
    }
}
```
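addCount and helpTransfer both rely on resizeStamp, which encodes the current table length into the high bits of sizeCtl so that threads from different resize generations cannot interfere. For reference, the JDK 8 definition:

```java
// Returns the stamp bits for resizing a table of size n.
// Must be negative when shifted left by RESIZE_STAMP_SHIFT.
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
```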
7. helpTransfer: called to help an in-progress resize, at which point nextTable != null. It first obtains the nextTable reference from the ForwardingNode, then calls transfer; the calling thread therefore enters the resize directly in the copying phase.

```java
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab; int sc;
    if (tab != null && (f instanceof ForwardingNode) &&
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
        int rs = resizeStamp(tab.length); // stamp validating this resize
        while (nextTab == nextTable && table == tab &&
               (sc = sizeCtl) < 0) {
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            // register as one more resizing thread, then help copy
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}
```
8. get: looks up the value for a key. There is no locking; the only synchronization is the volatile read in tabAt.

```java
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    int h = spread(key.hashCode()); // compute the key's hash
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        // node hashes were computed with spread when they were inserted
        if ((eh = e.hash) == h) { // check the bin's head node first
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        // a negative hash marks a special node (TreeBin or ForwardingNode);
        // delegate the lookup to that node's find
        else if (eh < 0)
            return (p = e.find(h, key)) != null ? p.val : null;
        // otherwise walk the linked list
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
```
9. transfer: the resize method. It supports multiple threads resizing cooperatively, without a global lock, in two parts. The first part builds nextTable at twice the old capacity; this is done by a single thread, which is guaranteed by the resize-stamp arithmetic (RESIZE_STAMP_SHIFT) seen in addCount. The second part copies the elements of the old table into nextTable, and this part is open to any number of threads.

The copying walk works as follows. First the stride (how many slots each claim covers) is computed from the table length and NCPU; then tabAt reads the element at slot i:
- if the slot is empty, a ForwardingNode is CASed into the old table at i — this is also the hook that lets other threads detect and join the resize;
- if the slot holds a list head (fh >= 0), the list is split into a low list and a high list (the trailing lastRun segment is reused as-is, while earlier nodes are copied in reverse order), placed at slots i and i + n of nextTable;
- if the slot holds a TreeBin (fh < 0), the tree is split the same way, each half is checked to see whether it should be untreeified, and the results are placed at slots i and i + n of nextTable.

Once every slot has been processed the copy is complete: nextTable becomes the new table and sizeCtl is updated to 0.75 times the new capacity, finishing the resize.

Multi-threading rests on the forwarding nodes. The line `else if ((fh = f.hash) == MOVED) advance = true;` means that a thread encountering an already-forwarded slot simply moves on; combined with the per-bin synchronized lock, this coordinates the threads. Whenever a thread finishes a bin it sets that slot to the ForwardingNode; other threads that see it skip ahead, and the interleaved claims complete the copy safely.

```java
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) { // initiating
        try {
            // build a new table at twice the old capacity
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) { // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n;
    }
    int nextn = nextTab.length;
    // the forwarding node used to mark already-processed slots
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    // key concurrency flag: true means the current slot has been handled
    // and the loop may move on to the next one
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        // this while loop drives --i, claiming slots of the old table
        // from high to low in stride-sized chunks
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing)
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            // all slots copied: publish nextTab as table, clear nextTable
            if (finishing) {
                nextTable = null;
                table = nextTab;
                // threshold = 2n - n/2 = 1.5n = 0.75 * the new capacity
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            // CAS sizeCtl down by one: this thread is leaving the resize
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return; // other threads are still transferring
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        // an empty slot: install the ForwardingNode
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        // a ForwardingNode: this slot has already been processed
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            synchronized (f) { // lock the bin's head node
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn;
                    if (fh >= 0) { // a linked-list bin
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        // find the trailing run whose nodes all go to the
                        // same half; it can be reused without copying
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        // copy the remaining nodes, prepending each one
                        // (so they end up in reverse order)
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        // low list goes to slot i of nextTable
                        setTabAt(nextTab, i, ln);
                        // high list goes to slot i + n of nextTable
                        setTabAt(nextTab, i + n, hn);
                        // mark slot i of the old table as processed
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                    // a TreeBin: handled analogously to the list case
                    else if (f instanceof TreeBin) {
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        // if a half is small enough, the tree structure is
                        // no longer needed: untreeify it back into a list
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
```
10. The size-related operations can only return an estimate, because other threads may be concurrently changing the map's size.

```java
public int size() {
    long n = sumCount();
    return ((n < 0L) ? 0 :
            (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
            (int)n);
}

// like size, but returns a long; preferred over size in JDK 8
public long mappingCount() {
    long n = sumCount();
    return (n < 0L) ? 0L : n;
}

// the core counting method: baseCount plus all CounterCell values
final long sumCount() {
    CounterCell[] as = counterCells; CounterCell a;
    long sum = baseCount;
    if (as != null) {
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value; // sum of all counter cells
        }
    }
    return sum;
}
```
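A brief usage sketch (ours) of the two counting methods: mappingCount is preferred because it does not truncate counts above Integer.MAX_VALUE, and either result should be treated as an estimate while other threads are writing:

```java
import java.util.concurrent.ConcurrentHashMap;

public class SizeDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();
        for (int i = 0; i < 1_000; i++)
            map.put(i, i);
        // both report 1000 here, but only mappingCount can exceed int range
        System.out.println(map.size());         // 1000
        System.out.println(map.mappingCount()); // 1000
    }
}
```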