Java ConcurrentHashMap: segment locking to improve concurrency
ConcurrentHashMap in detail
JDK7
Segment
Before JDK 8, ConcurrentHashMap used Segment objects to lock the map in segments, which lowers the lock granularity and improves concurrency. A Segment is itself essentially a small HashMap: it holds an array of HashEntry objects, and each HashEntry in that array is both a key-value pair and the head node of a linked list.
The get method
- Compute the hash of the key
- Use the hash to locate the corresponding Segment
- Use the hash again to locate the slot within the Segment's array
The put method
- Compute the hash of the key
- Use the hash to locate the corresponding Segment
- Acquire the Segment's reentrant lock
- Use the hash again to locate the slot within the Segment's array
- Insert a new HashEntry, or overwrite the existing one
- Release the lock
The drawback of this design is that every access needs two hash operations: the first locates the Segment, and the second locates the head of the linked list that holds the element (a sketch of the idea follows).
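To make the two-level lookup concrete, here is a minimal sketch of the JDK 7 idea, not the real implementation. TinySegmentedMap, Seg, and segmentFor are illustrative names; only the overall structure (a fixed segment array, where each segment is a small hash table guarded by its own ReentrantLock, which the real Segment class actually extends) mirrors the JDK.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

// Toy model of the JDK 7 design: one lock per segment instead of one per map.
class TinySegmentedMap<K, V> {
    private static final int SEGMENTS = 16; // same as the default concurrencyLevel

    private final Seg<K, V>[] segments;

    @SuppressWarnings("unchecked")
    TinySegmentedMap() {
        segments = (Seg<K, V>[]) new Seg[SEGMENTS];
        for (int i = 0; i < SEGMENTS; i++)
            segments[i] = new Seg<>();
    }

    // First hash operation: pick the segment (the real code uses a different re-hash).
    private Seg<K, V> segmentFor(Object key) {
        int h = key.hashCode();
        return segments[(h ^ (h >>> 16)) & (SEGMENTS - 1)];
    }

    public V get(K key) { return segmentFor(key).get(key); }   // second hash happens inside

    public V put(K key, V value) { return segmentFor(key).put(key, value); }

    // Like the real Segment, each segment extends ReentrantLock and owns its own table.
    private static final class Seg<K, V> extends ReentrantLock {
        private final Map<K, V> table = new HashMap<>();

        V get(K key) {
            lock(); // note: the real JDK 7 get is mostly lock-free via volatile reads
            try { return table.get(key); } finally { unlock(); }
        }

        V put(K key, V value) {
            lock();
            try { return table.put(key, value); } finally { unlock(); }
        }
    }
}

Two threads writing keys that land in different segments never contend, which is exactly where the concurrency win comes from.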
JDK8
JDK 8 borrows the design of HashMap, using an array plus linked lists plus red-black trees. It abandons the segment-locking idea and instead relies heavily on CAS operations internally.
CAS
CAS is short for compare and swap, the compare-and-exchange operation; it belongs to the optimistic-locking family.
A CAS takes three operands: the value in memory (V), the expected old value (A), and the new value (B). If the value in memory equals A, memory is updated to B; otherwise nothing is written. Callers typically retry in a loop, re-reading the data until V matches A and the swap succeeds.
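A minimal sketch of that retry loop, using AtomicInteger from java.util.concurrent.atomic (whose compareAndSet maps to a hardware CAS); CasLoopDemo and incrementAndDouble are illustrative names:

import java.util.concurrent.atomic.AtomicInteger;

public class CasLoopDemo {
    private static final AtomicInteger counter = new AtomicInteger(0);

    // Classic CAS retry loop: read V, compute B, swap only if memory still holds V.
    static int incrementAndDouble() {
        for (;;) {
            int expected = counter.get();          // V, also used as A
            int update = (expected + 1) * 2;       // B
            if (counter.compareAndSet(expected, update)) {
                return update;                     // the swap succeeded
            }
            // another thread changed the value first; loop and retry
        }
    }

    public static void main(String[] args) {
        System.out.println(incrementAndDouble());  // 2
        System.out.println(incrementAndDouble());  // 6
    }
}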
Optimistic locking
Optimistic locking optimistically assumes that no conflict will occur, so it processes data without taking a lock, for example by attaching a version number to each record and committing an update only if the version is unchanged. Its performance is higher than pessimistic locking.
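A hedged sketch of the version-number idea using AtomicStampedReference, where the stamp plays the role of the version column on a record; the account-balance scenario is invented for illustration:

import java.util.concurrent.atomic.AtomicStampedReference;

public class VersionedUpdateDemo {
    public static void main(String[] args) {
        // balance = 100, version = 0
        AtomicStampedReference<Integer> balance =
                new AtomicStampedReference<>(100, 0);

        int[] versionHolder = new int[1];
        Integer current = balance.get(versionHolder);   // read value and version together
        int version = versionHolder[0];

        // Commit only if nobody has bumped the version since we read it.
        boolean updated = balance.compareAndSet(current, current - 30, version, version + 1);
        System.out.println("updated=" + updated + ", balance=" + balance.getReference());
        // prints: updated=true, balance=70
    }
}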
Pessimistic locking
Pessimistic locking pessimistically assumes that conflicts are certain to occur, so it locks the resource: only one thread can operate on it at a time, and the next thread gets access only after the previous holder releases the lock.
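For contrast, a minimal pessimistic sketch of the same kind of update: the lock is taken unconditionally before touching the shared state, so no retry loop is needed (the PessimisticDemo class is illustrative):

import java.util.concurrent.locks.ReentrantLock;

public class PessimisticDemo {
    private final ReentrantLock lock = new ReentrantLock();
    private int balance = 100;

    // Every caller blocks until it owns the lock.
    public void withdraw(int amount) {
        lock.lock();
        try {
            if (balance >= amount) {
                balance -= amount;
            }
        } finally {
            lock.unlock();               // always release, even on exceptions
        }
    }

    public int balance() {
        lock.lock();
        try { return balance; } finally { lock.unlock(); }
    }

    public static void main(String[] args) {
        PessimisticDemo account = new PessimisticDemo();
        account.withdraw(30);
        System.out.println(account.balance());   // 70
    }
}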
Source code analysis
Important fields
// The hash table itself. Lazily initialized on the first insert; the capacity is always a power of two.
transient volatile Node<K,V>[] table;

// The next table to use; non-null only while resizing, null otherwise.
private transient volatile Node<K,V>[] nextTable;

/**
 * Base counter value, used mainly when there is no contention,
 * but also as a fallback during table initialization
 * races. Updated via CAS.
 */
private transient volatile long baseCount;

// Controls initialization and resizing:
//   0    : default
//   -1   : the table is being initialized
//   > 0  : the resize threshold of the table
//   < -1 : multiple threads are resizing; the value is -(1 + number of resizing threads)
private transient volatile int sizeCtl;

/**
 * The next table index (plus one) to split while resizing.
 */
private transient volatile int transferIndex;

/**
 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 */
private transient volatile int cellsBusy;

/**
 * Table of counter cells. When non-null, size is a power of 2.
 */
private transient volatile CounterCell[] counterCells;

// views
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;
Constructors
/**
* Creates a new, empty map with the default initial table size (16).
*/
public ConcurrentHashMap() {
}
/**
* Creates a new, empty map with an initial table size
* accommodating the specified number of elements without the need
* to dynamically resize.
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
* @throws IllegalArgumentException if the initial capacity of
* elements is negative
*/
public ConcurrentHashMap(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
/**
* Creates a new map with the same mappings as the given map.
*
* @param m the map
*/
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
putAll(m);
}
/**
* Creates a new, empty map with an initial table size based on
* the given number of elements ({@code initialCapacity}) and
* initial table density ({@code loadFactor}).
*
* @param initialCapacity the initial capacity. The implementation
* performs internal sizing to accommodate this many elements,
* given the specified load factor.
* @param loadFactor the load factor (table density) for
* establishing the initial table size
* @throws IllegalArgumentException if the initial capacity of
* elements is negative or the load factor is nonpositive
*
* @since 1.6
*/
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
/**
* Creates a new, empty map with an initial table size based on
* the given number of elements ({@code initialCapacity}), table
* density ({@code loadFactor}), and number of concurrently
* updating threads ({@code concurrencyLevel}).
*
* @param initialCapacity the initial capacity. The implementation
* performs internal sizing to accommodate this many elements,
* given the specified load factor.
* @param loadFactor the load factor (table density) for
* establishing the initial table size
* @param concurrencyLevel the estimated number of concurrently
* updating threads. The implementation may use this value as
* a sizing hint.
* @throws IllegalArgumentException if the initial capacity is
* negative or the load factor or concurrencyLevel are
* nonpositive
*/
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}
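One detail worth noticing in the single-argument constructor above: it sizes the table to hold initialCapacity elements rather than using it as a bucket count, padding by half again before rounding up to a power of two. The following standalone re-implementation of that math (SizingDemo is an illustrative harness; tableSizeFor is copied from the JDK) makes the effect visible:

public class SizingDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Copied from the JDK: smallest power of two >= c.
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    // Mirrors the ConcurrentHashMap(int) constructor's computation of sizeCtl.
    static int initialSizeCtl(int initialCapacity) {
        return (initialCapacity >= (MAXIMUM_CAPACITY >>> 1))
                ? MAXIMUM_CAPACITY
                : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    }

    public static void main(String[] args) {
        System.out.println(initialSizeCtl(16));   // 32, not 16
        System.out.println(initialSizeCtl(10));   // 16
    }
}

So new ConcurrentHashMap<>(16) starts with a sizeCtl of 32, where new HashMap<>(16) would use a table of 16.

Important methods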
The put method
How does ConcurrentHashMap guarantee thread safety when inserting?
public V put(K key, V value) {
return putVal(key, value, false);
}

final V putVal(K key, V value, boolean onlyIfAbsent) {
    // ConcurrentHashMap does not allow a null key or a null value
if (key == null || value == null) throw new NullPointerException();
    // compute the hash from the key's hashCode
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
        // the table is null or empty: the hash table has not been initialized yet, so initialize it
if (tab == null || (n = tab.length) == 0)
tab = initTable();
        // the slot at this index is null: no element there yet
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // add the new node with a CAS instead of a lock
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
        // a head-node hash of MOVED (-1) means the table is being resized
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
            // lock on the head node of the bucket
synchronized (f) {
                // re-check that f is still the head node at the target index, in case it was changed in the meantime
if (tabAt(tab, i) == f) {
                // hash >= 0: an ordinary linked list
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
                // red-black tree: a TreeBin's hash is TREEBIN, which is -2
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
            // Recap of how binCount was assigned above:
            //   0  : default initial value
            //   >=1: linked list; incremented while traversing until the insertion point is found
            //   2  : red-black tree
if (binCount != 0) {
                // if the chain length has reached 8 (TREEIFY_THRESHOLD), convert the list to a red-black tree
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
                // oldVal != null means an existing key's value was replaced and nothing was added; return it directly
if (oldVal != null)
return oldVal;
break;
}
}
}
    // a new mapping was added: update the count (this may trigger a resize)
addCount(1L, binCount);
return null;
}

The hash function computes the hash from hashCode. The computation differs slightly from HashMap's: after XOR-ing the high sixteen bits into the low sixteen bits, the result is additionally AND-ed with HASH_BITS. HASH_BITS is 0x7fffffff, which is thirty-one 1-bits in binary, and the AND guarantees that the resulting hash is non-negative.
Negative hash values carry special meanings inside ConcurrentHashMap: -1 marks a ForwardingNode and -2 marks a TreeBin.
static final int spread(int h) {
    // (h ^ (h >>> 16)) is the same bit-mixing step as in HashMap
    // AND with HASH_BITS to force a non-negative result
return (h ^ (h >>> 16)) & HASH_BITS;
}
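A quick standalone check (SpreadDemo is an illustrative harness; spread and HASH_BITS are copied from the JDK) shows how the mask strips the sign bit:

public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff;   // usable bits of a normal node hash

    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int h = "some key".hashCode();
        System.out.println(spread(h));     // always >= 0
        System.out.println(spread(-1));    // 2147418112, not negative
        // (-1) ^ (-1 >>> 16) = 0xFFFFFFFF ^ 0x0000FFFF = 0xFFFF0000; & 0x7FFFFFFF = 0x7FFF0000
    }
}

The operation that initializes the hash table: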
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
    // initialization is needed only while the table is still null or empty
while ((tab = table) == null || tab.length == 0) {
        // sizeCtl < 0 means another thread is already initializing (or resizing) the table
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
        // CAS SIZECTL to -1 to announce that this thread is about to initialize the table
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
                // n >>> 2 is n/4, and n - n/4 = 3n/4, i.e. n * 0.75: the next resize threshold
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
break;
}
}
return tab;
}

After a successful insert, putVal calls addCount to update the element count and, when the new size reaches the threshold stored in sizeCtl, to start a resize or help one that is already running:

private final void addCount(long x, int check) {
CounterCell[] as; long b, s;
    // Striped counting, LongAdder-style: first try a single CAS on baseCount;
    // if counter cells already exist or that CAS fails under contention, fall through.
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a; long v; int m;
boolean uncontended = true;
if (as == null || (m = as.length - 1) < 0 ||
(a = as[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended =
U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
fullAddCount(x, uncontended);
return;
}
if (check <= 1)
return;
s = sumCount();
}
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        // The new count s has reached sizeCtl: start a transfer to a larger
        // table, or bump the resizer count and help with one in progress.
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
(n = tab.length) < MAXIMUM_CAPACITY) {
int rs = resizeStamp(n);
if (sc < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
s = sumCount();
}
}
}

The computeIfAbsent and putIfAbsent methods
ConcurrentHashMap has two rather special methods, and if you put them to good use they pay off handsomely. The key differences (a sketch contrasting them follows the list):
- When the key already exists, putIfAbsent still pays for producing the value, because the value argument is evaluated eagerly; if the value is expensive to obtain, that work is simply wasted (pay special attention to this point). computeIfAbsent only runs its mapping function when the key is absent.
- When the key is absent, putIfAbsent returns null (watch out for NullPointerExceptions when chaining or unboxing), while computeIfAbsent returns the newly computed value.
- When the key is absent, putIfAbsent can insert a null value while computeIfAbsent cannot, which changes what a later containsKey reports. Note that this contrast applies to Map implementations that permit null values, such as HashMap; ConcurrentHashMap rejects null values outright, and a mapping function that returns null simply adds no entry.
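A small sketch of the first two points; expensiveValue stands in for any costly lookup or construction, and IfAbsentDemo is an illustrative name:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class IfAbsentDemo {
    // Stand-in for an expensive lookup or construction.
    static String expensiveValue(String key) {
        System.out.println("computing value for " + key);
        return key.toUpperCase();
    }

    public static void main(String[] args) {
        ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();
        cache.put("a", "A");

        // The argument is evaluated eagerly: expensiveValue runs even though "a" is present.
        String prevA = cache.putIfAbsent("a", expensiveValue("a"));
        System.out.println(prevA);                                // "A" (the existing value)

        // The mapping function runs only when the key is absent.
        cache.computeIfAbsent("a", IfAbsentDemo::expensiveValue); // nothing is computed or printed

        // Absent key: putIfAbsent returns null, computeIfAbsent returns the new value.
        System.out.println(cache.putIfAbsent("b", "B"));                              // null
        System.out.println(cache.computeIfAbsent("c", IfAbsentDemo::expensiveValue)); // "C"
    }
}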
This concludes the detailed discussion of how Java's ConcurrentHashMap uses segment locking (and, since JDK 8, CAS with per-bucket synchronization) to improve concurrency.