1 /* 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. Oracle designates this 7 * particular file as subject to the "Classpath" exception as provided 8 * by Oracle in the LICENSE file that accompanied this code. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 /* 26 * This file is available under and governed by the GNU General Public 27 * License version 2 only, as published by the Free Software Foundation. 28 * However, the following notice accompanied the original version of this 29 * file: 30 * 31 * Written by Doug Lea with assistance from members of JCP JSR-166 32 * Expert Group and released to the public domain, as explained at 33 * http://creativecommons.org/publicdomain/zero/1.0/ 34 */ 35 36 package java.util.concurrent; 37 import java.util.concurrent.locks.*; 38 import java.util.*; 39 import java.io.Serializable; 40 41 /** 42 * A hash table supporting full concurrency of retrievals and 43 * adjustable expected concurrency for updates. This class obeys the 44 * same functional specification as {@link java.util.Hashtable}, and 45 * includes versions of methods corresponding to each method of 46 * <tt>Hashtable</tt>. However, even though all operations are 47 * thread-safe, retrieval operations do <em>not</em> entail locking, 48 * and there is <em>not</em> any support for locking the entire table 49 * in a way that prevents all access. This class is fully 50 * interoperable with <tt>Hashtable</tt> in programs that rely on its 51 * thread safety but not on its synchronization details. 52 * 53 * <p> Retrieval operations (including <tt>get</tt>) generally do not 54 * block, so may overlap with update operations (including 55 * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results 56 * of the most recently <em>completed</em> update operations holding 57 * upon their onset. For aggregate operations such as <tt>putAll</tt> 58 * and <tt>clear</tt>, concurrent retrievals may reflect insertion or 59 * removal of only some entries. Similarly, Iterators and 60 * Enumerations return elements reflecting the state of the hash table 61 * at some point at or since the creation of the iterator/enumeration. 62 * They do <em>not</em> throw {@link ConcurrentModificationException}. 63 * However, iterators are designed to be used by only one thread at a time. 64 * 65 * <p> The allowed concurrency among update operations is guided by 66 * the optional <tt>concurrencyLevel</tt> constructor argument 67 * (default <tt>16</tt>), which is used as a hint for internal sizing. The 68 * table is internally partitioned to try to permit the indicated 69 * number of concurrent updates without contention. Because placement 70 * in hash tables is essentially random, the actual concurrency will 71 * vary. Ideally, you should choose a value to accommodate as many 72 * threads as will ever concurrently modify the table. Using a 73 * significantly higher value than you need can waste space and time, 74 * and a significantly lower value can lead to thread contention. But 75 * overestimates and underestimates within an order of magnitude do 76 * not usually have much noticeable impact. A value of one is 77 * appropriate when it is known that only one thread will modify and 78 * all others will only read. Also, resizing this or any other kind of 79 * hash table is a relatively slow operation, so, when possible, it is 80 * a good idea to provide estimates of expected table sizes in 81 * constructors. 82 * 83 * <p>This class and its views and iterators implement all of the 84 * <em>optional</em> methods of the {@link Map} and {@link Iterator} 85 * interfaces. 86 * 87 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class 88 * does <em>not</em> allow <tt>null</tt> to be used as a key or value. 89 * 90 * <p>This class is a member of the 91 * <a href="{@docRoot}/../technotes/guides/collections/index.html"> 92 * Java Collections Framework</a>. 93 * 94 * @since 1.5 95 * @author Doug Lea 96 * @param <K> the type of keys maintained by this map 97 * @param <V> the type of mapped values 98 */ 99 public class ConcurrentHashMap<K, V> extends AbstractMap<K, V> 100 implements ConcurrentMap<K, V>, Serializable { 101 private static final long serialVersionUID = 7249069246763182397L; 102 103 /* 104 * The basic strategy is to subdivide the table among Segments, 105 * each of which itself is a concurrently readable hash table. To 106 * reduce footprint, all but one segments are constructed only 107 * when first needed (see ensureSegment). To maintain visibility 108 * in the presence of lazy construction, accesses to segments as 109 * well as elements of segment's table must use volatile access, 110 * which is done via Unsafe within methods segmentAt etc 111 * below. These provide the functionality of AtomicReferenceArrays 112 * but reduce the levels of indirection. Additionally, 113 * volatile-writes of table elements and entry "next" fields 114 * within locked operations use the cheaper "lazySet" forms of 115 * writes (via putOrderedObject) because these writes are always 116 * followed by lock releases that maintain sequential consistency 117 * of table updates. 118 * 119 * Historical note: The previous version of this class relied 120 * heavily on "final" fields, which avoided some volatile reads at 121 * the expense of a large initial footprint. Some remnants of 122 * that design (including forced construction of segment 0) exist 123 * to ensure serialization compatibility. 124 */ 125 126 /* ---------------- Constants -------------- */ 127 128 /** 129 * The default initial capacity for this table, 130 * used when not otherwise specified in a constructor. 131 */ 132 static final int DEFAULT_INITIAL_CAPACITY = 16; 133 134 /** 135 * The default load factor for this table, used when not 136 * otherwise specified in a constructor. 137 */ 138 static final float DEFAULT_LOAD_FACTOR = 0.75f; 139 140 /** 141 * The default concurrency level for this table, used when not 142 * otherwise specified in a constructor. 143 */ 144 static final int DEFAULT_CONCURRENCY_LEVEL = 16; 145 146 /** 147 * The maximum capacity, used if a higher value is implicitly 148 * specified by either of the constructors with arguments. MUST 149 * be a power of two <= 1<<30 to ensure that entries are indexable 150 * using ints. 151 */ 152 static final int MAXIMUM_CAPACITY = 1 << 30; 153 154 /** 155 * The minimum capacity for per-segment tables. Must be a power 156 * of two, at least two to avoid immediate resizing on next use 157 * after lazy construction. 158 */ 159 static final int MIN_SEGMENT_TABLE_CAPACITY = 2; 160 161 /** 162 * The maximum number of segments to allow; used to bound 163 * constructor arguments. Must be power of two less than 1 << 24. 164 */ 165 static final int MAX_SEGMENTS = 1 << 16; // slightly conservative 166 167 /** 168 * Number of unsynchronized retries in size and containsValue 169 * methods before resorting to locking. This is used to avoid 170 * unbounded retries if tables undergo continuous modification 171 * which would make it impossible to obtain an accurate result. 172 */ 173 static final int RETRIES_BEFORE_LOCK = 2; 174 175 /* ---------------- Fields -------------- */ 176 177 /** 178 * A randomizing value associated with this instance that is applied to 179 * hash code of keys to make hash collisions harder to find. 180 */ 181 private transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); 182 183 /** 184 * Mask value for indexing into segments. The upper bits of a 185 * key's hash code are used to choose the segment. 186 */ 187 final int segmentMask; 188 189 /** 190 * Shift value for indexing within segments. 191 */ 192 final int segmentShift; 193 194 /** 195 * The segments, each of which is a specialized hash table. 196 */ 197 final Segment<K,V>[] segments; 198 199 transient Set<K> keySet; 200 transient Set<Map.Entry<K,V>> entrySet; 201 transient Collection<V> values; 202 203 /** 204 * ConcurrentHashMap list entry. Note that this is never exported 205 * out as a user-visible Map.Entry. 206 */ 207 static final class HashEntry<K,V> { 208 final int hash; 209 final K key; 210 volatile V value; 211 volatile HashEntry<K,V> next; 212 213 HashEntry(int hash, K key, V value, HashEntry<K,V> next) { 214 this.hash = hash; 215 this.key = key; 216 this.value = value; 217 this.next = next; 218 } 219 220 /** 221 * Sets next field with volatile write semantics. (See above 222 * about use of putOrderedObject.) 223 */ 224 final void setNext(HashEntry<K,V> n) { 225 UNSAFE.putOrderedObject(this, nextOffset, n); 226 } 227 228 // Unsafe mechanics 229 static final sun.misc.Unsafe UNSAFE; 230 static final long nextOffset; 231 static { 232 try { 233 UNSAFE = sun.misc.Unsafe.getUnsafe(); 234 Class<?> k = HashEntry.class; 235 nextOffset = UNSAFE.objectFieldOffset 236 (k.getDeclaredField("next")); 237 } catch (Exception e) { 238 throw new Error(e); 239 } 240 } 241 } 242 243 /** 244 * Gets the ith element of given table (if nonnull) with volatile 245 * read semantics. Note: This is manually integrated into a few 246 * performance-sensitive methods to reduce call overhead. 247 */ 248 @SuppressWarnings("unchecked") 249 static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) { 250 return (tab == null) ? null : 251 (HashEntry<K,V>) UNSAFE.getObjectVolatile 252 (tab, ((long)i << TSHIFT) + TBASE); 253 } 254 255 /** 256 * Sets the ith element of given table, with volatile write 257 * semantics. (See above about use of putOrderedObject.) 258 */ 259 static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i, 260 HashEntry<K,V> e) { 261 UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); 262 } 263 264 /** 265 * Applies a supplemental hash function to a given hashCode, which 266 * defends against poor quality hash functions. This is critical 267 * because ConcurrentHashMap uses power-of-two length hash tables, 268 * that otherwise encounter collisions for hashCodes that do not 269 * differ in lower or upper bits. 270 */ 271 private int hash(Object k) { 272 int h = hashSeed; 273 274 if (k instanceof String) { 275 return h ^ ((String) k).hash32(); 276 } 277 278 h ^= k.hashCode(); 279 280 // Spread bits to regularize both segment and index locations, 281 // using variant of single-word Wang/Jenkins hash. 282 h += (h << 15) ^ 0xffffcd7d; 283 h ^= (h >>> 10); 284 h += (h << 3); 285 h ^= (h >>> 6); 286 h += (h << 2) + (h << 14); 287 return h ^ (h >>> 16); 288 } 289 290 /** 291 * Segments are specialized versions of hash tables. This 292 * subclasses from ReentrantLock opportunistically, just to 293 * simplify some locking and avoid separate construction. 294 */ 295 static final class Segment<K,V> extends ReentrantLock implements Serializable { 296 /* 297 * Segments maintain a table of entry lists that are always 298 * kept in a consistent state, so can be read (via volatile 299 * reads of segments and tables) without locking. This 300 * requires replicating nodes when necessary during table 301 * resizing, so the old lists can be traversed by readers 302 * still using old version of table. 303 * 304 * This class defines only mutative methods requiring locking. 305 * Except as noted, the methods of this class perform the 306 * per-segment versions of ConcurrentHashMap methods. (Other 307 * methods are integrated directly into ConcurrentHashMap 308 * methods.) These mutative methods use a form of controlled 309 * spinning on contention via methods scanAndLock and 310 * scanAndLockForPut. These intersperse tryLocks with 311 * traversals to locate nodes. The main benefit is to absorb 312 * cache misses (which are very common for hash tables) while 313 * obtaining locks so that traversal is faster once 314 * acquired. We do not actually use the found nodes since they 315 * must be re-acquired under lock anyway to ensure sequential 316 * consistency of updates (and in any case may be undetectably 317 * stale), but they will normally be much faster to re-locate. 318 * Also, scanAndLockForPut speculatively creates a fresh node 319 * to use in put if no node is found. 320 */ 321 322 private static final long serialVersionUID = 2249069246763182397L; 323 324 /** 325 * The maximum number of times to tryLock in a prescan before 326 * possibly blocking on acquire in preparation for a locked 327 * segment operation. On multiprocessors, using a bounded 328 * number of retries maintains cache acquired while locating 329 * nodes. 330 */ 331 static final int MAX_SCAN_RETRIES = 332 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; 333 334 /** 335 * The per-segment table. Elements are accessed via 336 * entryAt/setEntryAt providing volatile semantics. 337 */ 338 transient volatile HashEntry<K,V>[] table; 339 340 /** 341 * The number of elements. Accessed only either within locks 342 * or among other volatile reads that maintain visibility. 343 */ 344 transient int count; 345 346 /** 347 * The total number of mutative operations in this segment. 348 * Even though this may overflows 32 bits, it provides 349 * sufficient accuracy for stability checks in CHM isEmpty() 350 * and size() methods. Accessed only either within locks or 351 * among other volatile reads that maintain visibility. 352 */ 353 transient int modCount; 354 355 /** 356 * The table is rehashed when its size exceeds this threshold. 357 * (The value of this field is always <tt>(int)(capacity * 358 * loadFactor)</tt>.) 359 */ 360 transient int threshold; 361 362 /** 363 * The load factor for the hash table. Even though this value 364 * is same for all segments, it is replicated to avoid needing 365 * links to outer object. 366 * @serial 367 */ 368 final float loadFactor; 369 370 Segment(float lf, int threshold, HashEntry<K,V>[] tab) { 371 this.loadFactor = lf; 372 this.threshold = threshold; 373 this.table = tab; 374 } 375 376 final V put(K key, int hash, V value, boolean onlyIfAbsent) { 377 HashEntry<K,V> node = tryLock() ? null : 378 scanAndLockForPut(key, hash, value); 379 V oldValue; 380 try { 381 HashEntry<K,V>[] tab = table; 382 int index = (tab.length - 1) & hash; 383 HashEntry<K,V> first = entryAt(tab, index); 384 for (HashEntry<K,V> e = first;;) { 385 if (e != null) { 386 K k; 387 if ((k = e.key) == key || 388 (e.hash == hash && key.equals(k))) { 389 oldValue = e.value; 390 if (!onlyIfAbsent) { 391 e.value = value; 392 ++modCount; 393 } 394 break; 395 } 396 e = e.next; 397 } 398 else { 399 if (node != null) 400 node.setNext(first); 401 else 402 node = new HashEntry<K,V>(hash, key, value, first); 403 int c = count + 1; 404 if (c > threshold && tab.length < MAXIMUM_CAPACITY) 405 rehash(node); 406 else 407 setEntryAt(tab, index, node); 408 ++modCount; 409 count = c; 410 oldValue = null; 411 break; 412 } 413 } 414 } finally { 415 unlock(); 416 } 417 return oldValue; 418 } 419 420 /** 421 * Doubles size of table and repacks entries, also adding the 422 * given node to new table 423 */ 424 @SuppressWarnings("unchecked") 425 private void rehash(HashEntry<K,V> node) { 426 /* 427 * Reclassify nodes in each list to new table. Because we 428 * are using power-of-two expansion, the elements from 429 * each bin must either stay at same index, or move with a 430 * power of two offset. We eliminate unnecessary node 431 * creation by catching cases where old nodes can be 432 * reused because their next fields won't change. 433 * Statistically, at the default threshold, only about 434 * one-sixth of them need cloning when a table 435 * doubles. The nodes they replace will be garbage 436 * collectable as soon as they are no longer referenced by 437 * any reader thread that may be in the midst of 438 * concurrently traversing table. Entry accesses use plain 439 * array indexing because they are followed by volatile 440 * table write. 441 */ 442 HashEntry<K,V>[] oldTable = table; 443 int oldCapacity = oldTable.length; 444 int newCapacity = oldCapacity << 1; 445 threshold = (int)(newCapacity * loadFactor); 446 HashEntry<K,V>[] newTable = 447 (HashEntry<K,V>[]) new HashEntry<?,?>[newCapacity]; 448 int sizeMask = newCapacity - 1; 449 for (int i = 0; i < oldCapacity ; i++) { 450 HashEntry<K,V> e = oldTable[i]; 451 if (e != null) { 452 HashEntry<K,V> next = e.next; 453 int idx = e.hash & sizeMask; 454 if (next == null) // Single node on list 455 newTable[idx] = e; 456 else { // Reuse consecutive sequence at same slot 457 HashEntry<K,V> lastRun = e; 458 int lastIdx = idx; 459 for (HashEntry<K,V> last = next; 460 last != null; 461 last = last.next) { 462 int k = last.hash & sizeMask; 463 if (k != lastIdx) { 464 lastIdx = k; 465 lastRun = last; 466 } 467 } 468 newTable[lastIdx] = lastRun; 469 // Clone remaining nodes 470 for (HashEntry<K,V> p = e; p != lastRun; p = p.next) { 471 V v = p.value; 472 int h = p.hash; 473 int k = h & sizeMask; 474 HashEntry<K,V> n = newTable[k]; 475 newTable[k] = new HashEntry<K,V>(h, p.key, v, n); 476 } 477 } 478 } 479 } 480 int nodeIndex = node.hash & sizeMask; // add the new node 481 node.setNext(newTable[nodeIndex]); 482 newTable[nodeIndex] = node; 483 table = newTable; 484 } 485 486 /** 487 * Scans for a node containing given key while trying to 488 * acquire lock, creating and returning one if not found. Upon 489 * return, guarantees that lock is held. UNlike in most 490 * methods, calls to method equals are not screened: Since 491 * traversal speed doesn't matter, we might as well help warm 492 * up the associated code and accesses as well. 493 * 494 * @return a new node if key not found, else null 495 */ 496 private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) { 497 HashEntry<K,V> first = entryForHash(this, hash); 498 HashEntry<K,V> e = first; 499 HashEntry<K,V> node = null; 500 int retries = -1; // negative while locating node 501 while (!tryLock()) { 502 HashEntry<K,V> f; // to recheck first below 503 if (retries < 0) { 504 if (e == null) { 505 if (node == null) // speculatively create node 506 node = new HashEntry<K,V>(hash, key, value, null); 507 retries = 0; 508 } 509 else if (key.equals(e.key)) 510 retries = 0; 511 else 512 e = e.next; 513 } 514 else if (++retries > MAX_SCAN_RETRIES) { 515 lock(); 516 break; 517 } 518 else if ((retries & 1) == 0 && 519 (f = entryForHash(this, hash)) != first) { 520 e = first = f; // re-traverse if entry changed 521 retries = -1; 522 } 523 } 524 return node; 525 } 526 527 /** 528 * Scans for a node containing the given key while trying to 529 * acquire lock for a remove or replace operation. Upon 530 * return, guarantees that lock is held. Note that we must 531 * lock even if the key is not found, to ensure sequential 532 * consistency of updates. 533 */ 534 private void scanAndLock(Object key, int hash) { 535 // similar to but simpler than scanAndLockForPut 536 HashEntry<K,V> first = entryForHash(this, hash); 537 HashEntry<K,V> e = first; 538 int retries = -1; 539 while (!tryLock()) { 540 HashEntry<K,V> f; 541 if (retries < 0) { 542 if (e == null || key.equals(e.key)) 543 retries = 0; 544 else 545 e = e.next; 546 } 547 else if (++retries > MAX_SCAN_RETRIES) { 548 lock(); 549 break; 550 } 551 else if ((retries & 1) == 0 && 552 (f = entryForHash(this, hash)) != first) { 553 e = first = f; 554 retries = -1; 555 } 556 } 557 } 558 559 /** 560 * Remove; match on key only if value null, else match both. 561 */ 562 final V remove(Object key, int hash, Object value) { 563 if (!tryLock()) 564 scanAndLock(key, hash); 565 V oldValue = null; 566 try { 567 HashEntry<K,V>[] tab = table; 568 int index = (tab.length - 1) & hash; 569 HashEntry<K,V> e = entryAt(tab, index); 570 HashEntry<K,V> pred = null; 571 while (e != null) { 572 K k; 573 HashEntry<K,V> next = e.next; 574 if ((k = e.key) == key || 575 (e.hash == hash && key.equals(k))) { 576 V v = e.value; 577 if (value == null || value == v || value.equals(v)) { 578 if (pred == null) 579 setEntryAt(tab, index, next); 580 else 581 pred.setNext(next); 582 ++modCount; 583 --count; 584 oldValue = v; 585 } 586 break; 587 } 588 pred = e; 589 e = next; 590 } 591 } finally { 592 unlock(); 593 } 594 return oldValue; 595 } 596 597 final boolean replace(K key, int hash, V oldValue, V newValue) { 598 if (!tryLock()) 599 scanAndLock(key, hash); 600 boolean replaced = false; 601 try { 602 HashEntry<K,V> e; 603 for (e = entryForHash(this, hash); e != null; e = e.next) { 604 K k; 605 if ((k = e.key) == key || 606 (e.hash == hash && key.equals(k))) { 607 if (oldValue.equals(e.value)) { 608 e.value = newValue; 609 ++modCount; 610 replaced = true; 611 } 612 break; 613 } 614 } 615 } finally { 616 unlock(); 617 } 618 return replaced; 619 } 620 621 final V replace(K key, int hash, V value) { 622 if (!tryLock()) 623 scanAndLock(key, hash); 624 V oldValue = null; 625 try { 626 HashEntry<K,V> e; 627 for (e = entryForHash(this, hash); e != null; e = e.next) { 628 K k; 629 if ((k = e.key) == key || 630 (e.hash == hash && key.equals(k))) { 631 oldValue = e.value; 632 e.value = value; 633 ++modCount; 634 break; 635 } 636 } 637 } finally { 638 unlock(); 639 } 640 return oldValue; 641 } 642 643 final void clear() { 644 lock(); 645 try { 646 HashEntry<K,V>[] tab = table; 647 for (int i = 0; i < tab.length ; i++) 648 setEntryAt(tab, i, null); 649 ++modCount; 650 count = 0; 651 } finally { 652 unlock(); 653 } 654 } 655 } 656 657 // Accessing segments 658 659 /** 660 * Gets the jth element of given segment array (if nonnull) with 661 * volatile element access semantics via Unsafe. (The null check 662 * can trigger harmlessly only during deserialization.) Note: 663 * because each element of segments array is set only once (using 664 * fully ordered writes), some performance-sensitive methods rely 665 * on this method only as a recheck upon null reads. 666 */ 667 @SuppressWarnings("unchecked") 668 static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) { 669 long u = (j << SSHIFT) + SBASE; 670 return ss == null ? null : 671 (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u); 672 } 673 674 /** 675 * Returns the segment for the given index, creating it and 676 * recording in segment table (via CAS) if not already present. 677 * 678 * @param k the index 679 * @return the segment 680 */ 681 @SuppressWarnings("unchecked") 682 private Segment<K,V> ensureSegment(int k) { 683 final Segment<K,V>[] ss = this.segments; 684 long u = (k << SSHIFT) + SBASE; // raw offset 685 Segment<K,V> seg; 686 if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) { 687 Segment<K,V> proto = ss[0]; // use segment 0 as prototype 688 int cap = proto.table.length; 689 float lf = proto.loadFactor; 690 int threshold = (int)(cap * lf); 691 HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry<?,?>[cap]; 692 if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) 693 == null) { // recheck 694 Segment<K,V> s = new Segment<K,V>(lf, threshold, tab); 695 while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) 696 == null) { 697 if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) 698 break; 699 } 700 } 701 } 702 return seg; 703 } 704 705 // Hash-based segment and entry accesses 706 707 /** 708 * Gets the segment for the given hash code. 709 */ 710 @SuppressWarnings("unchecked") 711 private Segment<K,V> segmentForHash(int h) { 712 long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; 713 return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u); 714 } 715 716 /** 717 * Gets the table entry for the given segment and hash code. 718 */ 719 @SuppressWarnings("unchecked") 720 static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) { 721 HashEntry<K,V>[] tab; 722 return (seg == null || (tab = seg.table) == null) ? null : 723 (HashEntry<K,V>) UNSAFE.getObjectVolatile 724 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); 725 } 726 727 /* ---------------- Public operations -------------- */ 728 729 /** 730 * Creates a new, empty map with the specified initial 731 * capacity, load factor and concurrency level. 732 * 733 * @param initialCapacity the initial capacity. The implementation 734 * performs internal sizing to accommodate this many elements. 735 * @param loadFactor the load factor threshold, used to control resizing. 736 * Resizing may be performed when the average number of elements per 737 * bin exceeds this threshold. 738 * @param concurrencyLevel the estimated number of concurrently 739 * updating threads. The implementation performs internal sizing 740 * to try to accommodate this many threads. 741 * @throws IllegalArgumentException if the initial capacity is 742 * negative or the load factor or concurrencyLevel are 743 * nonpositive. 744 */ 745 @SuppressWarnings("unchecked") 746 public ConcurrentHashMap(int initialCapacity, 747 float loadFactor, int concurrencyLevel) { 748 if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) 749 throw new IllegalArgumentException(); 750 if (concurrencyLevel > MAX_SEGMENTS) 751 concurrencyLevel = MAX_SEGMENTS; 752 // Find power-of-two sizes best matching arguments 753 int sshift = 0; 754 int ssize = 1; 755 while (ssize < concurrencyLevel) { 756 ++sshift; 757 ssize <<= 1; 758 } 759 this.segmentShift = 32 - sshift; 760 this.segmentMask = ssize - 1; 761 if (initialCapacity > MAXIMUM_CAPACITY) 762 initialCapacity = MAXIMUM_CAPACITY; 763 int c = initialCapacity / ssize; 764 if (c * ssize < initialCapacity) 765 ++c; 766 int cap = MIN_SEGMENT_TABLE_CAPACITY; 767 while (cap < c) 768 cap <<= 1; 769 // create segments and segments[0] 770 Segment<K,V> s0 = 771 new Segment<K,V>(loadFactor, (int)(cap * loadFactor), 772 (HashEntry<K,V>[])new HashEntry<?,?>[cap]); 773 Segment<K,V>[] ss = (Segment<K,V>[])new Segment<?,?>[ssize]; 774 UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] 775 this.segments = ss; 776 } 777 778 /** 779 * Creates a new, empty map with the specified initial capacity 780 * and load factor and with the default concurrencyLevel (16). 781 * 782 * @param initialCapacity The implementation performs internal 783 * sizing to accommodate this many elements. 784 * @param loadFactor the load factor threshold, used to control resizing. 785 * Resizing may be performed when the average number of elements per 786 * bin exceeds this threshold. 787 * @throws IllegalArgumentException if the initial capacity of 788 * elements is negative or the load factor is nonpositive 789 * 790 * @since 1.6 791 */ 792 public ConcurrentHashMap(int initialCapacity, float loadFactor) { 793 this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); 794 } 795 796 /** 797 * Creates a new, empty map with the specified initial capacity, 798 * and with default load factor (0.75) and concurrencyLevel (16). 799 * 800 * @param initialCapacity the initial capacity. The implementation 801 * performs internal sizing to accommodate this many elements. 802 * @throws IllegalArgumentException if the initial capacity of 803 * elements is negative. 804 */ 805 public ConcurrentHashMap(int initialCapacity) { 806 this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); 807 } 808 809 /** 810 * Creates a new, empty map with a default initial capacity (16), 811 * load factor (0.75) and concurrencyLevel (16). 812 */ 813 public ConcurrentHashMap() { 814 this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); 815 } 816 817 /** 818 * Creates a new map with the same mappings as the given map. 819 * The map is created with a capacity of 1.5 times the number 820 * of mappings in the given map or 16 (whichever is greater), 821 * and a default load factor (0.75) and concurrencyLevel (16). 822 * 823 * @param m the map 824 */ 825 public ConcurrentHashMap(Map<? extends K, ? extends V> m) { 826 this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, 827 DEFAULT_INITIAL_CAPACITY), 828 DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); 829 putAll(m); 830 } 831 832 /** 833 * Returns <tt>true</tt> if this map contains no key-value mappings. 834 * 835 * @return <tt>true</tt> if this map contains no key-value mappings 836 */ 837 public boolean isEmpty() { 838 /* 839 * Sum per-segment modCounts to avoid mis-reporting when 840 * elements are concurrently added and removed in one segment 841 * while checking another, in which case the table was never 842 * actually empty at any point. (The sum ensures accuracy up 843 * through at least 1<<31 per-segment modifications before 844 * recheck.) Methods size() and containsValue() use similar 845 * constructions for stability checks. 846 */ 847 long sum = 0L; 848 final Segment<K,V>[] segments = this.segments; 849 for (int j = 0; j < segments.length; ++j) { 850 Segment<K,V> seg = segmentAt(segments, j); 851 if (seg != null) { 852 if (seg.count != 0) 853 return false; 854 sum += seg.modCount; 855 } 856 } 857 if (sum != 0L) { // recheck unless no modifications 858 for (int j = 0; j < segments.length; ++j) { 859 Segment<K,V> seg = segmentAt(segments, j); 860 if (seg != null) { 861 if (seg.count != 0) 862 return false; 863 sum -= seg.modCount; 864 } 865 } 866 if (sum != 0L) 867 return false; 868 } 869 return true; 870 } 871 872 /** 873 * Returns the number of key-value mappings in this map. If the 874 * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns 875 * <tt>Integer.MAX_VALUE</tt>. 876 * 877 * @return the number of key-value mappings in this map 878 */ 879 public int size() { 880 // Try a few times to get accurate count. On failure due to 881 // continuous async changes in table, resort to locking. 882 final Segment<K,V>[] segments = this.segments; 883 int size; 884 boolean overflow; // true if size overflows 32 bits 885 long sum; // sum of modCounts 886 long last = 0L; // previous sum 887 int retries = -1; // first iteration isn't retry 888 try { 889 for (;;) { 890 if (retries++ == RETRIES_BEFORE_LOCK) { 891 for (int j = 0; j < segments.length; ++j) 892 ensureSegment(j).lock(); // force creation 893 } 894 sum = 0L; 895 size = 0; 896 overflow = false; 897 for (int j = 0; j < segments.length; ++j) { 898 Segment<K,V> seg = segmentAt(segments, j); 899 if (seg != null) { 900 sum += seg.modCount; 901 int c = seg.count; 902 if (c < 0 || (size += c) < 0) 903 overflow = true; 904 } 905 } 906 if (sum == last) 907 break; 908 last = sum; 909 } 910 } finally { 911 if (retries > RETRIES_BEFORE_LOCK) { 912 for (int j = 0; j < segments.length; ++j) 913 segmentAt(segments, j).unlock(); 914 } 915 } 916 return overflow ? Integer.MAX_VALUE : size; 917 } 918 919 /** 920 * Returns the value to which the specified key is mapped, 921 * or {@code null} if this map contains no mapping for the key. 922 * 923 * <p>More formally, if this map contains a mapping from a key 924 * {@code k} to a value {@code v} such that {@code key.equals(k)}, 925 * then this method returns {@code v}; otherwise it returns 926 * {@code null}. (There can be at most one such mapping.) 927 * 928 * @throws NullPointerException if the specified key is null 929 */ 930 @SuppressWarnings("unchecked") 931 public V get(Object key) { 932 Segment<K,V> s; // manually integrate access methods to reduce overhead 933 HashEntry<K,V>[] tab; 934 int h = hash(key); 935 long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; 936 if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null && 937 (tab = s.table) != null) { 938 for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile 939 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); 940 e != null; e = e.next) { 941 K k; 942 if ((k = e.key) == key || (e.hash == h && key.equals(k))) 943 return e.value; 944 } 945 } 946 return null; 947 } 948 949 /** 950 * Tests if the specified object is a key in this table. 951 * 952 * @param key possible key 953 * @return <tt>true</tt> if and only if the specified object 954 * is a key in this table, as determined by the 955 * <tt>equals</tt> method; <tt>false</tt> otherwise. 956 * @throws NullPointerException if the specified key is null 957 */ 958 @SuppressWarnings("unchecked") 959 public boolean containsKey(Object key) { 960 Segment<K,V> s; // same as get() except no need for volatile value read 961 HashEntry<K,V>[] tab; 962 int h = hash(key); 963 long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; 964 if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null && 965 (tab = s.table) != null) { 966 for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile 967 (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); 968 e != null; e = e.next) { 969 K k; 970 if ((k = e.key) == key || (e.hash == h && key.equals(k))) 971 return true; 972 } 973 } 974 return false; 975 } 976 977 /** 978 * Returns <tt>true</tt> if this map maps one or more keys to the 979 * specified value. Note: This method requires a full internal 980 * traversal of the hash table, and so is much slower than 981 * method <tt>containsKey</tt>. 982 * 983 * @param value value whose presence in this map is to be tested 984 * @return <tt>true</tt> if this map maps one or more keys to the 985 * specified value 986 * @throws NullPointerException if the specified value is null 987 */ 988 public boolean containsValue(Object value) { 989 // Same idea as size() 990 if (value == null) 991 throw new NullPointerException(); 992 final Segment<K,V>[] segments = this.segments; 993 boolean found = false; 994 long last = 0; 995 int retries = -1; 996 try { 997 outer: for (;;) { 998 if (retries++ == RETRIES_BEFORE_LOCK) { 999 for (int j = 0; j < segments.length; ++j) 1000 ensureSegment(j).lock(); // force creation 1001 } 1002 long hashSum = 0L; 1003 int sum = 0; 1004 for (int j = 0; j < segments.length; ++j) { 1005 HashEntry<K,V>[] tab; 1006 Segment<K,V> seg = segmentAt(segments, j); 1007 if (seg != null && (tab = seg.table) != null) { 1008 for (int i = 0 ; i < tab.length; i++) { 1009 HashEntry<K,V> e; 1010 for (e = entryAt(tab, i); e != null; e = e.next) { 1011 V v = e.value; 1012 if (v != null && value.equals(v)) { 1013 found = true; 1014 break outer; 1015 } 1016 } 1017 } 1018 sum += seg.modCount; 1019 } 1020 } 1021 if (retries > 0 && sum == last) 1022 break; 1023 last = sum; 1024 } 1025 } finally { 1026 if (retries > RETRIES_BEFORE_LOCK) { 1027 for (int j = 0; j < segments.length; ++j) 1028 segmentAt(segments, j).unlock(); 1029 } 1030 } 1031 return found; 1032 } 1033 1034 /** 1035 * Legacy method testing if some key maps into the specified value 1036 * in this table. This method is identical in functionality to 1037 * {@link #containsValue}, and exists solely to ensure 1038 * full compatibility with class {@link java.util.Hashtable}, 1039 * which supported this method prior to introduction of the 1040 * Java Collections framework. 1041 * 1042 * @param value a value to search for 1043 * @return <tt>true</tt> if and only if some key maps to the 1044 * <tt>value</tt> argument in this table as 1045 * determined by the <tt>equals</tt> method; 1046 * <tt>false</tt> otherwise 1047 * @throws NullPointerException if the specified value is null 1048 */ 1049 public boolean contains(Object value) { 1050 return containsValue(value); 1051 } 1052 1053 /** 1054 * Maps the specified key to the specified value in this table. 1055 * Neither the key nor the value can be null. 1056 * 1057 * <p> The value can be retrieved by calling the <tt>get</tt> method 1058 * with a key that is equal to the original key. 1059 * 1060 * @param key key with which the specified value is to be associated 1061 * @param value value to be associated with the specified key 1062 * @return the previous value associated with <tt>key</tt>, or 1063 * <tt>null</tt> if there was no mapping for <tt>key</tt> 1064 * @throws NullPointerException if the specified key or value is null 1065 */ 1066 @SuppressWarnings("unchecked") 1067 public V put(K key, V value) { 1068 Segment<K,V> s; 1069 if (value == null) 1070 throw new NullPointerException(); 1071 int hash = hash(key); 1072 int j = (hash >>> segmentShift) & segmentMask; 1073 if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck 1074 (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment 1075 s = ensureSegment(j); 1076 return s.put(key, hash, value, false); 1077 } 1078 1079 /** 1080 * {@inheritDoc} 1081 * 1082 * @return the previous value associated with the specified key, 1083 * or <tt>null</tt> if there was no mapping for the key 1084 * @throws NullPointerException if the specified key or value is null 1085 */ 1086 @SuppressWarnings("unchecked") 1087 public V putIfAbsent(K key, V value) { 1088 Segment<K,V> s; 1089 if (value == null) 1090 throw new NullPointerException(); 1091 int hash = hash(key); 1092 int j = (hash >>> segmentShift) & segmentMask; 1093 if ((s = (Segment<K,V>)UNSAFE.getObject 1094 (segments, (j << SSHIFT) + SBASE)) == null) 1095 s = ensureSegment(j); 1096 return s.put(key, hash, value, true); 1097 } 1098 1099 /** 1100 * Copies all of the mappings from the specified map to this one. 1101 * These mappings replace any mappings that this map had for any of the 1102 * keys currently in the specified map. 1103 * 1104 * @param m mappings to be stored in this map 1105 */ 1106 public void putAll(Map<? extends K, ? extends V> m) { 1107 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) 1108 put(e.getKey(), e.getValue()); 1109 } 1110 1111 /** 1112 * Removes the key (and its corresponding value) from this map. 1113 * This method does nothing if the key is not in the map. 1114 * 1115 * @param key the key that needs to be removed 1116 * @return the previous value associated with <tt>key</tt>, or 1117 * <tt>null</tt> if there was no mapping for <tt>key</tt> 1118 * @throws NullPointerException if the specified key is null 1119 */ 1120 public V remove(Object key) { 1121 int hash = hash(key); 1122 Segment<K,V> s = segmentForHash(hash); 1123 return s == null ? null : s.remove(key, hash, null); 1124 } 1125 1126 /** 1127 * {@inheritDoc} 1128 * 1129 * @throws NullPointerException if the specified key is null 1130 */ 1131 public boolean remove(Object key, Object value) { 1132 int hash = hash(key); 1133 Segment<K,V> s; 1134 return value != null && (s = segmentForHash(hash)) != null && 1135 s.remove(key, hash, value) != null; 1136 } 1137 1138 /** 1139 * {@inheritDoc} 1140 * 1141 * @throws NullPointerException if any of the arguments are null 1142 */ 1143 public boolean replace(K key, V oldValue, V newValue) { 1144 int hash = hash(key); 1145 if (oldValue == null || newValue == null) 1146 throw new NullPointerException(); 1147 Segment<K,V> s = segmentForHash(hash); 1148 return s != null && s.replace(key, hash, oldValue, newValue); 1149 } 1150 1151 /** 1152 * {@inheritDoc} 1153 * 1154 * @return the previous value associated with the specified key, 1155 * or <tt>null</tt> if there was no mapping for the key 1156 * @throws NullPointerException if the specified key or value is null 1157 */ 1158 public V replace(K key, V value) { 1159 int hash = hash(key); 1160 if (value == null) 1161 throw new NullPointerException(); 1162 Segment<K,V> s = segmentForHash(hash); 1163 return s == null ? null : s.replace(key, hash, value); 1164 } 1165 1166 /** 1167 * Removes all of the mappings from this map. 1168 */ 1169 public void clear() { 1170 final Segment<K,V>[] segments = this.segments; 1171 for (int j = 0; j < segments.length; ++j) { 1172 Segment<K,V> s = segmentAt(segments, j); 1173 if (s != null) 1174 s.clear(); 1175 } 1176 } 1177 1178 /** 1179 * Returns a {@link Set} view of the keys contained in this map. 1180 * The set is backed by the map, so changes to the map are 1181 * reflected in the set, and vice-versa. The set supports element 1182 * removal, which removes the corresponding mapping from this map, 1183 * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, 1184 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> 1185 * operations. It does not support the <tt>add</tt> or 1186 * <tt>addAll</tt> operations. 1187 * 1188 * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator 1189 * that will never throw {@link ConcurrentModificationException}, 1190 * and guarantees to traverse elements as they existed upon 1191 * construction of the iterator, and may (but is not guaranteed to) 1192 * reflect any modifications subsequent to construction. 1193 */ 1194 public Set<K> keySet() { 1195 Set<K> ks = keySet; 1196 return (ks != null) ? ks : (keySet = new KeySet()); 1197 } 1198 1199 /** 1200 * Returns a {@link Collection} view of the values contained in this map. 1201 * The collection is backed by the map, so changes to the map are 1202 * reflected in the collection, and vice-versa. The collection 1203 * supports element removal, which removes the corresponding 1204 * mapping from this map, via the <tt>Iterator.remove</tt>, 1205 * <tt>Collection.remove</tt>, <tt>removeAll</tt>, 1206 * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not 1207 * support the <tt>add</tt> or <tt>addAll</tt> operations. 1208 * 1209 * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator 1210 * that will never throw {@link ConcurrentModificationException}, 1211 * and guarantees to traverse elements as they existed upon 1212 * construction of the iterator, and may (but is not guaranteed to) 1213 * reflect any modifications subsequent to construction. 1214 */ 1215 public Collection<V> values() { 1216 Collection<V> vs = values; 1217 return (vs != null) ? vs : (values = new Values()); 1218 } 1219 1220 /** 1221 * Returns a {@link Set} view of the mappings contained in this map. 1222 * The set is backed by the map, so changes to the map are 1223 * reflected in the set, and vice-versa. The set supports element 1224 * removal, which removes the corresponding mapping from the map, 1225 * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, 1226 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> 1227 * operations. It does not support the <tt>add</tt> or 1228 * <tt>addAll</tt> operations. 1229 * 1230 * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator 1231 * that will never throw {@link ConcurrentModificationException}, 1232 * and guarantees to traverse elements as they existed upon 1233 * construction of the iterator, and may (but is not guaranteed to) 1234 * reflect any modifications subsequent to construction. 1235 */ 1236 public Set<Map.Entry<K,V>> entrySet() { 1237 Set<Map.Entry<K,V>> es = entrySet; 1238 return (es != null) ? es : (entrySet = new EntrySet()); 1239 } 1240 1241 /** 1242 * Returns an enumeration of the keys in this table. 1243 * 1244 * @return an enumeration of the keys in this table 1245 * @see #keySet() 1246 */ 1247 public Enumeration<K> keys() { 1248 return new KeyIterator(); 1249 } 1250 1251 /** 1252 * Returns an enumeration of the values in this table. 1253 * 1254 * @return an enumeration of the values in this table 1255 * @see #values() 1256 */ 1257 public Enumeration<V> elements() { 1258 return new ValueIterator(); 1259 } 1260 1261 /* ---------------- Iterator Support -------------- */ 1262 1263 abstract class HashIterator { 1264 int nextSegmentIndex; 1265 int nextTableIndex; 1266 HashEntry<K,V>[] currentTable; 1267 HashEntry<K, V> nextEntry; 1268 HashEntry<K, V> lastReturned; 1269 1270 HashIterator() { 1271 nextSegmentIndex = segments.length - 1; 1272 nextTableIndex = -1; 1273 advance(); 1274 } 1275 1276 /** 1277 * Sets nextEntry to first node of next non-empty table 1278 * (in backwards order, to simplify checks). 1279 */ 1280 final void advance() { 1281 for (;;) { 1282 if (nextTableIndex >= 0) { 1283 if ((nextEntry = entryAt(currentTable, 1284 nextTableIndex--)) != null) 1285 break; 1286 } 1287 else if (nextSegmentIndex >= 0) { 1288 Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--); 1289 if (seg != null && (currentTable = seg.table) != null) 1290 nextTableIndex = currentTable.length - 1; 1291 } 1292 else 1293 break; 1294 } 1295 } 1296 1297 final HashEntry<K,V> nextEntry() { 1298 HashEntry<K,V> e = nextEntry; 1299 if (e == null) 1300 throw new NoSuchElementException(); 1301 lastReturned = e; // cannot assign until after null check 1302 if ((nextEntry = e.next) == null) 1303 advance(); 1304 return e; 1305 } 1306 1307 public final boolean hasNext() { return nextEntry != null; } 1308 public final boolean hasMoreElements() { return nextEntry != null; } 1309 1310 public final void remove() { 1311 if (lastReturned == null) 1312 throw new IllegalStateException(); 1313 ConcurrentHashMap.this.remove(lastReturned.key); 1314 lastReturned = null; 1315 } 1316 } 1317 1318 final class KeyIterator 1319 extends HashIterator 1320 implements Iterator<K>, Enumeration<K> 1321 { 1322 public final K next() { return super.nextEntry().key; } 1323 public final K nextElement() { return super.nextEntry().key; } 1324 } 1325 1326 final class ValueIterator 1327 extends HashIterator 1328 implements Iterator<V>, Enumeration<V> 1329 { 1330 public final V next() { return super.nextEntry().value; } 1331 public final V nextElement() { return super.nextEntry().value; } 1332 } 1333 1334 /** 1335 * Custom Entry class used by EntryIterator.next(), that relays 1336 * setValue changes to the underlying map. 1337 */ 1338 final class WriteThroughEntry 1339 extends AbstractMap.SimpleEntry<K,V> 1340 { 1341 static final long serialVersionUID = 7249069246763182397L; 1342 1343 WriteThroughEntry(K k, V v) { 1344 super(k,v); 1345 } 1346 1347 /** 1348 * Sets our entry's value and writes through to the map. The 1349 * value to return is somewhat arbitrary here. Since a 1350 * WriteThroughEntry does not necessarily track asynchronous 1351 * changes, the most recent "previous" value could be 1352 * different from what we return (or could even have been 1353 * removed in which case the put will re-establish). We do not 1354 * and cannot guarantee more. 1355 */ 1356 public V setValue(V value) { 1357 if (value == null) throw new NullPointerException(); 1358 V v = super.setValue(value); 1359 ConcurrentHashMap.this.put(getKey(), value); 1360 return v; 1361 } 1362 } 1363 1364 final class EntryIterator 1365 extends HashIterator 1366 implements Iterator<Entry<K,V>> 1367 { 1368 public Map.Entry<K,V> next() { 1369 HashEntry<K,V> e = super.nextEntry(); 1370 return new WriteThroughEntry(e.key, e.value); 1371 } 1372 } 1373 1374 final class KeySet extends AbstractSet<K> { 1375 public Iterator<K> iterator() { 1376 return new KeyIterator(); 1377 } 1378 public int size() { 1379 return ConcurrentHashMap.this.size(); 1380 } 1381 public boolean isEmpty() { 1382 return ConcurrentHashMap.this.isEmpty(); 1383 } 1384 public boolean contains(Object o) { 1385 return ConcurrentHashMap.this.containsKey(o); 1386 } 1387 public boolean remove(Object o) { 1388 return ConcurrentHashMap.this.remove(o) != null; 1389 } 1390 public void clear() { 1391 ConcurrentHashMap.this.clear(); 1392 } 1393 } 1394 1395 final class Values extends AbstractCollection<V> { 1396 public Iterator<V> iterator() { 1397 return new ValueIterator(); 1398 } 1399 public int size() { 1400 return ConcurrentHashMap.this.size(); 1401 } 1402 public boolean isEmpty() { 1403 return ConcurrentHashMap.this.isEmpty(); 1404 } 1405 public boolean contains(Object o) { 1406 return ConcurrentHashMap.this.containsValue(o); 1407 } 1408 public void clear() { 1409 ConcurrentHashMap.this.clear(); 1410 } 1411 } 1412 1413 final class EntrySet extends AbstractSet<Map.Entry<K,V>> { 1414 public Iterator<Map.Entry<K,V>> iterator() { 1415 return new EntryIterator(); 1416 } 1417 public boolean contains(Object o) { 1418 if (!(o instanceof Map.Entry)) 1419 return false; 1420 Map.Entry<?,?> e = (Map.Entry<?,?>)o; 1421 V v = ConcurrentHashMap.this.get(e.getKey()); 1422 return v != null && v.equals(e.getValue()); 1423 } 1424 public boolean remove(Object o) { 1425 if (!(o instanceof Map.Entry)) 1426 return false; 1427 Map.Entry<?,?> e = (Map.Entry<?,?>)o; 1428 return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); 1429 } 1430 public int size() { 1431 return ConcurrentHashMap.this.size(); 1432 } 1433 public boolean isEmpty() { 1434 return ConcurrentHashMap.this.isEmpty(); 1435 } 1436 public void clear() { 1437 ConcurrentHashMap.this.clear(); 1438 } 1439 } 1440 1441 /* ---------------- Serialization Support -------------- */ 1442 1443 /** 1444 * Saves the state of the <tt>ConcurrentHashMap</tt> instance to a 1445 * stream (i.e., serializes it). 1446 * @param s the stream 1447 * @serialData 1448 * the key (Object) and value (Object) 1449 * for each key-value mapping, followed by a null pair. 1450 * The key-value mappings are emitted in no particular order. 1451 */ 1452 private void writeObject(java.io.ObjectOutputStream s) 1453 throws java.io.IOException { 1454 // force all segments for serialization compatibility 1455 for (int k = 0; k < segments.length; ++k) 1456 ensureSegment(k); 1457 s.defaultWriteObject(); 1458 1459 final Segment<K,V>[] segments = this.segments; 1460 for (int k = 0; k < segments.length; ++k) { 1461 Segment<K,V> seg = segmentAt(segments, k); 1462 seg.lock(); 1463 try { 1464 HashEntry<K,V>[] tab = seg.table; 1465 for (int i = 0; i < tab.length; ++i) { 1466 HashEntry<K,V> e; 1467 for (e = entryAt(tab, i); e != null; e = e.next) { 1468 s.writeObject(e.key); 1469 s.writeObject(e.value); 1470 } 1471 } 1472 } finally { 1473 seg.unlock(); 1474 } 1475 } 1476 s.writeObject(null); 1477 s.writeObject(null); 1478 } 1479 1480 /** 1481 * Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a 1482 * stream (i.e., deserializes it). 1483 * @param s the stream 1484 */ 1485 @SuppressWarnings("unchecked") 1486 private void readObject(java.io.ObjectInputStream s) 1487 throws java.io.IOException, ClassNotFoundException { 1488 s.defaultReadObject(); 1489 1490 // set hashMask 1491 UNSAFE.putIntVolatile(this, HASHSEED_OFFSET, 1492 sun.misc.Hashing.randomHashSeed(this)); 1493 1494 // Re-initialize segments to be minimally sized, and let grow. 1495 int cap = MIN_SEGMENT_TABLE_CAPACITY; 1496 final Segment<K,V>[] segments = this.segments; 1497 for (int k = 0; k < segments.length; ++k) { 1498 Segment<K,V> seg = segments[k]; 1499 if (seg != null) { 1500 seg.threshold = (int)(cap * seg.loadFactor); 1501 seg.table = (HashEntry<K,V>[]) new HashEntry<?,?>[cap]; 1502 } 1503 } 1504 1505 // Read the keys and values, and put the mappings in the table 1506 for (;;) { 1507 K key = (K) s.readObject(); 1508 V value = (V) s.readObject(); 1509 if (key == null) 1510 break; 1511 put(key, value); 1512 } 1513 } 1514 1515 // Unsafe mechanics 1516 private static final sun.misc.Unsafe UNSAFE; 1517 private static final long SBASE; 1518 private static final int SSHIFT; 1519 private static final long TBASE; 1520 private static final int TSHIFT; 1521 private static final long HASHSEED_OFFSET; 1522 1523 static { 1524 int ss, ts; 1525 try { 1526 UNSAFE = sun.misc.Unsafe.getUnsafe(); 1527 Class<?> tc = HashEntry[].class; 1528 Class<?> sc = Segment[].class; 1529 TBASE = UNSAFE.arrayBaseOffset(tc); 1530 SBASE = UNSAFE.arrayBaseOffset(sc); 1531 ts = UNSAFE.arrayIndexScale(tc); 1532 ss = UNSAFE.arrayIndexScale(sc); 1533 HASHSEED_OFFSET = UNSAFE.objectFieldOffset( 1534 ConcurrentHashMap.class.getDeclaredField("hashSeed")); 1535 } catch (Exception e) { 1536 throw new Error(e); 1537 } 1538 if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) 1539 throw new Error("data type scale not a power of two"); 1540 SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); 1541 TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); 1542 } 1543 1544 }