1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.*; 29 import java.lang.reflect.ParameterizedType; 30 import java.lang.reflect.Type; 31 import java.util.concurrent.ThreadLocalRandom; 32 import java.util.function.BiConsumer; 33 import java.util.function.Consumer; 34 import java.util.function.BiFunction; 35 import java.util.function.Function; 36 37 /** 38 * Hash table based implementation of the <tt>Map</tt> interface. This 39 * implementation provides all of the optional map operations, and permits 40 * <tt>null</tt> values and the <tt>null</tt> key. (The <tt>HashMap</tt> 41 * class is roughly equivalent to <tt>Hashtable</tt>, except that it is 42 * unsynchronized and permits nulls.) 
This class makes no guarantees as to 43 * the order of the map; in particular, it does not guarantee that the order 44 * will remain constant over time. 45 * 46 * <p>This implementation provides constant-time performance for the basic 47 * operations (<tt>get</tt> and <tt>put</tt>), assuming the hash function 48 * disperses the elements properly among the buckets. Iteration over 49 * collection views requires time proportional to the "capacity" of the 50 * <tt>HashMap</tt> instance (the number of buckets) plus its size (the number 51 * of key-value mappings). Thus, it's very important not to set the initial 52 * capacity too high (or the load factor too low) if iteration performance is 53 * important. 54 * 55 * <p>An instance of <tt>HashMap</tt> has two parameters that affect its 56 * performance: <i>initial capacity</i> and <i>load factor</i>. The 57 * <i>capacity</i> is the number of buckets in the hash table, and the initial 58 * capacity is simply the capacity at the time the hash table is created. The 59 * <i>load factor</i> is a measure of how full the hash table is allowed to 60 * get before its capacity is automatically increased. When the number of 61 * entries in the hash table exceeds the product of the load factor and the 62 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 63 * structures are rebuilt) so that the hash table has approximately twice the 64 * number of buckets. 65 * 66 * <p>As a general rule, the default load factor (.75) offers a good tradeoff 67 * between time and space costs. Higher values decrease the space overhead 68 * but increase the lookup cost (reflected in most of the operations of the 69 * <tt>HashMap</tt> class, including <tt>get</tt> and <tt>put</tt>). The 70 * expected number of entries in the map and its load factor should be taken 71 * into account when setting its initial capacity, so as to minimize the 72 * number of rehash operations. 
If the initial capacity is greater 73 * than the maximum number of entries divided by the load factor, no 74 * rehash operations will ever occur. 75 * 76 * <p>If many mappings are to be stored in a <tt>HashMap</tt> instance, 77 * creating it with a sufficiently large capacity will allow the mappings to 78 * be stored more efficiently than letting it perform automatic rehashing as 79 * needed to grow the table. 80 * 81 * <p><strong>Note that this implementation is not synchronized.</strong> 82 * If multiple threads access a hash map concurrently, and at least one of 83 * the threads modifies the map structurally, it <i>must</i> be 84 * synchronized externally. (A structural modification is any operation 85 * that adds or deletes one or more mappings; merely changing the value 86 * associated with a key that an instance already contains is not a 87 * structural modification.) This is typically accomplished by 88 * synchronizing on some object that naturally encapsulates the map. 89 * 90 * If no such object exists, the map should be "wrapped" using the 91 * {@link Collections#synchronizedMap Collections.synchronizedMap} 92 * method. This is best done at creation time, to prevent accidental 93 * unsynchronized access to the map:<pre> 94 * Map m = Collections.synchronizedMap(new HashMap(...));</pre> 95 * 96 * <p>The iterators returned by all of this class's "collection view methods" 97 * are <i>fail-fast</i>: if the map is structurally modified at any time after 98 * the iterator is created, in any way except through the iterator's own 99 * <tt>remove</tt> method, the iterator will throw a 100 * {@link ConcurrentModificationException}. Thus, in the face of concurrent 101 * modification, the iterator fails quickly and cleanly, rather than risking 102 * arbitrary, non-deterministic behavior at an undetermined time in the 103 * future. 
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification.  Fail-fast iterators
 * throw <tt>ConcurrentModificationException</tt> on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */

public class HashMap<K,V>
    extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable
{

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * An empty table instance to share when the table is not inflated.
     */
    static final Object[] EMPTY_TABLE = {};

    /**
     * The table, resized as necessary. Length MUST Always be a power of two.
     */
    // Declared Object[] (not Entry[]) because a bin slot holds either a
    // linked list of Entry nodes or a TreeBin once a bin becomes large.
    transient Object[] table = EMPTY_TABLE;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The next size value at which to resize (capacity * load factor).
     * @serial
     */
    // If table == EMPTY_TABLE then this is the initial capacity at which the
    // table will be created when inflated.
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;

    /**
     * The number of times this HashMap has been structurally modified
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * Holds values which can't be initialized until after VM is booted.
     */
    private static class Holder {
        static final sun.misc.Unsafe UNSAFE;

        /**
         * Offset of "final" hashSeed field we must set in
         * readObject() method.
         */
        static final long HASHSEED_OFFSET;

        static final boolean USE_HASHSEED;

        static {
            // Hash-seed randomization is opt-in via a system property.
            String hashSeedProp = java.security.AccessController.doPrivileged(
                    new sun.security.action.GetPropertyAction(
                        "jdk.map.useRandomSeed"));
            boolean localBool = (null != hashSeedProp)
                    ? Boolean.parseBoolean(hashSeedProp) : false;
            USE_HASHSEED = localBool;

            if (USE_HASHSEED) {
                try {
                    // Unsafe lets readObject() rewrite the nominally-final
                    // hashSeed field after deserialization.
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    HASHSEED_OFFSET = UNSAFE.objectFieldOffset(
                        HashMap.class.getDeclaredField("hashSeed"));
                } catch (NoSuchFieldException | SecurityException e) {
                    throw new InternalError("Failed to record hashSeed offset", e);
                }
            } else {
                // Seeding disabled: no Unsafe needed.
                UNSAFE = null;
                HASHSEED_OFFSET = 0;
            }
        }
    }

    /*
     * A randomizing value associated with this instance that is applied to
     * hash code of keys to make hash collisions harder to find.
     *
     * Non-final so it can be set lazily, but be sure not to set more than once.
     */
    transient final int hashSeed;

    /*
     * TreeBin/TreeNode code from CHM doesn't handle the null key. Store the
     * null key entry here.
     */
    transient Entry<K,V> nullKeyEntry = null;

    /*
     * In order to improve performance under high hash-collision conditions,
     * HashMap will switch to storing a bin's entries in a balanced tree
     * (TreeBin) instead of a linked-list once the number of entries in the bin
     * passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of
     * the keys in the bin implements Comparable.  This technique is borrowed
     * from ConcurrentHashMap.
     */

    /*
     * Code based on CHMv8
     *
     * Node type for TreeBin
     */
    final static class TreeNode<K,V> {
        TreeNode parent;  // red-black tree links
        TreeNode left;
        TreeNode right;
        TreeNode prev;    // needed to unlink next upon deletion
        boolean red;
        final HashMap.Entry<K,V> entry;

        TreeNode(HashMap.Entry<K,V> entry, Object next, TreeNode parent) {
            this.entry = entry;
            // Note: clobbers entry.next - callers must have saved it first.
            this.entry.next = next;
            this.parent = parent;
        }
    }

    /**
     * Returns a Class for the given object of the form "class C
     * implements Comparable<C>", if one exists, else null.  See the TreeBin
     * docs, below, for explanation.
277 */ 278 static Class<?> comparableClassFor(Object x) { 279 Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p; 280 if ((c = x.getClass()) == String.class) // bypass checks 281 return c; 282 if ((cmpc = Comparable.class).isAssignableFrom(c)) { 283 while (cmpc.isAssignableFrom(s = c.getSuperclass())) 284 c = s; // find topmost comparable class 285 if ((ts = c.getGenericInterfaces()) != null) { 286 for (int i = 0; i < ts.length; ++i) { 287 if (((t = ts[i]) instanceof ParameterizedType) && 288 ((p = (ParameterizedType)t).getRawType() == cmpc) && 289 (as = p.getActualTypeArguments()) != null && 290 as.length == 1 && as[0] == c) // type arg is c 291 return c; 292 } 293 } 294 } 295 return null; 296 } 297 298 /* 299 * Code based on CHMv8 300 * 301 * A specialized form of red-black tree for use in bins 302 * whose size exceeds a threshold. 303 * 304 * TreeBins use a special form of comparison for search and 305 * related operations (which is the main reason we cannot use 306 * existing collections such as TreeMaps). TreeBins contain 307 * Comparable elements, but may contain others, as well as 308 * elements that are Comparable but not necessarily Comparable<T> 309 * for the same T, so we cannot invoke compareTo among them. To 310 * handle this, the tree is ordered primarily by hash value, then 311 * by Comparable.compareTo order if applicable. On lookup at a 312 * node, if elements are not comparable or compare as 0 then both 313 * left and right children may need to be searched in the case of 314 * tied hash values. (This corresponds to the full list search 315 * that would be necessary if all elements were non-Comparable and 316 * had tied hashes.) The red-black balancing code is updated from 317 * pre-jdk-collections 318 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 319 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 320 * Algorithms" (CLR). 
     */
    final class TreeBin {
        /*
         * The bin count threshold for using a tree rather than list for a bin. The
         * value reflects the approximate break-even point for using tree-based
         * operations.
         */
        static final int TREE_THRESHOLD = 16;

        TreeNode<K,V> root;  // root of tree
        TreeNode<K,V> first; // head of next-pointer list

        /*
         * Split a TreeBin into lo and hi parts and install in given table.
         *
         * Existing Entrys are re-used, which maintains the before/after links for
         * LinkedHashMap.Entry.
         *
         * No check for Comparable, though this is the same as CHM.
         */
        final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) {
            TreeBin oldTree = this;
            // The table just doubled, so exactly one new high bit of the hash
            // now participates in indexFor().
            int bit = newTable.length >>> 1;
            int loCount = 0, hiCount = 0;
            TreeNode<K,V> e = oldTree.first;
            TreeNode<K,V> next;

            // This method is called when the table has just increased capacity,
            // so indexFor() is now taking one additional bit of hash into
            // account ("bit").  Entries in this TreeBin now belong in one of
            // two bins, "i" or "i+bit", depending on if the new top bit of the
            // hash is set.  The trees for the two bins are loTree and hiTree.
            // If either tree ends up containing fewer than TREE_THRESHOLD
            // entries, it is converted back to a linked list.
            while (e != null) {
                // Save entry.next - it will get overwritten in putTreeNode()
                next = (TreeNode<K,V>)e.entry.next;

                int h = e.entry.hash;
                K k = (K) e.entry.key;
                V v = e.entry.value;
                if ((h & bit) == 0) {
                    ++loCount;
                    // Re-using e.entry
                    loTree.putTreeNode(h, k, v, e.entry);
                } else {
                    ++hiCount;
                    hiTree.putTreeNode(h, k, v, e.entry);
                }
                // Iterate using the saved 'next'
                e = next;
            }
            if (loCount < TREE_THRESHOLD) { // too small, convert back to list
                // Rebuild the chain by pushing each entry onto the front;
                // loTree.first order is preserved as a reversed prepend.
                HashMap.Entry loEntry = null;
                TreeNode<K,V> p = loTree.first;
                while (p != null) {
                    @SuppressWarnings("unchecked")
                    TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
                    p.entry.next = loEntry;
                    loEntry = p.entry;
                    p = savedNext;
                }
                // assert newTable[i] == null;
                newTable[i] = loEntry;
            } else {
                // assert newTable[i] == null;
                newTable[i] = loTree;
            }
            if (hiCount < TREE_THRESHOLD) { // too small, convert back to list
                HashMap.Entry hiEntry = null;
                TreeNode<K,V> p = hiTree.first;
                while (p != null) {
                    @SuppressWarnings("unchecked")
                    TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
                    p.entry.next = hiEntry;
                    hiEntry = p.entry;
                    p = savedNext;
                }
                // assert newTable[i + bit] == null;
                newTable[i + bit] = hiEntry;
            } else {
                // assert newTable[i + bit] == null;
                newTable[i + bit] = hiTree;
            }
        }

        /*
         * Populate the TreeBin with entries from the linked list e
         *
         * Assumes 'this' is a new/empty TreeBin
         *
         * Note: no check for Comparable
         * Note: I believe this changes iteration order
         */
        @SuppressWarnings("unchecked")
        void populate(HashMap.Entry e) {
            // assert root == null;
            // assert first == null;
            HashMap.Entry next;
            while (e != null) {
                // Save entry.next - it will get overwritten in putTreeNode()
                next = (HashMap.Entry)e.next;
                // Re-using Entry e will maintain before/after in LinkedHM
                putTreeNode(e.hash, (K)e.key, (V)e.value, e);
                // Iterate using the saved 'next'
                e = next;
            }
        }

        /**
         * Copied from CHMv8
         * From CLR
         */
        private void rotateLeft(TreeNode p) {
            if (p != null) {
                TreeNode r = p.right, pp, rl;
                // p's right child r moves up; r's left subtree becomes
                // p's right subtree.
                if ((rl = p.right = r.left) != null) {
                    rl.parent = p;
                }
                if ((pp = r.parent = p.parent) == null) {
                    root = r;
                } else if (pp.left == p) {
                    pp.left = r;
                } else {
                    pp.right = r;
                }
                r.left = p;
                p.parent = r;
            }
        }

        /**
         * Copied from CHMv8
         * From CLR
         */
        private void rotateRight(TreeNode p) {
            if (p != null) {
                TreeNode l = p.left, pp, lr;
                // Mirror image of rotateLeft.
                if ((lr = p.left = l.right) != null) {
                    lr.parent = p;
                }
                if ((pp = l.parent = p.parent) == null) {
                    root = l;
                } else if (pp.right == p) {
                    pp.right = l;
                } else {
                    pp.left = l;
                }
                l.right = p;
                p.parent = l;
            }
        }

        /**
         * Returns the TreeNode (or null if not found) for the given
         * key.  A front-end for recursive version.
         */
        final TreeNode getTreeNode(int h, K k) {
            return getTreeNode(h, k, root, comparableClassFor(k));
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        @SuppressWarnings("unchecked")
        final TreeNode getTreeNode (int h, K k, TreeNode p, Class<?> cc) {
            // assert k != null;
            // Ordered primarily by hash; ties broken by compareTo when both
            // keys share the same Comparable class, else both subtrees may
            // need searching.
            while (p != null) {
                int dir, ph;  Object pk;
                if ((ph = p.entry.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.entry.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // assert pk != null;
                    TreeNode r, pl, pr; // check both sides
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else if ((pl = p.left) != null)
                        dir = -1;
                    else // nothing there
                        break;
                }
                p = (dir > 0) ? p.right : p.left;
            }
            return null;
        }

        /*
         * Finds or adds a node.
         *
         * 'entry' should be used to recycle an existing Entry (e.g. in the case
         * of converting a linked-list bin to a TreeBin).
         * If entry is null, a new Entry will be created for the new TreeNode
         *
         * @return the TreeNode containing the mapping, or null if a new
         * TreeNode was added
         */
        @SuppressWarnings("unchecked")
        TreeNode putTreeNode(int h, K k, V v, HashMap.Entry<K,V> entry) {
            // assert k != null;
            //if (entry != null) {
                // assert h == entry.hash;
                // assert k == entry.key;
                // assert v == entry.value;
            // }
            Class<?> cc = comparableClassFor(k);
            TreeNode pp = root, p = null;
            int dir = 0;
            while (pp != null) { // find existing node or leaf to insert at
                int ph;  Object pk;
                p = pp;
                if ((ph = p.entry.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.entry.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // Untied search of the right subtree; if absent we
                    // descend left so insertion stays deterministic.
                    TreeNode r, pr;
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                pp = (dir > 0) ? p.right : p.left;
            }

            // Didn't find the mapping in the tree, so add it
            TreeNode f = first;
            TreeNode x;
            if (entry != null) {
                x = new TreeNode(entry, f, p);
            } else {
                x = new TreeNode(newEntry(h, k, v, null), f, p);
            }
            first = x;

            if (p == null) {
                root = x;
            } else { // attach and rebalance; adapted from CLR
                TreeNode xp, xpp;
                if (f != null) {
                    f.prev = x;
                }
                if (dir <= 0) {
                    p.left = x;
                } else {
                    p.right = x;
                }
                x.red = true;
                // Standard red-black insert fixup: restore invariants while
                // the new node's parent is red.
                while (x != null && (xp = x.parent) != null && xp.red
                       && (xpp = xp.parent) != null) {
                    TreeNode xppl = xpp.left;
                    if (xp == xppl) {
                        TreeNode y = xpp.right;
                        if (y != null && y.red) {
                            // Case 1: red uncle - recolor and move up.
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        } else {
                            if (x == xp.right) {
                                // Case 2: convert to case 3 by rotation.
                                rotateLeft(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                // Case 3: recolor and rotate grandparent.
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateRight(xpp);
                                }
                            }
                        }
                    } else {
                        // Mirror image of the above.
                        TreeNode y = xppl;
                        if (y != null && y.red) {
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        } else {
                            if (x == xp.left) {
                                rotateRight(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateLeft(xpp);
                                }
                            }
                        }
                    }
                }
                // Root is always black.
                TreeNode r = root;
                if (r != null && r.red) {
                    r.red = false;
                }
            }
            return null;
        }

        /*
         * From CHMv8
         *
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         */
        final void deleteTreeNode(TreeNode p) {
            TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers
            TreeNode pred = p.prev;
            if (pred == null) {
                first = next;
            } else {
                pred.entry.next = next;
            }
            if (next != null) {
                next.prev = pred;
            }
            TreeNode replacement;
            TreeNode pl = p.left;
            TreeNode pr = p.right;
            if (pl != null && pr != null) {
                // Two children: swap p with its in-order successor s by
                // exchanging tree LINKS (not contents - entries are pinned
                // by the external next-pointer list).
                TreeNode s = pr, sl;
                while ((sl = s.left) != null) // find successor
                {
                    s = sl;
                }
                boolean c = s.red;
                s.red = p.red;
                p.red = c; // swap colors
                TreeNode sr = s.right;
                TreeNode pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                } else {
                    TreeNode sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left) {
                            sp.left = p;
                        } else {
                            sp.right = p;
                        }
                    }
                    if ((s.right = pr) != null) {
                        pr.parent = s;
                    }
                }
                p.left = null;
                if ((p.right = sr) != null) {
                    sr.parent = p;
                }
                if ((s.left = pl) != null) {
                    pl.parent = s;
                }
                if ((s.parent = pp) == null) {
                    root = s;
                } else if (p == pp.left) {
                    pp.left = s;
                } else {
                    pp.right = s;
                }
                replacement = sr;
            } else {
                // At most one child: that child (possibly null) replaces p.
                replacement = (pl != null) ? pl : pr;
            }
            TreeNode pp = p.parent;
            if (replacement == null) {
                if (pp == null) {
                    root = null;
                    return;
                }
                // No child: leave p in place for the fixup, detach below.
                replacement = p;
            } else {
                replacement.parent = pp;
                if (pp == null) {
                    root = replacement;
                } else if (p == pp.left) {
                    pp.left = replacement;
                } else {
                    pp.right = replacement;
                }
                p.left = p.right = p.parent = null;
            }
            if (!p.red) { // rebalance, from CLR
                TreeNode x = replacement;
                while (x != null) {
                    TreeNode xp, xpl;
                    if (x.red || (xp = x.parent) == null) {
                        x.red = false;
                        break;
                    }
                    if (x == (xpl = xp.left)) {
                        TreeNode sib = xp.right;
                        if (sib != null && sib.red) {
                            // Red sibling: rotate to get a black sibling.
                            sib.red = false;
                            xp.red = true;
                            rotateLeft(xp);
                            sib = (xp = x.parent) == null ? null : xp.right;
                        }
                        if (sib == null) {
                            x = xp;
                        } else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sr == null || !sr.red)
                                && (sl == null || !sl.red)) {
                                // Both of sibling's children black: recolor
                                // and move the problem up.
                                sib.red = true;
                                x = xp;
                            } else {
                                if (sr == null || !sr.red) {
                                    if (sl != null) {
                                        sl.red = false;
                                    }
                                    sib.red = true;
                                    rotateRight(sib);
                                    sib = (xp = x.parent) == null ?
                                        null : xp.right;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sr = sib.right) != null) {
                                        sr.red = false;
                                    }
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateLeft(xp);
                                }
                                // Invariants restored; terminate the loop.
                                x = root;
                            }
                        }
                    } else { // symmetric
                        TreeNode sib = xpl;
                        if (sib != null && sib.red) {
                            sib.red = false;
                            xp.red = true;
                            rotateRight(xp);
                            sib = (xp = x.parent) == null ? null : xp.left;
                        }
                        if (sib == null) {
                            x = xp;
                        } else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sl == null || !sl.red)
                                && (sr == null || !sr.red)) {
                                sib.red = true;
                                x = xp;
                            } else {
                                if (sl == null || !sl.red) {
                                    if (sr != null) {
                                        sr.red = false;
                                    }
                                    sib.red = true;
                                    rotateLeft(sib);
                                    sib = (xp = x.parent) == null ?
                                        null : xp.left;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sl = sib.left) != null) {
                                        sl.red = false;
                                    }
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateRight(xp);
                                }
                                x = root;
                            }
                        }
                    }
                }
            }
            // If p was kept as its own replacement (no children), detach it now.
            if (p == replacement && (pp = p.parent) != null) {
                if (p == pp.left) // detach pointers
                {
                    pp.left = null;
                } else if (p == pp.right) {
                    pp.right = null;
                }
                p.parent = null;
            }
        }
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and load factor.
     *
     * @param  initialCapacity the initial capacity
     * @param  loadFactor      the load factor
     * @throws IllegalArgumentException if the initial capacity is negative
     *         or the load factor is nonpositive
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // Table is allocated lazily; until then threshold holds the
        // requested initial capacity (see inflateTable()).
        threshold = initialCapacity;
        hashSeed = initHashSeed();
        init();
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param  initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the default initial capacity
     * (16) and the default load factor (0.75).
854 */ 855 public HashMap() { 856 this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); 857 } 858 859 /** 860 * Constructs a new <tt>HashMap</tt> with the same mappings as the 861 * specified <tt>Map</tt>. The <tt>HashMap</tt> is created with 862 * default load factor (0.75) and an initial capacity sufficient to 863 * hold the mappings in the specified <tt>Map</tt>. 864 * 865 * @param m the map whose mappings are to be placed in this map 866 * @throws NullPointerException if the specified map is null 867 */ 868 public HashMap(Map<? extends K, ? extends V> m) { 869 this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, 870 DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); 871 inflateTable(threshold); 872 873 putAllForCreate(m); 874 // assert size == m.size(); 875 } 876 877 private static int roundUpToPowerOf2(int number) { 878 // assert number >= 0 : "number must be non-negative"; 879 int rounded = number >= MAXIMUM_CAPACITY 880 ? MAXIMUM_CAPACITY 881 : (rounded = Integer.highestOneBit(number)) != 0 882 ? (Integer.bitCount(number) > 1) ? rounded << 1 : rounded 883 : 1; 884 885 return rounded; 886 } 887 888 /** 889 * Inflates the table. 890 */ 891 private void inflateTable(int toSize) { 892 // Find a power of 2 >= toSize 893 int capacity = roundUpToPowerOf2(toSize); 894 895 threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1); 896 table = new Object[capacity]; 897 } 898 899 // internal utilities 900 901 /** 902 * Initialization hook for subclasses. This method is called 903 * in all constructors and pseudo-constructors (clone, readObject) 904 * after HashMap has been initialized but before any entries have 905 * been inserted. (In the absence of this method, readObject would 906 * require explicit knowledge of subclasses.) 907 */ 908 void init() { 909 } 910 911 /** 912 * Return an initial value for the hashSeed, or 0 if the random seed is not 913 * enabled. 
914 */ 915 final int initHashSeed() { 916 if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { 917 int seed = ThreadLocalRandom.current().nextInt(); 918 return (seed != 0) ? seed : 1; 919 } 920 return 0; 921 } 922 923 /** 924 * Retrieve object hash code and applies a supplemental hash function to the 925 * result hash, which defends against poor quality hash functions. This is 926 * critical because HashMap uses power-of-two length hash tables, that 927 * otherwise encounter collisions for hashCodes that do not differ 928 * in lower bits. 929 */ 930 final int hash(Object k) { 931 int h = hashSeed ^ k.hashCode(); 932 933 // This function ensures that hashCodes that differ only by 934 // constant multiples at each bit position have a bounded 935 // number of collisions (approximately 8 at default load factor). 936 h ^= (h >>> 20) ^ (h >>> 12); 937 return h ^ (h >>> 7) ^ (h >>> 4); 938 } 939 940 /** 941 * Returns index for hash code h. 942 */ 943 static int indexFor(int h, int length) { 944 // assert Integer.bitCount(length) == 1 : "length must be a non-zero power of 2"; 945 return h & (length-1); 946 } 947 948 /** 949 * Returns the number of key-value mappings in this map. 950 * 951 * @return the number of key-value mappings in this map 952 */ 953 public int size() { 954 return size; 955 } 956 957 /** 958 * Returns <tt>true</tt> if this map contains no key-value mappings. 959 * 960 * @return <tt>true</tt> if this map contains no key-value mappings 961 */ 962 public boolean isEmpty() { 963 return size == 0; 964 } 965 966 /** 967 * Returns the value to which the specified key is mapped, 968 * or {@code null} if this map contains no mapping for the key. 969 * 970 * <p>More formally, if this map contains a mapping from a key 971 * {@code k} to a value {@code v} such that {@code (key==null ? k==null : 972 * key.equals(k))}, then this method returns {@code v}; otherwise 973 * it returns {@code null}. (There can be at most one such mapping.) 
974 * 975 * <p>A return value of {@code null} does not <i>necessarily</i> 976 * indicate that the map contains no mapping for the key; it's also 977 * possible that the map explicitly maps the key to {@code null}. 978 * The {@link #containsKey containsKey} operation may be used to 979 * distinguish these two cases. 980 * 981 * @see #put(Object, Object) 982 */ 983 @SuppressWarnings("unchecked") 984 public V get(Object key) { 985 Entry<K,V> entry = getEntry(key); 986 987 return null == entry ? null : entry.getValue(); 988 } 989 990 @Override 991 public V getOrDefault(Object key, V defaultValue) { 992 Entry<K,V> entry = getEntry(key); 993 994 return (entry == null) ? defaultValue : entry.getValue(); 995 } 996 997 /** 998 * Returns <tt>true</tt> if this map contains a mapping for the 999 * specified key. 1000 * 1001 * @param key The key whose presence in this map is to be tested 1002 * @return <tt>true</tt> if this map contains a mapping for the specified 1003 * key. 1004 */ 1005 public boolean containsKey(Object key) { 1006 return getEntry(key) != null; 1007 } 1008 1009 /** 1010 * Returns the entry associated with the specified key in the 1011 * HashMap. Returns null if the HashMap contains no mapping 1012 * for the key. 
     */
    @SuppressWarnings("unchecked")
    final Entry<K,V> getEntry(Object key) {
        if (isEmpty()) {
            return null;
        }
        if (key == null) {
            // The null key lives outside the table in its own field.
            return nullKeyEntry;
        }
        int hash = hash(key);
        int bin = indexFor(hash, table.length);

        // A bin holds either a linked list of Entry nodes or a TreeBin.
        if (table[bin] instanceof Entry) {
            Entry<K,V> e = (Entry<K,V>) table[bin];
            for (; e != null; e = (Entry<K,V>)e.next) {
                Object k;
                if (e.hash == hash &&
                    ((k = e.key) == key || key.equals(k))) {
                    return e;
                }
            }
        } else if (table[bin] != null) {
            TreeBin e = (TreeBin)table[bin];
            TreeNode p = e.getTreeNode(hash, (K)key);
            if (p != null) {
                // assert p.entry.hash == hash && p.entry.key.equals(key);
                return (Entry<K,V>)p.entry;
            } else {
                return null;
            }
        }
        return null;
    }


    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with <tt>key</tt>, or
     *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
     *         (A <tt>null</tt> return can also indicate that the map
     *         previously associated <tt>null</tt> with <tt>key</tt>.)
     */
    @SuppressWarnings("unchecked")
    public V put(K key, V value) {
        if (table == EMPTY_TABLE) {
            // Lazily allocate the backing array on first insertion.
            inflateTable(threshold);
        }
        if (key == null)
            return putForNullKey(value);
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        if (table[i] instanceof Entry) {
            // Bin contains ordinary Entries.  Search for key in the linked list
            // of entries, counting the number of entries.  Only check for
            // TreeBin conversion if the list size is >= TREE_THRESHOLD.
            // (The conversion still may not happen if the table gets resized.)
            int listSize = 0;
            Entry<K,V> e = (Entry<K,V>) table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                Object k;
                if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
                    V oldValue = e.value;
                    e.value = value;
                    e.recordAccess(this);
                    return oldValue;
                }
                listSize++;
            }
            // Didn't find, so fall through and call addEntry() to add the
            // Entry and check for TreeBin conversion.
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            TreeBin e = (TreeBin)table[i];
            TreeNode p = e.putTreeNode(hash, key, value, null);
            if (p == null) { // putTreeNode() added a new node
                modCount++;
                size++;
                if (size >= threshold) {
                    resize(2 * table.length);
                }
                return null;
            } else { // putTreeNode() found an existing node
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                V oldVal = pEntry.value;
                pEntry.value = value;
                pEntry.recordAccess(this);
                return oldVal;
            }
        }
        modCount++;
        addEntry(hash, key, value, i, checkIfNeedTree);
        return null;
    }

    /**
     * Offloaded version of put for null keys.
     *
     * @return the previous value mapped to the null key, or null if there
     *         was none (or it was explicitly null).
     */
    private V putForNullKey(V value) {
        if (nullKeyEntry != null) {
            V oldValue = nullKeyEntry.value;
            nullKeyEntry.value = value;
            nullKeyEntry.recordAccess(this);
            return oldValue;
        }
        modCount++;
        size++; // newEntry() skips size++
        nullKeyEntry = newEntry(0, null, value, null);
        return null;
    }

    /**
     * Version of putForCreate for the null key: no modCount bump, no
     * recordAccess, mirroring putForCreate's relaxed contract.
     */
    private void putForCreateNullKey(V value) {
        // Look for preexisting entry for key.  This will never happen for
        // clone or deserialize.  It will only happen for construction if the
        // input Map is a sorted map whose ordering is inconsistent w/ equals.
        if (nullKeyEntry != null) {
            nullKeyEntry.value = value;
        } else {
            nullKeyEntry = newEntry(0, null, value, null);
            size++;
        }
    }


    /**
     * This method is used instead of put by constructors and
     * pseudoconstructors (clone, readObject).  It does not resize the table,
     * check for comodification, etc, though it will convert bins to TreeBins
     * as needed.  It calls createEntry rather than addEntry.
     */
    @SuppressWarnings("unchecked")
    private void putForCreate(K key, V value) {
        if (null == key) {
            putForCreateNullKey(value);
            return;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        /**
         * Look for preexisting entry for key.  This will never happen for
         * clone or deserialize.  It will only happen for construction if the
         * input Map is a sorted map whose ordering is inconsistent w/ equals.
         */
        if (table[i] instanceof Entry) {
            int listSize = 0;
            Entry<K,V> e = (Entry<K,V>) table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                Object k;
                if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
                    e.value = value;
                    return;
                }
                listSize++;
            }
            // Didn't find, fall through to createEntry().
            // Check for conversion to TreeBin done via createEntry().
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            TreeBin e = (TreeBin)table[i];
            TreeNode p = e.putTreeNode(hash, key, value, null);
            if (p != null) {
                p.entry.setValue(value); // Found an existing node, set value
            } else {
                size++; // Added a new TreeNode, so update size
            }
            // don't need modCount++/check for resize - just return
            return;
        }

        createEntry(hash, key, value, i, checkIfNeedTree);
    }

    /** Bulk putForCreate, used by clone() and deserialization. */
    private void putAllForCreate(Map<? extends K, ? extends V> m) {
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putForCreate(e.getKey(), e.getValue());
    }

    /**
     * Rehashes the contents of this map into a new array with a
     * larger capacity.  This method is called automatically when the
     * number of keys in this map reaches its threshold.
     *
     * If current capacity is MAXIMUM_CAPACITY, this method does not
     * resize the map, but sets threshold to Integer.MAX_VALUE.
     * This has the effect of preventing future calls.
     *
     * @param newCapacity the new capacity, MUST be a power of two;
     *        must be greater than current capacity unless current
     *        capacity is MAXIMUM_CAPACITY (in which case value
     *        is irrelevant).
     */
    void resize(int newCapacity) {
        Object[] oldTable = table;
        int oldCapacity = oldTable.length;
        if (oldCapacity == MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return;
        }

        Object[] newTable = new Object[newCapacity];
        transfer(newTable);
        table = newTable;
        threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1);
    }

    /**
     * Transfers all entries from current table to newTable.
     *
     * Assumes newTable is larger than table
     */
    @SuppressWarnings("unchecked")
    void transfer(Object[] newTable) {
        Object[] src = table;
        // assert newTable.length > src.length : "newTable.length(" +
        //   newTable.length + ") expected to be > src.length(" + src.length + ")";
        int newCapacity = newTable.length;
        for (int j = 0; j < src.length; j++) {
            if (src[j] instanceof Entry) {
                // Assume: since wasn't TreeBin before, won't need TreeBin now
                Entry<K,V> e = (Entry<K,V>) src[j];
                while (null != e) {
                    Entry<K,V> next = (Entry<K,V>)e.next;
                    int i = indexFor(e.hash, newCapacity);
                    e.next = (Entry<K,V>) newTable[i];
                    newTable[i] = e;
                    e = next;
                }
            } else if (src[j] != null) {
                // A TreeBin splits itself into a "low" and "high" tree for
                // the doubled table; splitTreeBin installs the results.
                TreeBin e = (TreeBin) src[j];
                TreeBin loTree = new TreeBin();
                TreeBin hiTree = new TreeBin();
                e.splitTreeBin(newTable, j, loTree, hiTree);
            }
        }
        Arrays.fill(table, null);
    }

    /**
     * Copies all of the mappings from the specified map to this map.
     * These mappings will replace any mappings that this map had for
     * any of the keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     * @throws NullPointerException if the specified map is null
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        int numKeysToBeAdded = m.size();
        if (numKeysToBeAdded == 0)
            return;

        if (table == EMPTY_TABLE) {
            inflateTable((int) Math.max(numKeysToBeAdded * loadFactor, threshold));
        }

        /*
         * Expand the map if the number of mappings to be added
         * is greater than or equal to threshold.  This is conservative; the
         * obvious condition is (m.size() + size) >= threshold, but this
         * condition could result in a map with twice the appropriate capacity,
         * if the keys to be added overlap with the keys already in this map.
         * By using the conservative calculation, we subject ourself
         * to at most one extra resize.
         */
        if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) {
            resize(table.length * 2);
        }

        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            put(e.getKey(), e.getValue());
    }

    /**
     * Removes the mapping for the specified key from this map if present.
     *
     * @param key key whose mapping is to be removed from the map
     * @return the previous value associated with <tt>key</tt>, or
     *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
     *         (A <tt>null</tt> return can also indicate that the map
     *         previously associated <tt>null</tt> with <tt>key</tt>.)
     */
    public V remove(Object key) {
        Entry<K,V> e = removeEntryForKey(key);
        return (e == null ? null : e.value);
    }

    // optimized implementations of default methods in Map

    /**
     * Applies the action to every mapping, visiting the null key first,
     * then each bin (list or tree) in table order.  Fails fast with
     * ConcurrentModificationException if the map is structurally modified
     * by the action.
     */
    @Override
    public void forEach(BiConsumer<? super K, ? super V> action) {
        Objects.requireNonNull(action);
        final int expectedModCount = modCount;
        if (nullKeyEntry != null) {
            forEachNullKey(expectedModCount, action);
        }
        Object[] tab = this.table;
        for (int index = 0; index < tab.length; index++) {
            Object item = tab[index];
            if (item == null) {
                continue;
            }
            if (item instanceof HashMap.TreeBin) {
                eachTreeNode(expectedModCount, ((TreeBin)item).first, action);
                continue;
            }
            @SuppressWarnings("unchecked")
            Entry<K, V> entry = (Entry<K, V>)item;
            while (entry != null) {
                action.accept(entry.key, entry.value);
                entry = (Entry<K, V>)entry.next;

                if (expectedModCount != modCount) {
                    throw new ConcurrentModificationException();
                }
            }
        }
    }

    /** forEach helper: walks a TreeBin's linked node list with the fail-fast check. */
    private void eachTreeNode(int expectedModCount, TreeNode<K, V> node, BiConsumer<? super K, ? super V> action) {
        while (node != null) {
            @SuppressWarnings("unchecked")
            Entry<K, V> entry = (Entry<K, V>)node.entry;
            action.accept(entry.key, entry.value);
            node = (TreeNode<K, V>)entry.next;

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    /** forEach helper for the out-of-table null-key entry. */
    private void forEachNullKey(int expectedModCount, BiConsumer<? super K, ? super V> action) {
        action.accept(null, nullKeyEntry.value);

        if (expectedModCount != modCount) {
            throw new ConcurrentModificationException();
        }
    }

    /**
     * Replaces every value with the result of applying the function,
     * null key first, then each bin.  Fail-fast on structural
     * modification by the function.
     */
    @Override
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        Objects.requireNonNull(function);
        final int expectedModCount = modCount;
        if (nullKeyEntry != null) {
            replaceforNullKey(expectedModCount, function);
        }
        Object[] tab = this.table;
        for (int index = 0; index < tab.length; index++) {
            Object item = tab[index];
            if (item == null) {
                continue;
            }
            if (item instanceof HashMap.TreeBin) {
                replaceEachTreeNode(expectedModCount, ((TreeBin)item).first, function);
                continue;
            }
            @SuppressWarnings("unchecked")
            Entry<K, V> entry = (Entry<K, V>)item;
            while (entry != null) {
                entry.value = function.apply(entry.key, entry.value);
                entry = (Entry<K, V>)entry.next;

                if (expectedModCount != modCount) {
                    throw new ConcurrentModificationException();
                }
            }
        }
    }

    /** replaceAll helper: rewrites values along a TreeBin's linked node list. */
    private void replaceEachTreeNode(int expectedModCount, TreeNode<K, V> node, BiFunction<? super K, ? super V, ? extends V> function) {
        while (node != null) {
            @SuppressWarnings("unchecked")
            Entry<K, V> entry = (Entry<K, V>)node.entry;
            entry.value = function.apply(entry.key, entry.value);
            node = (TreeNode<K, V>)entry.next;

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    /** replaceAll helper for the out-of-table null-key entry. */
    private void replaceforNullKey(int expectedModCount, BiFunction<? super K, ? super V, ? extends V> function) {
        nullKeyEntry.value = function.apply(null, nullKeyEntry.value);

        if (expectedModCount != modCount) {
            throw new ConcurrentModificationException();
        }
    }

    /**
     * Maps the key to the value only if the key is absent (or mapped to
     * null); an existing null mapping is overwritten in place.
     */
    @Override
    public V putIfAbsent(K key, V value) {
        if (table == EMPTY_TABLE) {
            inflateTable(threshold);
        }
        if (key == null) {
            if (nullKeyEntry == null || nullKeyEntry.value == null) {
                putForNullKey(value);
                return null;
            } else {
                return nullKeyEntry.value;
            }
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        if (table[i] instanceof Entry) {
            int listSize = 0;
            Entry<K,V> e = (Entry<K,V>) table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    if (e.value != null) {
                        return e.value;
                    }
                    e.value = value;
                    e.recordAccess(this);
                    return null;
                }
                listSize++;
            }
            // Didn't find, so fall through and call addEntry() to add the
            // Entry and check for TreeBin conversion.
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            TreeBin e = (TreeBin)table[i];
            TreeNode p = e.putTreeNode(hash, key, value, null);
            if (p == null) { // not found, putTreeNode() added a new node
                modCount++;
                size++;
                if (size >= threshold) {
                    resize(2 * table.length);
                }
                return null;
            } else { // putTreeNode() found an existing node
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                V oldVal = pEntry.value;
                if (oldVal == null) { // only replace if maps to null
                    pEntry.value = value;
                    pEntry.recordAccess(this);
                }
                return oldVal;
            }
        }
        modCount++;
        addEntry(hash, key, value, i, checkIfNeedTree);
        return null;
    }

    /**
     * Removes the entry for the key only if it is currently mapped to
     * the given value (compared with Objects.equals).
     */
    @Override
    public boolean remove(Object key, Object value) {
        if (isEmpty()) {
            return false;
        }
        if (key == null) {
            if (nullKeyEntry != null &&
                 Objects.equals(nullKeyEntry.value, value)) {
                removeNullKey();
                return true;
            }
            return false;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);

        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>) table[i];
            Entry<K,V> e = prev;
            while (e != null) {
                @SuppressWarnings("unchecked")
                Entry<K,V> next = (Entry<K,V>) e.next;
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    if (!Objects.equals(e.value, value)) {
                        return false;
                    }
                    modCount++;
                    size--;
                    if (prev == e)
                        table[i] = next;
                    else
                        prev.next = next;
                    e.recordRemoval(this);
                    return true;
                }
                prev = e;
                e = next;
            }
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, (K)key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                if (Objects.equals(pEntry.value, value)) {
                    modCount++;
                    size--;
                    tb.deleteTreeNode(p);
                    pEntry.recordRemoval(this);
                    if (tb.root == null || tb.first == null) {
                        // assert tb.root == null && tb.first == null :
                        //   "TreeBin.first and root should both be null";
                        // TreeBin is now empty, we should blank this bin
                        table[i] = null;
                    }
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Replaces the entry's value only if the key is currently mapped to
     * oldValue (compared with Objects.equals).
     */
    @Override
    public boolean replace(K key, V oldValue, V newValue) {
        if (isEmpty()) {
            return false;
        }
        if (key == null) {
            if (nullKeyEntry != null &&
                 Objects.equals(nullKeyEntry.value, oldValue)) {
                putForNullKey(newValue);
                return true;
            }
            return false;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);

        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> e = (Entry<K,V>) table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) {
                    e.value = newValue;
                    e.recordAccess(this);
                    return true;
                }
            }
            return false;
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                if (Objects.equals(pEntry.value, oldValue)) {
                    pEntry.value = newValue;
                    pEntry.recordAccess(this);
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Replaces the entry's value only if the key is currently present,
     * returning the previous value (which may itself be null).
     */
    @Override
    public V replace(K key, V value) {
        if (isEmpty()) {
            return null;
        }
        if (key == null) {
            if (nullKeyEntry != null) {
                return putForNullKey(value);
            }
            return null;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> e = (Entry<K,V>)table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    e.value = value;
                    e.recordAccess(this);
                    return oldValue;
                }
            }

            return null;
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                V oldValue = pEntry.value;
                pEntry.value = value;
                pEntry.recordAccess(this);
                return oldValue;
            }
        }
        return null;
    }

    /**
     * If the key is absent (or mapped to null), computes a value with the
     * mapping function and installs it when non-null.
     */
    @Override
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
        if (table == EMPTY_TABLE) {
            inflateTable(threshold);
        }
        if (key == null) {
            if (nullKeyEntry == null || nullKeyEntry.value == null) {
                V newValue = mappingFunction.apply(key);
                if (newValue != null) {
                    putForNullKey(newValue);
                }
                return newValue;
            }
            return nullKeyEntry.value;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        if (table[i] instanceof Entry) {
            int listSize = 0;
            @SuppressWarnings("unchecked")
            Entry<K,V> e = (Entry<K,V>)table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    if (oldValue == null) {
                        V newValue = mappingFunction.apply(key);
                        if (newValue != null) {
                            e.value = newValue;
                            e.recordAccess(this);
                        }
                        return newValue;
                    }
                    return oldValue;
                }
                listSize++;
            }
            // Didn't find, fall through to call the mapping function
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            // NOTE(review): in the tree branch the mapping function is
            // invoked before checking for an existing non-null mapping,
            // unlike the list branch — presumably tolerated because
            // putTreeNode only replaces a null value below.
            TreeBin e = (TreeBin)table[i];
            V value = mappingFunction.apply(key);
            if (value == null) { // Return the existing value, if any
                TreeNode p = e.getTreeNode(hash, key);
                if (p != null) {
                    return (V) p.entry.value;
                }
                return null;
            } else { // Put the new value into the Tree, if absent
                TreeNode p = e.putTreeNode(hash, key, value, null);
                if (p == null) { // not found, new node was added
                    modCount++;
                    size++;
                    if (size >= threshold) {
                        resize(2 * table.length);
                    }
                    return value;
                } else { // putTreeNode() found an existing node
                    Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                    V oldVal = pEntry.value;
                    if (oldVal == null) { // only replace if maps to null
                        pEntry.value = value;
                        pEntry.recordAccess(this);
                        return value;
                    }
                    return oldVal;
                }
            }
        }
        V newValue = mappingFunction.apply(key);
        if (newValue != null) { // add Entry and check for TreeBin conversion
            modCount++;
            addEntry(hash, key, newValue, i, checkIfNeedTree);
        }

        return newValue;
    }

    /**
     * If the key is mapped to a non-null value, remaps it; a null result
     * from the function removes the mapping.
     */
    @Override
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (isEmpty()) {
            return null;
        }
        if (key == null) {
            V oldValue;
            if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) {
                V newValue = remappingFunction.apply(key, oldValue);
                if (newValue != null ) {
                    putForNullKey(newValue);
                    return newValue;
                } else {
                    removeNullKey();
                }
            }
            return null;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>)table[i];
            Entry<K,V> e = prev;
            while (e != null) {
                Entry<K,V> next = (Entry<K,V>)e.next;
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    if (oldValue == null)
                        break;
                    V newValue = remappingFunction.apply(key, oldValue);
                    if (newValue == null) {
                        modCount++;
                        size--;
                        if (prev == e)
                            table[i] = next;
                        else
                            prev.next = next;
                        e.recordRemoval(this);
                    } else {
                        e.value = newValue;
                        e.recordAccess(this);
                    }
                    return newValue;
                }
                prev = e;
                e = next;
            }
        } else if (table[i] != null) {
            TreeBin tb = (TreeBin)table[i];
            TreeNode p = tb.getTreeNode(hash, key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                V oldValue = pEntry.value;
                if (oldValue != null) {
                    V newValue = remappingFunction.apply(key, oldValue);
                    if (newValue == null) { // remove mapping
                        modCount++;
                        size--;
                        tb.deleteTreeNode(p);
                        pEntry.recordRemoval(this);
                        if (tb.root == null || tb.first == null) {
                            // assert tb.root == null && tb.first == null :
                            //   "TreeBin.first and root should both be null";
                            // TreeBin is now empty, we should blank this bin
                            table[i] = null;
                        }
                    } else {
                        pEntry.value = newValue;
                        pEntry.recordAccess(this);
                    }
                    return newValue;
                }
            }
        }
        return null;
    }

    /**
     * Remaps the key from its current value (or null when absent); a null
     * result removes the mapping, a non-null result installs it.
     */
    @Override
    public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (table == EMPTY_TABLE) {
            inflateTable(threshold);
        }
        if (key == null) {
            V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value;
            V newValue = remappingFunction.apply(key, oldValue);
            if (newValue != oldValue) {
                if (newValue == null) {
                    removeNullKey();
                } else {
                    putForNullKey(newValue);
                }
            }
            return newValue;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        if (table[i] instanceof Entry) {
            int listSize = 0;
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>)table[i];
            Entry<K,V> e = prev;

            while (e != null) {
                Entry<K,V> next = (Entry<K,V>)e.next;
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    V newValue = remappingFunction.apply(key, oldValue);
                    if (newValue != oldValue) {
                        if (newValue == null) {
                            modCount++;
                            size--;
                            if (prev == e)
                                table[i] = next;
                            else
                                prev.next = next;
                            e.recordRemoval(this);
                        } else {
                            e.value = newValue;
                            e.recordAccess(this);
                        }
                    }
                    return newValue;
                }
                prev = e;
                e = next;
                listSize++;
            }
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            TreeBin tb = (TreeBin)table[i];
            TreeNode p = tb.getTreeNode(hash, key);
            V oldValue = p == null ? null : (V)p.entry.value;
            V newValue = remappingFunction.apply(key, oldValue);
            if (newValue != oldValue) {
                if (newValue == null) {
                    // NOTE(review): this branch dereferences p; it is only
                    // reached when newValue != oldValue, so with p == null
                    // (oldValue null) a null newValue cannot get here.
                    Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                    modCount++;
                    size--;
                    tb.deleteTreeNode(p);
                    pEntry.recordRemoval(this);
                    if (tb.root == null || tb.first == null) {
                        // assert tb.root == null && tb.first == null :
                        //   "TreeBin.first and root should both be null";
                        // TreeBin is now empty, we should blank this bin
                        table[i] = null;
                    }
                } else {
                    if (p != null) { // just update the value
                        Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                        pEntry.value = newValue;
                        pEntry.recordAccess(this);
                    } else { // need to put new node
                        p = tb.putTreeNode(hash, key, newValue, null);
                        // assert p == null; // should have added a new node
                        modCount++;
                        size++;
                        if (size >= threshold) {
                            resize(2 * table.length);
                        }
                    }
                }
            }
            return newValue;
        }

        V newValue = remappingFunction.apply(key, null);
        if (newValue != null) {
            modCount++;
            addEntry(hash, key, newValue, i, checkIfNeedTree);
        }

        return newValue;
    }

    /**
     * Merges the given value with the existing one via the remapping
     * function (or installs it directly when the key is absent or mapped
     * to null); a null merge result removes the mapping.
     */
    @Override
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (table == EMPTY_TABLE) {
            inflateTable(threshold);
        }
        if (key == null) {
            V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value;
            V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value);
            if (newValue != null) {
                putForNullKey(newValue);
            } else if (nullKeyEntry != null) {
                removeNullKey();
            }
            return newValue;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

        if (table[i] instanceof Entry) {
            int listSize = 0;
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>)table[i];
            Entry<K,V> e = prev;

            while (e != null) {
                Entry<K,V> next = (Entry<K,V>)e.next;
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    V newValue = (oldValue == null) ? value :
                                 remappingFunction.apply(oldValue, value);
                    if (newValue == null) {
                        modCount++;
                        size--;
                        if (prev == e)
                            table[i] = next;
                        else
                            prev.next = next;
                        e.recordRemoval(this);
                    } else {
                        e.value = newValue;
                        e.recordAccess(this);
                    }
                    return newValue;
                }
                prev = e;
                e = next;
                listSize++;
            }
            // Didn't find, so fall through and (maybe) call addEntry() to add
            // the Entry and check for TreeBin conversion.
            checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
        } else if (table[i] != null) {
            TreeBin tb = (TreeBin)table[i];
            TreeNode p = tb.getTreeNode(hash, key);
            V oldValue = p == null ? null : (V)p.entry.value;
            V newValue = (oldValue == null) ? value :
                         remappingFunction.apply(oldValue, value);
            if (newValue == null) {
                if (p != null) {
                    Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                    modCount++;
                    size--;
                    tb.deleteTreeNode(p);
                    pEntry.recordRemoval(this);

                    if (tb.root == null || tb.first == null) {
                        // assert tb.root == null && tb.first == null :
                        //   "TreeBin.first and root should both be null";
                        // TreeBin is now empty, we should blank this bin
                        table[i] = null;
                    }
                }
                return null;
            } else if (newValue != oldValue) {
                if (p != null) { // just update the value
                    Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                    pEntry.value = newValue;
                    pEntry.recordAccess(this);
                } else { // need to put new node
                    p = tb.putTreeNode(hash, key, newValue, null);
                    // assert p == null; // should have added a new node
                    modCount++;
                    size++;
                    if (size >= threshold) {
                        resize(2 * table.length);
                    }
                }
            }
            return newValue;
        }
        if (value != null) {
            modCount++;
            addEntry(hash, key, value, i, checkIfNeedTree);
        }
        return value;
    }

    // end of optimized implementations of default methods in Map

    /**
     * Removes and returns the entry associated with the specified key
     * in the HashMap.  Returns null if the HashMap contains no mapping
     * for this key.
     *
     * We don't bother converting TreeBins back to Entry lists if the bin falls
     * back below TREE_THRESHOLD, but we do clear bins when removing the last
     * TreeNode in a TreeBin.
     */
    final Entry<K,V> removeEntryForKey(Object key) {
        if (isEmpty()) {
            return null;
        }
        if (key == null) {
            // Null key is stored outside the table.
            if (nullKeyEntry != null) {
                return removeNullKey();
            }
            return null;
        }
        int hash = hash(key);
        int i = indexFor(hash, table.length);

        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>)table[i];
            Entry<K,V> e = prev;

            while (e != null) {
                @SuppressWarnings("unchecked")
                Entry<K,V> next = (Entry<K,V>) e.next;
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    modCount++;
                    size--;
                    // Unlink e: either it heads the bin or follows prev.
                    if (prev == e)
                        table[i] = next;
                    else
                        prev.next = next;
                    e.recordRemoval(this);
                    return e;
                }
                prev = e;
                e = next;
            }
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, (K)key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                modCount++;
                size--;
                tb.deleteTreeNode(p);
                pEntry.recordRemoval(this);
                if (tb.root == null || tb.first == null) {
                    // assert tb.root == null && tb.first == null :
                    //   "TreeBin.first and root should both be null";
                    // TreeBin is now empty, we should blank this bin
                    table[i] = null;
                }
                return pEntry;
            }
        }
        return null;
    }

    /**
     * Special version of remove for EntrySet using {@code Map.Entry.equals()}
     * for matching.
     */
    final Entry<K,V> removeMapping(Object o) {
        if (isEmpty() || !(o instanceof Map.Entry))
            return null;

        Map.Entry<?,?> entry = (Map.Entry<?,?>) o;
        Object key = entry.getKey();

        if (key == null) {
            // Match the whole entry (key AND value) before removing.
            if (entry.equals(nullKeyEntry)) {
                return removeNullKey();
            }
            return null;
        }

        int hash = hash(key);
        int i = indexFor(hash, table.length);

        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> prev = (Entry<K,V>)table[i];
            Entry<K,V> e = prev;

            while (e != null) {
                @SuppressWarnings("unchecked")
                Entry<K,V> next = (Entry<K,V>)e.next;
                if (e.hash == hash && e.equals(entry)) {
                    modCount++;
                    size--;
                    if (prev == e)
                        table[i] = next;
                    else
                        prev.next = next;
                    e.recordRemoval(this);
                    return e;
                }
                prev = e;
                e = next;
            }
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, (K)key);
            if (p != null && p.entry.equals(entry)) {
                @SuppressWarnings("unchecked")
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                modCount++;
                size--;
                tb.deleteTreeNode(p);
                pEntry.recordRemoval(this);
                if (tb.root == null || tb.first == null) {
                    // assert tb.root == null && tb.first == null :
                    //   "TreeBin.first and root should both be null";
                    // TreeBin is now empty, we should blank this bin
                    table[i] = null;
                }
                return pEntry;
            }
        }
        return null;
    }

    /*
     * Remove the mapping for the null key, and update internal accounting
     * (size, modcount, recordRemoval, etc).
     *
     * Assumes nullKeyEntry is non-null.
2108 */ 2109 private Entry<K,V> removeNullKey() { 2110 // assert nullKeyEntry != null; 2111 Entry<K,V> retVal = nullKeyEntry; 2112 modCount++; 2113 size--; 2114 retVal.recordRemoval(this); 2115 nullKeyEntry = null; 2116 return retVal; 2117 } 2118 2119 /** 2120 * Removes all of the mappings from this map. 2121 * The map will be empty after this call returns. 2122 */ 2123 public void clear() { 2124 modCount++; 2125 if (nullKeyEntry != null) { 2126 nullKeyEntry = null; 2127 } 2128 Arrays.fill(table, null); 2129 size = 0; 2130 } 2131 2132 /** 2133 * Returns <tt>true</tt> if this map maps one or more keys to the 2134 * specified value. 2135 * 2136 * @param value value whose presence in this map is to be tested 2137 * @return <tt>true</tt> if this map maps one or more keys to the 2138 * specified value 2139 */ 2140 public boolean containsValue(Object value) { 2141 if (value == null) { 2142 return containsNullValue(); 2143 } 2144 Object[] tab = table; 2145 for (int i = 0; i < tab.length; i++) { 2146 if (tab[i] instanceof Entry) { 2147 Entry<?,?> e = (Entry<?,?>)tab[i]; 2148 for (; e != null; e = (Entry<?,?>)e.next) { 2149 if (value.equals(e.value)) { 2150 return true; 2151 } 2152 } 2153 } else if (tab[i] != null) { 2154 TreeBin e = (TreeBin)tab[i]; 2155 TreeNode p = e.first; 2156 for (; p != null; p = (TreeNode) p.entry.next) { 2157 if (value == p.entry.value || value.equals(p.entry.value)) { 2158 return true; 2159 } 2160 } 2161 } 2162 } 2163 // Didn't find value in table - could be in nullKeyEntry 2164 return (nullKeyEntry != null && (value == nullKeyEntry.value || 2165 value.equals(nullKeyEntry.value))); 2166 } 2167 2168 /** 2169 * Special-case code for containsValue with null argument 2170 */ 2171 private boolean containsNullValue() { 2172 Object[] tab = table; 2173 for (int i = 0; i < tab.length; i++) { 2174 if (tab[i] instanceof Entry) { 2175 Entry<K,V> e = (Entry<K,V>)tab[i]; 2176 for (; e != null; e = (Entry<K,V>)e.next) { 2177 if (e.value == null) { 2178 return 
true; 2179 } 2180 } 2181 } else if (tab[i] != null) { 2182 TreeBin e = (TreeBin)tab[i]; 2183 TreeNode p = e.first; 2184 for (; p != null; p = (TreeNode) p.entry.next) { 2185 if (p.entry.value == null) { 2186 return true; 2187 } 2188 } 2189 } 2190 } 2191 // Didn't find value in table - could be in nullKeyEntry 2192 return (nullKeyEntry != null && nullKeyEntry.value == null); 2193 } 2194 2195 /** 2196 * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and 2197 * values themselves are not cloned. 2198 * 2199 * @return a shallow copy of this map 2200 */ 2201 @SuppressWarnings("unchecked") 2202 public Object clone() { 2203 HashMap<K,V> result = null; 2204 try { 2205 result = (HashMap<K,V>)super.clone(); 2206 } catch (CloneNotSupportedException e) { 2207 // assert false; 2208 } 2209 if (result.table != EMPTY_TABLE) { 2210 result.inflateTable(Math.min( 2211 (int) Math.min( 2212 size * Math.min(1 / loadFactor, 4.0f), 2213 // we have limits... 2214 HashMap.MAXIMUM_CAPACITY), 2215 table.length)); 2216 } 2217 result.entrySet = null; 2218 result.modCount = 0; 2219 result.size = 0; 2220 result.nullKeyEntry = null; 2221 result.init(); 2222 result.putAllForCreate(this); 2223 2224 return result; 2225 } 2226 2227 static class Entry<K,V> implements Map.Entry<K,V> { 2228 final K key; 2229 V value; 2230 Object next; // an Entry, or a TreeNode 2231 final int hash; 2232 2233 /** 2234 * Creates new entry. 
         */
        Entry(int h, K k, V v, Object n) {
            value = v;
            next = n;
            key = k;
            hash = h;
        }

        public final K getKey() {
            return key;
        }

        public final V getValue() {
            return value;
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        // Map.Entry equality: both key and value must match (null-safe).
        public final boolean equals(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
            Object k1 = getKey();
            Object k2 = e.getKey();
            if (k1 == k2 || (k1 != null && k1.equals(k2))) {
                Object v1 = getValue();
                Object v2 = e.getValue();
                if (v1 == v2 || (v1 != null && v1.equals(v2)))
                    return true;
            }
            return false;
        }

        // Per the Map.Entry contract: key hash XOR value hash.
        public final int hashCode() {
            return Objects.hashCode(getKey()) ^ Objects.hashCode(getValue());
        }

        public final String toString() {
            return getKey() + "=" + getValue();
        }

        /**
         * This method is invoked whenever the value in an entry is
         * overwritten for a key that's already in the HashMap.
         */
        void recordAccess(HashMap<K,V> m) {
        }

        /**
         * This method is invoked whenever the entry is
         * removed from the table.
         */
        void recordRemoval(HashMap<K,V> m) {
        }
    }

    // Convenience overload: always check whether the bin needs treeifying.
    void addEntry(int hash, K key, V value, int bucketIndex) {
        addEntry(hash, key, value, bucketIndex, true);
    }

    /**
     * Adds a new entry with the specified key, value and hash code to
     * the specified bucket.  It is the responsibility of this
     * method to resize the table if appropriate.  The new entry is then
     * created by calling createEntry().
     *
     * Subclass overrides this to alter the behavior of put method.
     *
     * If checkIfNeedTree is false, it is known that this bucket will not need
     * to be converted to a TreeBin, so don't bother checking.
     *
     * Assumes key is not null.
     */
    void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        // assert key != null;
        // Resize before inserting if we are at threshold and the target bin is
        // occupied; the hash/index must be recomputed for the new table.
        if ((size >= threshold) && (null != table[bucketIndex])) {
            resize(2 * table.length);
            hash = hash(key);
            bucketIndex = indexFor(hash, table.length);
        }
        createEntry(hash, key, value, bucketIndex, checkIfNeedTree);
    }

    /**
     * Called by addEntry(), and also used when creating entries
     * as part of Map construction or "pseudo-construction" (cloning,
     * deserialization).  This version does not check for resizing of the table.
     *
     * This method is responsible for converting a bucket to a TreeBin once
     * TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known
     * that this bucket will not need to be converted to a TreeBin, so don't
     * bother checking.  The new entry is constructed by calling newEntry().
     *
     * Assumes key is not null.
     *
     * Note: buckets already converted to a TreeBin don't call this method, but
     * instead call TreeBin.putTreeNode() to create new entries.
     */
    void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        // assert key != null;
        @SuppressWarnings("unchecked")
        Entry<K,V> e = (Entry<K,V>)table[bucketIndex];
        // Prepend the new entry at the head of the bin's chain.
        table[bucketIndex] = newEntry(hash, key, value, e);
        size++;

        if (checkIfNeedTree) {
            // Count the chain; convert to a TreeBin at TREE_THRESHOLD, but only
            // if the key's class is eligible (comparableClassFor() non-null).
            int listSize = 0;
            for (e = (Entry<K,V>) table[bucketIndex]; e != null; e = (Entry<K,V>)e.next) {
                listSize++;
                if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin
                    if (comparableClassFor(key) != null) {
                        TreeBin t = new TreeBin();
                        t.populate((Entry)table[bucketIndex]);
                        table[bucketIndex] = t;
                    }
                    break;
                }
            }
        }
    }

    /*
     * Factory method to create a new Entry object.
     */
    Entry<K,V> newEntry(int hash, K key, V value, Object next) {
        return new HashMap.Entry<>(hash, key, value, next);
    }


    // Base class for the key/value/entry iterators. Traversal order is:
    // nullKeyEntry first (if present), then bins in table order. 'next' and
    // 'current' are typed Object because they may hold an Entry or a TreeNode.
    private abstract class HashIterator<E> implements Iterator<E> {
        Object next;            // next entry to return, an Entry or a TreeNode
        int expectedModCount;   // For fast-fail
        int index;              // current slot
        Object current;         // current entry, an Entry or a TreeNode

        HashIterator() {
            expectedModCount = modCount;
            if (size > 0) { // advance to first entry
                if (nullKeyEntry != null) {
                    // assert nullKeyEntry.next == null;
                    // This works with nextEntry(): nullKeyEntry is an Entry, and
                    // e.next will be null, so we'll hit the findNextBin() call.
                    next = nullKeyEntry;
                } else {
                    findNextBin();
                }
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        @SuppressWarnings("unchecked")
        final Entry<K,V> nextEntry() {
            // Fail fast if the map was structurally modified behind our back.
            if (modCount != expectedModCount) {
                throw new ConcurrentModificationException();
            }
            Object e = next;
            Entry<K,V> retVal;

            if (e == null)
                throw new NoSuchElementException();

            if (e instanceof TreeNode) { // TreeBin
                retVal = (Entry<K,V>)((TreeNode)e).entry;
                next = retVal.next;
            } else {
                retVal = (Entry<K,V>)e;
                next = ((Entry<K,V>)e).next;
            }

            if (next == null) { // Move to next bin
                findNextBin();
            }
            current = e;
            return retVal;
        }

        public void remove() {
            if (current == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            K k;

            if (current instanceof Entry) {
                k = ((Entry<K,V>)current).key;
            } else {
                k = ((Entry<K,V>)((TreeNode)current).entry).key;

            }
            current = null;
            HashMap.this.removeEntryForKey(k);
            // Our own removal is not a concurrent modification.
            expectedModCount = modCount;
        }

        /*
         * Set 'next' to
the first entry of the next non-empty bin in the table
         */
        private void findNextBin() {
            // assert next == null;
            Object[] t = table;

            // Skip empty slots; 'index' is left one past the bin we landed on.
            while (index < t.length && (next = t[index++]) == null)
                ;
            if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode
                next = ((TreeBin) next).first;
                // assert next != null; // There should be no empty TreeBins
            }
        }
    }

    private final class ValueIterator extends HashIterator<V> {
        public V next() {
            return nextEntry().value;
        }
    }

    private final class KeyIterator extends HashIterator<K> {
        public K next() {
            return nextEntry().getKey();
        }
    }

    private final class EntryIterator extends HashIterator<Map.Entry<K,V>> {
        public Map.Entry<K,V> next() {
            return nextEntry();
        }
    }

    // Subclass overrides these to alter behavior of views' iterator() method
    Iterator<K> newKeyIterator() {
        return new KeyIterator();
    }
    Iterator<V> newValueIterator() {
        return new ValueIterator();
    }
    Iterator<Map.Entry<K,V>> newEntryIterator() {
        return new EntryIterator();
    }


    // Views

    // Lazily created entry-set view; keySet and values live in AbstractMap.
    private transient Set<Map.Entry<K,V>> entrySet = null;

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
     * operations.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        Set<K> ks = keySet; // lazily create and cache the view
        return (ks != null ? ks : (keySet = new KeySet()));
    }

    private final class KeySet extends AbstractSet<K> {
        public Iterator<K> iterator() {
            return newKeyIterator();
        }
        public int size() {
            return size;
        }
        public boolean contains(Object o) {
            return containsKey(o);
        }
        public boolean remove(Object o) {
            return HashMap.this.removeEntryForKey(o) != null;
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<K> spliterator() {
            // The optimized spliterator is only valid for HashMap itself;
            // subclasses (e.g. LinkedHashMap) fall back to the generic one.
            if (HashMap.this.getClass() == HashMap.class)
                return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                        (this, Spliterator.SIZED | Spliterator.DISTINCT);
        }
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own <tt>remove</tt> operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a collection view of the values contained in this map
     */
    public Collection<V> values() {
        Collection<V> vs = values; // lazily create and cache the view
        return (vs != null ?
vs : (values = new Values()));
    }

    private final class Values extends AbstractCollection<V> {
        public Iterator<V> iterator() {
            return newValueIterator();
        }
        public int size() {
            return size;
        }
        public boolean contains(Object o) {
            return containsValue(o);
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<V> spliterator() {
            // Optimized spliterator only for HashMap proper; subclasses get
            // the generic, SIZED-only fallback.
            if (HashMap.this.getClass() == HashMap.class)
                return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                        (this, Spliterator.SIZED);
        }
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation, or through the
     * <tt>setValue</tt> operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
     * <tt>clear</tt> operations.  It does not support the
     * <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        return entrySet0();
    }

    private Set<Map.Entry<K,V>> entrySet0() {
        Set<Map.Entry<K,V>> es = entrySet; // lazily create and cache the view
        return es != null ?
es : (entrySet = new EntrySet());
    }

    private final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public Iterator<Map.Entry<K,V>> iterator() {
            return newEntryIterator();
        }
        public boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            // Membership requires both the key to be present and the stored
            // entry (key AND value) to equal the argument.
            Entry<K,V> candidate = getEntry(e.getKey());
            return candidate != null && candidate.equals(e);
        }
        public boolean remove(Object o) {
            return removeMapping(o) != null;
        }
        public int size() {
            return size;
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            // Optimized spliterator only for HashMap proper; subclasses get
            // the generic fallback.
            if (HashMap.this.getClass() == HashMap.class)
                return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                        (this, Spliterator.SIZED | Spliterator.DISTINCT);
        }
    }

    /**
     * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
     * serialize it).
     *
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException
    {
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();

        // Write out number of buckets
        if (table==EMPTY_TABLE) {
            // Table never inflated: 'threshold' still holds the requested
            // capacity, so emit its power-of-two rounding instead.
            s.writeInt(roundUpToPowerOf2(threshold));
        } else {
            s.writeInt(table.length);
        }

        // Write out size (number of Mappings)
        s.writeInt(size);

        // Write out keys and values (alternating)
        if (size > 0) {
            for(Map.Entry<K,V> e : entrySet0()) {
                s.writeObject(e.getKey());
                s.writeObject(e.getValue());
            }
        }
    }

    private static final long serialVersionUID = 362498820763181265L;

    /**
     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
         throws IOException, ClassNotFoundException
    {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        // Validate stream data before using it to size any structures.
        if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
            throw new InvalidObjectException("Illegal load factor: " +
                                               loadFactor);
        }

        // set other fields that need values
        if (Holder.USE_HASHSEED) {
            int seed = ThreadLocalRandom.current().nextInt();
            Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET,
                                         (seed != 0) ? seed : 1);
        }
        table = EMPTY_TABLE;

        // Read in number of buckets
        s.readInt(); // ignored.

        // Read number of mappings
        int mappings = s.readInt();
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                               mappings);

        // capacity chosen by number of mappings and desired load (if >= 0.25)
        int capacity = (int) Math.min(
                    mappings * Math.min(1 / loadFactor, 4.0f),
                    // we have limits...
                    HashMap.MAXIMUM_CAPACITY);

        // allocate the bucket array
        if (mappings > 0) {
            inflateTable(capacity);
        } else {
            // Empty map: defer allocation; remember the capacity in threshold.
            threshold = capacity;
        }

        init();  // Give subclass a chance to do its thing.

        // Read the keys and values, and put the mappings in the HashMap
        for (int i=0; i<mappings; i++) {
            @SuppressWarnings("unchecked")
            K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V value = (V) s.readObject();
            putForCreate(key, value);
        }
    }

    // These methods are used when serializing HashSets
    int   capacity()     { return table.length; }
    float loadFactor()   { return loadFactor;   }

    /**
     * Standin until HM overhaul; based loosely on Weak and Identity HM.
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Object current;             // current node, can be Entry or TreeNode
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks
        boolean acceptedNull;       // Have we accepted the null key?
                                    // Without this, we can't distinguish
                                    // between being at the very beginning (and
                                    // needing to accept null), or being at the
                                    // end of the list in bin 0.  In both cases,
                                    // current == null && index == 0.

        HashMapSpliterator(HashMap<K,V> m, int origin,
                               int fence, int est,
                               int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
            this.acceptedNull = false;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                // fence < 0 means "not yet bound": snapshot size/modCount now.
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                hi = fence = m.table.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }

    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // Refuse to split a one-bin range or while mid-bin.
            if (lo >= mid || current != null) {
                return null;
            } else {
                KeySpliterator<K,V> retVal = new KeySpliterator<K,V>(map, lo,
                                     index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<?
super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                // Unbound: late-bind fence/modCount now.
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            if (!acceptedNull) {
                acceptedNull = true;
                // The null key is visited exactly once, before the table scan.
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry.key);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry.key);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                // Comodification is only checked once, after the bulk scan.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<?
super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            if (!acceptedNull) {
                acceptedNull = true;
                // The null key is visited exactly once, before the table scan.
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry.key);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (current instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)current;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)current).entry;
                        }
                        K k = entry.key;
                        current = entry.next;
                        action.accept(k);
                        // Per-element comodification check (unlike the bulk
                        // forEachRemaining, which checks once at the end).
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // Refuse to split a one-bin range or while mid-bin.
            if (lo >= mid || current != null) {
                return null;
            } else {
                ValueSpliterator<K,V> retVal = new ValueSpliterator<K,V>(map,
                                 lo, index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                // Unbound: late-bind fence/modCount now.
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            if (!acceptedNull) {
                acceptedNull = true;
                // The null key's value is visited once, before the table scan.
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry.value);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry.value);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                // Comodification is only checked once, after the bulk scan.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<?
super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            if (!acceptedNull) {
                acceptedNull = true;
                // The null key's value is visited once, before the table scan.
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry.value);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (current instanceof HashMap.Entry) {
                            entry = (Entry<K,V>)current;
                        } else {
                            entry = (Entry<K,V>)((TreeNode)current).entry;
                        }
                        V v = entry.value;
                        current = entry.next;
                        action.accept(v);
                        // Per-element comodification check.
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // Refuse to split a one-bin range or while mid-bin.
            if (lo >= mid || current != null) {
                return null;
            } else {
                EntrySpliterator<K,V> retVal = new EntrySpliterator<K,V>(map,
                                 lo, index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                // Unbound: late-bind fence/modCount now.
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            if (!acceptedNull) {
                acceptedNull = true;
                // The null-key entry is visited once, before the table scan.
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry);
                        p = entry.next;

                    }
                } while (p != null || i < hi);
                // Comodification is only checked once, after the bulk scan.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<?
super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            if (!acceptedNull) {
                acceptedNull = true;
                // The null-key entry is visited once, before the table scan.
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> e;
                        if (current instanceof HashMap.Entry) {
                            e = (Entry<K,V>)current;
                        } else {
                            e = (Entry<K,V>)((TreeNode)current).entry;
                        }
                        current = e.next;
                        action.accept(e);
                        // Per-element comodification check.
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
}