1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.*; 29 import java.lang.reflect.ParameterizedType; 30 import java.lang.reflect.Type; 31 import java.util.concurrent.ThreadLocalRandom; 32 import java.util.function.Consumer; 33 import java.util.function.BiFunction; 34 import java.util.function.Function; 35 36 /** 37 * Hash table based implementation of the <tt>Map</tt> interface. This 38 * implementation provides all of the optional map operations, and permits 39 * <tt>null</tt> values and the <tt>null</tt> key. (The <tt>HashMap</tt> 40 * class is roughly equivalent to <tt>Hashtable</tt>, except that it is 41 * unsynchronized and permits nulls.) 
This class makes no guarantees as to 42 * the order of the map; in particular, it does not guarantee that the order 43 * will remain constant over time. 44 * 45 * <p>This implementation provides constant-time performance for the basic 46 * operations (<tt>get</tt> and <tt>put</tt>), assuming the hash function 47 * disperses the elements properly among the buckets. Iteration over 48 * collection views requires time proportional to the "capacity" of the 49 * <tt>HashMap</tt> instance (the number of buckets) plus its size (the number 50 * of key-value mappings). Thus, it's very important not to set the initial 51 * capacity too high (or the load factor too low) if iteration performance is 52 * important. 53 * 54 * <p>An instance of <tt>HashMap</tt> has two parameters that affect its 55 * performance: <i>initial capacity</i> and <i>load factor</i>. The 56 * <i>capacity</i> is the number of buckets in the hash table, and the initial 57 * capacity is simply the capacity at the time the hash table is created. The 58 * <i>load factor</i> is a measure of how full the hash table is allowed to 59 * get before its capacity is automatically increased. When the number of 60 * entries in the hash table exceeds the product of the load factor and the 61 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 62 * structures are rebuilt) so that the hash table has approximately twice the 63 * number of buckets. 64 * 65 * <p>As a general rule, the default load factor (.75) offers a good tradeoff 66 * between time and space costs. Higher values decrease the space overhead 67 * but increase the lookup cost (reflected in most of the operations of the 68 * <tt>HashMap</tt> class, including <tt>get</tt> and <tt>put</tt>). The 69 * expected number of entries in the map and its load factor should be taken 70 * into account when setting its initial capacity, so as to minimize the 71 * number of rehash operations. 
If the initial capacity is greater 72 * than the maximum number of entries divided by the load factor, no 73 * rehash operations will ever occur. 74 * 75 * <p>If many mappings are to be stored in a <tt>HashMap</tt> instance, 76 * creating it with a sufficiently large capacity will allow the mappings to 77 * be stored more efficiently than letting it perform automatic rehashing as 78 * needed to grow the table. 79 * 80 * <p><strong>Note that this implementation is not synchronized.</strong> 81 * If multiple threads access a hash map concurrently, and at least one of 82 * the threads modifies the map structurally, it <i>must</i> be 83 * synchronized externally. (A structural modification is any operation 84 * that adds or deletes one or more mappings; merely changing the value 85 * associated with a key that an instance already contains is not a 86 * structural modification.) This is typically accomplished by 87 * synchronizing on some object that naturally encapsulates the map. 88 * 89 * If no such object exists, the map should be "wrapped" using the 90 * {@link Collections#synchronizedMap Collections.synchronizedMap} 91 * method. This is best done at creation time, to prevent accidental 92 * unsynchronized access to the map:<pre> 93 * Map m = Collections.synchronizedMap(new HashMap(...));</pre> 94 * 95 * <p>The iterators returned by all of this class's "collection view methods" 96 * are <i>fail-fast</i>: if the map is structurally modified at any time after 97 * the iterator is created, in any way except through the iterator's own 98 * <tt>remove</tt> method, the iterator will throw a 99 * {@link ConcurrentModificationException}. Thus, in the face of concurrent 100 * modification, the iterator fails quickly and cleanly, rather than risking 101 * arbitrary, non-deterministic behavior at an undetermined time in the 102 * future. 
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification.  Fail-fast iterators
 * throw <tt>ConcurrentModificationException</tt> on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */

public class HashMap<K,V>
    extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable
{

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * An empty table instance to share when the table is not inflated.
     * (All maps point here until the first insertion triggers inflateTable.)
     */
    static final Object[] EMPTY_TABLE = {};

    /**
     * The table, resized as necessary. Length MUST Always be a power of two.
     * Each slot holds either a linked list of Entry nodes or a TreeBin
     * (see getEntry/put below), hence the element type Object.
     */
    transient Object[] table = EMPTY_TABLE;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The next size value at which to resize (capacity * load factor).
     * @serial
     */
    // If table == EMPTY_TABLE then this is the initial capacity at which the
    // table will be created when inflated.
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;

    /**
     * The number of times this HashMap has been structurally modified
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * Holds values which can't be initialized until after VM is booted.
     */
    private static class Holder {
        // Unsafe handle used only to poke the "final" hashSeed during
        // deserialization; null when random seeding is disabled.
        static final sun.misc.Unsafe UNSAFE;

        /**
         * Offset of "final" hashSeed field we must set in
         * readObject() method.
         */
        static final long HASHSEED_OFFSET;

        // True iff the "jdk.map.useRandomSeed" system property parses as true.
        static final boolean USE_HASHSEED;

        static {
            // Read the property in a privileged block; an absent or
            // non-"true" value leaves random seeding off.
            String hashSeedProp = java.security.AccessController.doPrivileged(
                    new sun.security.action.GetPropertyAction(
                        "jdk.map.useRandomSeed"));
            boolean localBool = (null != hashSeedProp)
                    ? Boolean.parseBoolean(hashSeedProp) : false;
            USE_HASHSEED = localBool;

            if (USE_HASHSEED) {
                try {
                    UNSAFE = sun.misc.Unsafe.getUnsafe();
                    HASHSEED_OFFSET = UNSAFE.objectFieldOffset(
                        HashMap.class.getDeclaredField("hashSeed"));
                } catch (NoSuchFieldException | SecurityException e) {
                    throw new InternalError("Failed to record hashSeed offset", e);
                }
            } else {
                // Seeding disabled: no Unsafe access is ever needed.
                UNSAFE = null;
                HASHSEED_OFFSET = 0;
            }
        }
    }

    /*
     * A randomizing value associated with this instance that is applied to
     * hash code of keys to make hash collisions harder to find.
     *
     * Non-final so it can be set lazily, but be sure not to set more than once.
     */
    transient final int hashSeed;

    /*
     * TreeBin/TreeNode code from CHM doesn't handle the null key. Store the
     * null key entry here.
     */
    transient Entry<K,V> nullKeyEntry = null;

    /*
     * In order to improve performance under high hash-collision conditions,
     * HashMap will switch to storing a bin's entries in a balanced tree
     * (TreeBin) instead of a linked-list once the number of entries in the bin
     * passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of
     * the keys in the bin implements Comparable.  This technique is borrowed
     * from ConcurrentHashMap.
     */

    /*
     * Code based on CHMv8
     *
     * Node type for TreeBin
     */
    final static class TreeNode<K,V> {
        TreeNode parent;  // red-black tree links
        TreeNode left;
        TreeNode right;
        TreeNode prev;    // needed to unlink next upon deletion
        boolean red;      // node color; true = red, false = black
        // The map Entry this tree node wraps; Entry objects are recycled
        // when a list bin is converted to a TreeBin and back.
        final HashMap.Entry<K,V> entry;

        // 'next' is stored on the wrapped entry itself, keeping the bin's
        // traversal list intact alongside the tree structure.
        TreeNode(HashMap.Entry<K,V> entry, Object next, TreeNode parent) {
            this.entry = entry;
            this.entry.next = next;
            this.parent = parent;
        }
    }

    /**
     * Returns a Class for the given object of the form "class C
     * implements Comparable<C>", if one exists, else null.  See the TreeBin
     * docs, below, for explanation.
     */
    static Class<?> comparableClassFor(Object x) {
        Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
        if ((c = x.getClass()) == String.class) // bypass checks
            return c;
        if ((cmpc = Comparable.class).isAssignableFrom(c)) {
            while (cmpc.isAssignableFrom(s = c.getSuperclass()))
                c = s; // find topmost comparable class
            // Only a direct "implements Comparable<C>" on that topmost class
            // qualifies; inherited or raw Comparable yields null.
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() == cmpc) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /*
     * Code based on CHMv8
     *
     * A specialized form of red-black tree for use in bins
     * whose size exceeds a threshold.
     *
     * TreeBins use a special form of comparison for search and
     * related operations (which is the main reason we cannot use
     * existing collections such as TreeMaps). TreeBins contain
     * Comparable elements, but may contain others, as well as
     * elements that are Comparable but not necessarily Comparable<T>
     * for the same T, so we cannot invoke compareTo among them. To
     * handle this, the tree is ordered primarily by hash value, then
     * by Comparable.compareTo order if applicable.  On lookup at a
     * node, if elements are not comparable or compare as 0 then both
     * left and right children may need to be searched in the case of
     * tied hash values. (This corresponds to the full list search
     * that would be necessary if all elements were non-Comparable and
     * had tied hashes.) The red-black balancing code is updated from
     * pre-jdk-collections
     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
     * Algorithms" (CLR).
     */
    final class TreeBin {
        /*
         * The bin count threshold for using a tree rather than list for a bin. The
         * value reflects the approximate break-even point for using tree-based
         * operations.
         */
        static final int TREE_THRESHOLD = 16;

        TreeNode<K,V> root;  // root of tree
        TreeNode<K,V> first; // head of next-pointer list

        /*
         * Split a TreeBin into lo and hi parts and install in given table.
         *
         * Existing Entrys are re-used, which maintains the before/after links for
         * LinkedHashMap.Entry.
         *
         * No check for Comparable, though this is the same as CHM.
         */
        final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) {
            TreeBin oldTree = this;
            int bit = newTable.length >>> 1;
            int loCount = 0, hiCount = 0;
            TreeNode<K,V> e = oldTree.first;
            TreeNode<K,V> next;

            // This method is called when the table has just increased capacity,
            // so indexFor() is now taking one additional bit of hash into
            // account ("bit").  Entries in this TreeBin now belong in one of
            // two bins, "i" or "i+bit", depending on if the new top bit of the
            // hash is set.  The trees for the two bins are loTree and hiTree.
            // If either tree ends up containing fewer than TREE_THRESHOLD
            // entries, it is converted back to a linked list.
            while (e != null) {
                // Save entry.next - it will get overwritten in putTreeNode()
                next = (TreeNode<K,V>)e.entry.next;

                int h = e.entry.hash;
                K k = (K) e.entry.key;
                V v = e.entry.value;
                if ((h & bit) == 0) {
                    ++loCount;
                    // Re-using e.entry
                    loTree.putTreeNode(h, k, v, e.entry);
                } else {
                    ++hiCount;
                    hiTree.putTreeNode(h, k, v, e.entry);
                }
                // Iterate using the saved 'next'
                e = next;
            }
            if (loCount < TREE_THRESHOLD) { // too small, convert back to list
                HashMap.Entry loEntry = null;
                TreeNode<K,V> p = loTree.first;
                // Walk the tree's next-list, relinking the recycled Entry
                // objects into a plain list (order is reversed relative to
                // the tree's next-list).
                while (p != null) {
                    @SuppressWarnings("unchecked")
                    TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
                    p.entry.next = loEntry;
                    loEntry = p.entry;
                    p = savedNext;
                }
                // assert newTable[i] == null;
                newTable[i] = loEntry;
            } else {
                // assert newTable[i] == null;
                newTable[i] = loTree;
            }
            if (hiCount < TREE_THRESHOLD) { // too small, convert back to list
                HashMap.Entry hiEntry = null;
                TreeNode<K,V> p = hiTree.first;
                while (p != null) {
                    @SuppressWarnings("unchecked")
                    TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
                    p.entry.next = hiEntry;
                    hiEntry = p.entry;
                    p = savedNext;
                }
                // assert newTable[i + bit] == null;
                newTable[i + bit] = hiEntry;
            } else {
                // assert newTable[i + bit] == null;
                newTable[i + bit] = hiTree;
            }
        }

        /*
         * Populate the TreeBin with entries from the linked list e
         *
         * Assumes 'this' is a new/empty TreeBin
         *
         * Note: no check for Comparable
         * Note: I believe this changes iteration order
         */
        @SuppressWarnings("unchecked")
        void populate(HashMap.Entry e) {
            // assert root == null;
            // assert first == null;
            HashMap.Entry next;
            while (e != null) {
                // Save entry.next - it will get overwritten in putTreeNode()
                next = (HashMap.Entry)e.next;
                // Re-using Entry e will maintain before/after in LinkedHM
                putTreeNode(e.hash, (K)e.key, (V)e.value, e);
                // Iterate using the saved 'next'
                e = next;
            }
        }

        /**
         * Copied from CHMv8
         * From CLR
         */
        private void rotateLeft(TreeNode p) {
            if (p != null) {
                TreeNode r = p.right, pp, rl;
                // r's left subtree becomes p's right subtree
                if ((rl = p.right = r.left) != null) {
                    rl.parent = p;
                }
                // r replaces p under p's parent (or as root)
                if ((pp = r.parent = p.parent) == null) {
                    root = r;
                } else if (pp.left == p) {
                    pp.left = r;
                } else {
                    pp.right = r;
                }
                r.left = p;
                p.parent = r;
            }
        }

        /**
         * Copied from CHMv8
         * From CLR
         */
        private void rotateRight(TreeNode p) {
            if (p != null) {
                TreeNode l = p.left, pp, lr;
                // l's right subtree becomes p's left subtree
                if ((lr = p.left = l.right) != null) {
                    lr.parent = p;
                }
                // l replaces p under p's parent (or as root)
                if ((pp = l.parent = p.parent) == null) {
                    root = l;
                } else if (pp.right == p) {
                    pp.right = l;
                } else {
                    pp.left = l;
                }
                l.right = p;
                p.parent = l;
            }
        }

        /**
         * Returns the TreeNode (or null if not found) for the given
         * key.  A front-end for recursive version.
         */
        final TreeNode getTreeNode(int h, K k) {
            return getTreeNode(h, k, root, comparableClassFor(k));
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        @SuppressWarnings("unchecked")
        final TreeNode getTreeNode (int h, K k, TreeNode p, Class<?> cc) {
            // assert k != null;
            while (p != null) {
                int dir, ph;  Object pk;
                if ((ph = p.entry.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.entry.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // Hashes tie and keys are not mutually comparable:
                    // the match could be on either side of this node.
                    // assert pk != null;
                    TreeNode r, pl, pr; // check both sides
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else if ((pl = p.left) != null)
                        dir = -1;
                    else // nothing there
                        break;
                }
                p = (dir > 0) ? p.right : p.left;
            }
            return null;
        }

        /*
         * Finds or adds a node.
         *
         * 'entry' should be used to recycle an existing Entry (e.g. in the case
         * of converting a linked-list bin to a TreeBin).
         * If entry is null, a new Entry will be created for the new TreeNode
         *
         * @return the TreeNode containing the mapping, or null if a new
         * TreeNode was added
         */
        @SuppressWarnings("unchecked")
        TreeNode putTreeNode(int h, K k, V v, HashMap.Entry<K,V> entry) {
            // assert k != null;
            //if (entry != null) {
            //    assert h == entry.hash;
            //    assert k == entry.key;
            //    assert v == entry.value;
            // }
            Class<?> cc = comparableClassFor(k);
            TreeNode pp = root, p = null;
            int dir = 0;
            while (pp != null) { // find existing node or leaf to insert at
                int ph;  Object pk;
                p = pp;
                if ((ph = p.entry.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.entry.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || comparableClassFor(pk) != cc ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    // Untied hashes but incomparable keys: search the right
                    // subtree for an existing match, otherwise descend left.
                    TreeNode r, pr;
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                pp = (dir > 0) ? p.right : p.left;
            }

            // Didn't find the mapping in the tree, so add it
            TreeNode f = first;
            TreeNode x;
            if (entry != null) {
                x = new TreeNode(entry, f, p);
            } else {
                x = new TreeNode(newEntry(h, k, v, null), f, p);
            }
            first = x;  // new node becomes head of the next-pointer list

            if (p == null) {
                root = x;
            } else { // attach and rebalance; adapted from CLR
                TreeNode xp, xpp;
                if (f != null) {
                    f.prev = x;
                }
                if (dir <= 0) {
                    p.left = x;
                } else {
                    p.right = x;
                }
                x.red = true;
                // Standard red-black insertion fixup: walk up while the
                // parent is red, recoloring or rotating as dictated by the
                // uncle's color.
                while (x != null && (xp = x.parent) != null && xp.red
                       && (xpp = xp.parent) != null) {
                    TreeNode xppl = xpp.left;
                    if (xp == xppl) {
                        TreeNode y = xpp.right;
                        if (y != null && y.red) {
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        } else {
                            if (x == xp.right) {
                                rotateLeft(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateRight(xpp);
                                }
                            }
                        }
                    } else {
                        TreeNode y = xppl;
                        if (y != null && y.red) {
                            y.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        } else {
                            if (x == xp.left) {
                                rotateRight(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateLeft(xpp);
                                }
                            }
                        }
                    }
                }
                // Root is always black.
                TreeNode r = root;
                if (r != null && r.red) {
                    r.red = false;
                }
            }
            return null;
        }

        /*
         * From CHMv8
         *
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         */
        final void deleteTreeNode(TreeNode p) {
            TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers
            TreeNode pred = p.prev;
            if (pred == null) {
                first = next;
            } else {
                pred.entry.next = next;
            }
            if (next != null) {
                next.prev = pred;
            }
            TreeNode replacement;
            TreeNode pl = p.left;
            TreeNode pr = p.right;
            if (pl != null && pr != null) {
                // Interior node with two children: swap p's *tree linkage*
                // (not contents) with its in-order successor s, then delete
                // p from its new, at-most-one-child position.
                TreeNode s = pr, sl;
                while ((sl = s.left) != null) // find successor
                {
                    s = sl;
                }
                boolean c = s.red;
                s.red = p.red;
                p.red = c; // swap colors
                TreeNode sr = s.right;
                TreeNode pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                } else {
                    TreeNode sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left) {
                            sp.left = p;
                        } else {
                            sp.right = p;
                        }
                    }
                    if ((s.right = pr) != null) {
                        pr.parent = s;
                    }
                }
                p.left = null;
                if ((p.right = sr) != null) {
                    sr.parent = p;
                }
                if ((s.left = pl) != null) {
                    pl.parent = s;
                }
                if ((s.parent = pp) == null) {
                    root = s;
                } else if (p == pp.left) {
                    pp.left = s;
                } else {
                    pp.right = s;
                }
                replacement = sr;
            } else {
                replacement = (pl != null) ? pl : pr;
            }
            TreeNode pp = p.parent;
            if (replacement == null) {
                if (pp == null) {
                    root = null;
                    return;
                }
                // Leaf node: keep p in place through rebalancing, detach below.
                replacement = p;
            } else {
                replacement.parent = pp;
                if (pp == null) {
                    root = replacement;
                } else if (p == pp.left) {
                    pp.left = replacement;
                } else {
                    pp.right = replacement;
                }
                p.left = p.right = p.parent = null;
            }
            if (!p.red) { // rebalance, from CLR
                TreeNode x = replacement;
                while (x != null) {
                    TreeNode xp, xpl;
                    if (x.red || (xp = x.parent) == null) {
                        x.red = false;
                        break;
                    }
                    if (x == (xpl = xp.left)) {
                        TreeNode sib = xp.right;
                        if (sib != null && sib.red) {
                            sib.red = false;
                            xp.red = true;
                            rotateLeft(xp);
                            sib = (xp = x.parent) == null ? null : xp.right;
                        }
                        if (sib == null) {
                            x = xp;
                        } else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sr == null || !sr.red)
                                && (sl == null || !sl.red)) {
                                sib.red = true;
                                x = xp;
                            } else {
                                if (sr == null || !sr.red) {
                                    if (sl != null) {
                                        sl.red = false;
                                    }
                                    sib.red = true;
                                    rotateRight(sib);
                                    sib = (xp = x.parent) == null ?
                                        null : xp.right;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sr = sib.right) != null) {
                                        sr.red = false;
                                    }
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateLeft(xp);
                                }
                                x = root;
                            }
                        }
                    } else { // symmetric
                        TreeNode sib = xpl;
                        if (sib != null && sib.red) {
                            sib.red = false;
                            xp.red = true;
                            rotateRight(xp);
                            sib = (xp = x.parent) == null ? null : xp.left;
                        }
                        if (sib == null) {
                            x = xp;
                        } else {
                            TreeNode sl = sib.left, sr = sib.right;
                            if ((sl == null || !sl.red)
                                && (sr == null || !sr.red)) {
                                sib.red = true;
                                x = xp;
                            } else {
                                if (sl == null || !sl.red) {
                                    if (sr != null) {
                                        sr.red = false;
                                    }
                                    sib.red = true;
                                    rotateLeft(sib);
                                    sib = (xp = x.parent) == null ?
                                        null : xp.left;
                                }
                                if (sib != null) {
                                    sib.red = (xp == null) ? false : xp.red;
                                    if ((sl = sib.left) != null) {
                                        sl.red = false;
                                    }
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateRight(xp);
                                }
                                x = root;
                            }
                        }
                    }
                }
            }
            // Leaf case deferred from above: finally detach p from its parent.
            if (p == replacement && (pp = p.parent) != null) {
                if (p == pp.left) // detach pointers
                {
                    pp.left = null;
                } else if (p == pp.right) {
                    pp.right = null;
                }
                p.parent = null;
            }
        }
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and load factor.
     *
     * @param  initialCapacity the initial capacity
     * @param  loadFactor      the load factor
     * @throws IllegalArgumentException if the initial capacity is negative
     *         or the load factor is nonpositive
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // Table creation is deferred; until then 'threshold' caches the
        // requested initial capacity (see inflateTable).
        threshold = initialCapacity;
        hashSeed = initHashSeed();
        init();
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param  initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the default initial capacity
     * (16) and the default load factor (0.75).
     */
    public HashMap() {
        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs a new <tt>HashMap</tt> with the same mappings as the
     * specified <tt>Map</tt>.  The <tt>HashMap</tt> is created with
     * default load factor (0.75) and an initial capacity sufficient to
     * hold the mappings in the specified <tt>Map</tt>.
     *
     * @param   m the map whose mappings are to be placed in this map
     * @throws  NullPointerException if the specified map is null
     */
    public HashMap(Map<? extends K, ? extends V> m) {
        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
                      DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR);
        inflateTable(threshold);

        putAllForCreate(m);
        // assert size == m.size();
    }

    private static int roundUpToPowerOf2(int number) {
        // assert number >= 0 : "number must be non-negative";
        // highestOneBit gives the largest power of two <= number; shift once
        // more when number is not itself a power of two (bitCount > 1).
        int rounded = number >= MAXIMUM_CAPACITY
                ? MAXIMUM_CAPACITY
                : (rounded = Integer.highestOneBit(number)) != 0
                    ? (Integer.bitCount(number) > 1) ? rounded << 1 : rounded
                    : 1;

        return rounded;
    }

    /**
     * Inflates the table.
     */
    private void inflateTable(int toSize) {
        // Find a power of 2 >= toSize
        int capacity = roundUpToPowerOf2(toSize);

        threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1);
        table = new Object[capacity];
    }

    // internal utilities

    /**
     * Initialization hook for subclasses. This method is called
     * in all constructors and pseudo-constructors (clone, readObject)
     * after HashMap has been initialized but before any entries have
     * been inserted.  (In the absence of this method, readObject would
     * require explicit knowledge of subclasses.)
     */
    void init() {
    }

    /**
     * Return an initial value for the hashSeed, or 0 if the random seed is not
     * enabled.
     */
    final int initHashSeed() {
        if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) {
            int seed = ThreadLocalRandom.current().nextInt();
            // 0 means "no seed", so never return it when seeding is on.
            return (seed != 0) ? seed : 1;
        }
        return 0;
    }

    /**
     * Retrieve object hash code and applies a supplemental hash function to the
     * result hash, which defends against poor quality hash functions.  This is
     * critical because HashMap uses power-of-two length hash tables, that
     * otherwise encounter collisions for hashCodes that do not differ
     * in lower bits.
     */
    final int hash(Object k) {
        int h = hashSeed ^ k.hashCode();

        // This function ensures that hashCodes that differ only by
        // constant multiples at each bit position have a bounded
        // number of collisions (approximately 8 at default load factor).
        h ^= (h >>> 20) ^ (h >>> 12);
        return h ^ (h >>> 7) ^ (h >>> 4);
    }

    /**
     * Returns index for hash code h.
     */
    static int indexFor(int h, int length) {
        // assert Integer.bitCount(length) == 1 : "length must be a non-zero power of 2";
        return h & (length-1);
    }

    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        return size;
    }

    /**
     * Returns <tt>true</tt> if this map contains no key-value mappings.
     *
     * @return <tt>true</tt> if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return size == 0;
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
     * key.equals(k))}, then this method returns {@code v}; otherwise
     * it returns {@code null}.  (There can be at most one such mapping.)
973 * 974 * <p>A return value of {@code null} does not <i>necessarily</i> 975 * indicate that the map contains no mapping for the key; it's also 976 * possible that the map explicitly maps the key to {@code null}. 977 * The {@link #containsKey containsKey} operation may be used to 978 * distinguish these two cases. 979 * 980 * @see #put(Object, Object) 981 */ 982 @SuppressWarnings("unchecked") 983 public V get(Object key) { 984 Entry<K,V> entry = getEntry(key); 985 986 return null == entry ? null : entry.getValue(); 987 } 988 989 @Override 990 public V getOrDefault(Object key, V defaultValue) { 991 Entry<K,V> entry = getEntry(key); 992 993 return (entry == null) ? defaultValue : entry.getValue(); 994 } 995 996 /** 997 * Returns <tt>true</tt> if this map contains a mapping for the 998 * specified key. 999 * 1000 * @param key The key whose presence in this map is to be tested 1001 * @return <tt>true</tt> if this map contains a mapping for the specified 1002 * key. 1003 */ 1004 public boolean containsKey(Object key) { 1005 return getEntry(key) != null; 1006 } 1007 1008 /** 1009 * Returns the entry associated with the specified key in the 1010 * HashMap. Returns null if the HashMap contains no mapping 1011 * for the key. 
 */
@SuppressWarnings("unchecked")
final Entry<K,V> getEntry(Object key) {
    if (isEmpty()) {
        return null;
    }
    if (key == null) {
        // The null key lives outside the table, in its own field.
        return nullKeyEntry;
    }
    int hash = hash(key);
    int bin = indexFor(hash, table.length);

    if (table[bin] instanceof Entry) {
        // Bin holds an ordinary linked list of Entries: linear scan.
        Entry<K,V> e = (Entry<K,V>) table[bin];
        for (; e != null; e = (Entry<K,V>)e.next) {
            Object k;
            if (e.hash == hash &&
                ((k = e.key) == key || key.equals(k))) {
                return e;
            }
        }
    } else if (table[bin] != null) {
        // Bin has been converted to a TreeBin: balanced-tree lookup.
        TreeBin e = (TreeBin)table[bin];
        TreeNode p = e.getTreeNode(hash, (K)key);
        if (p != null) {
            // assert p.entry.hash == hash && p.entry.key.equals(key);
            return (Entry<K,V>)p.entry;
        } else {
            return null;
        }
    }
    return null;
}


/**
 * Associates the specified value with the specified key in this map.
 * If the map previously contained a mapping for the key, the old
 * value is replaced.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with <tt>key</tt>, or
 *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
 *         (A <tt>null</tt> return can also indicate that the map
 *         previously associated <tt>null</tt> with <tt>key</tt>.)
 */
@SuppressWarnings("unchecked")
public V put(K key, V value) {
    if (table == EMPTY_TABLE) {
        // Lazily allocate the table on first insertion.
        inflateTable(threshold);
    }
    if (key == null)
        return putForNullKey(value);
    int hash = hash(key);
    int i = indexFor(hash, table.length);
    boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

    if (table[i] instanceof Entry) {
        // Bin contains ordinary Entries.  Search for key in the linked list
        // of entries, counting the number of entries.  Only check for
        // TreeBin conversion if the list size is >= TREE_THRESHOLD.
        // (The conversion still may not happen if the table gets resized.)
        int listSize = 0;
        Entry<K,V> e = (Entry<K,V>) table[i];
        for (; e != null; e = (Entry<K,V>)e.next) {
            Object k;
            if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
                V oldValue = e.value;
                e.value = value;
                e.recordAccess(this);
                return oldValue;
            }
            listSize++;
        }
        // Didn't find, so fall through and call addEntry() to add the
        // Entry and check for TreeBin conversion.
        checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
    } else if (table[i] != null) {
        TreeBin e = (TreeBin)table[i];
        TreeNode p = e.putTreeNode(hash, key, value, null);
        if (p == null) { // putTreeNode() added a new node
            modCount++;
            size++;
            if (size >= threshold) {
                resize(2 * table.length);
            }
            return null;
        } else { // putTreeNode() found an existing node
            Entry<K,V> pEntry = (Entry<K,V>)p.entry;
            V oldVal = pEntry.value;
            pEntry.value = value;
            pEntry.recordAccess(this);
            return oldVal;
        }
    }
    // Empty bin: add a fresh Entry (addEntry also handles resize/tree checks).
    modCount++;
    addEntry(hash, key, value, i, checkIfNeedTree);
    return null;
}

/**
 * Offloaded version of put for null keys
 */
private V putForNullKey(V value) {
    if (nullKeyEntry != null) {
        // Replace existing null-key mapping; no structural change.
        V oldValue = nullKeyEntry.value;
        nullKeyEntry.value = value;
        nullKeyEntry.recordAccess(this);
        return oldValue;
    }
    modCount++;
    size++; // newEntry() skips size++
    nullKeyEntry = newEntry(0, null, value, null);
    return null;
}

/**
 * Offloaded version of putForCreate for null keys.  Used by
 * constructors and pseudo-constructors; does not bump modCount.
 */
private void putForCreateNullKey(V value) {
    // Look for preexisting entry for key.  This will never happen for
    // clone or deserialize.  It will only happen for construction if the
    // input Map is a sorted map whose ordering is inconsistent w/ equals.
    if (nullKeyEntry != null) {
        nullKeyEntry.value = value;
    } else {
        nullKeyEntry = newEntry(0, null, value, null);
        size++;
    }
}


/**
 * This method is used instead of put by constructors and
 * pseudoconstructors (clone, readObject).  It does not resize the table,
 * check for comodification, etc, though it will convert bins to TreeBins
 * as needed.  It calls createEntry rather than addEntry.
 */
@SuppressWarnings("unchecked")
private void putForCreate(K key, V value) {
    if (null == key) {
        putForCreateNullKey(value);
        return;
    }
    int hash = hash(key);
    int i = indexFor(hash, table.length);
    boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?

    /*
     * Look for preexisting entry for key.  This will never happen for
     * clone or deserialize.  It will only happen for construction if the
     * input Map is a sorted map whose ordering is inconsistent w/ equals.
     */
    if (table[i] instanceof Entry) {
        int listSize = 0;
        Entry<K,V> e = (Entry<K,V>) table[i];
        for (; e != null; e = (Entry<K,V>)e.next) {
            Object k;
            if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
                e.value = value;
                return;
            }
            listSize++;
        }
        // Didn't find, fall through to createEntry().
        // Check for conversion to TreeBin done via createEntry().
        checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
    } else if (table[i] != null) {
        TreeBin e = (TreeBin)table[i];
        TreeNode p = e.putTreeNode(hash, key, value, null);
        if (p != null) {
            p.entry.setValue(value); // Found an existing node, set value
        } else {
            size++; // Added a new TreeNode, so update size
        }
        // don't need modCount++/check for resize - just return
        return;
    }

    createEntry(hash, key, value, i, checkIfNeedTree);
}

// Bulk version of putForCreate; used by clone() and readObject().
private void putAllForCreate(Map<? extends K, ? extends V> m) {
    for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
        putForCreate(e.getKey(), e.getValue());
}

/**
 * Rehashes the contents of this map into a new array with a
 * larger capacity.  This method is called automatically when the
 * number of keys in this map reaches its threshold.
 *
 * If current capacity is MAXIMUM_CAPACITY, this method does not
 * resize the map, but sets threshold to Integer.MAX_VALUE.
 * This has the effect of preventing future calls.
 *
 * @param newCapacity the new capacity, MUST be a power of two;
 *        must be greater than current capacity unless current
 *        capacity is MAXIMUM_CAPACITY (in which case value
 *        is irrelevant).
 */
void resize(int newCapacity) {
    Object[] oldTable = table;
    int oldCapacity = oldTable.length;
    if (oldCapacity == MAXIMUM_CAPACITY) {
        // Can't grow any further; disable future resize attempts.
        threshold = Integer.MAX_VALUE;
        return;
    }

    Object[] newTable = new Object[newCapacity];
    transfer(newTable);
    table = newTable;
    threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1);
}

/**
 * Transfers all entries from current table to newTable.
1227 * 1228 * Assumes newTable is larger than table 1229 */ 1230 @SuppressWarnings("unchecked") 1231 void transfer(Object[] newTable) { 1232 Object[] src = table; 1233 // assert newTable.length > src.length : "newTable.length(" + 1234 // newTable.length + ") expected to be > src.length("+src.length+")"; 1235 int newCapacity = newTable.length; 1236 for (int j = 0; j < src.length; j++) { 1237 if (src[j] instanceof Entry) { 1238 // Assume: since wasn't TreeBin before, won't need TreeBin now 1239 Entry<K,V> e = (Entry<K,V>) src[j]; 1240 while (null != e) { 1241 Entry<K,V> next = (Entry<K,V>)e.next; 1242 int i = indexFor(e.hash, newCapacity); 1243 e.next = (Entry<K,V>) newTable[i]; 1244 newTable[i] = e; 1245 e = next; 1246 } 1247 } else if (src[j] != null) { 1248 TreeBin e = (TreeBin) src[j]; 1249 TreeBin loTree = new TreeBin(); 1250 TreeBin hiTree = new TreeBin(); 1251 e.splitTreeBin(newTable, j, loTree, hiTree); 1252 } 1253 } 1254 Arrays.fill(table, null); 1255 } 1256 1257 /** 1258 * Copies all of the mappings from the specified map to this map. 1259 * These mappings will replace any mappings that this map had for 1260 * any of the keys currently in the specified map. 1261 * 1262 * @param m mappings to be stored in this map 1263 * @throws NullPointerException if the specified map is null 1264 */ 1265 public void putAll(Map<? extends K, ? extends V> m) { 1266 int numKeysToBeAdded = m.size(); 1267 if (numKeysToBeAdded == 0) 1268 return; 1269 1270 if (table == EMPTY_TABLE) { 1271 inflateTable((int) Math.max(numKeysToBeAdded * loadFactor, threshold)); 1272 } 1273 1274 /* 1275 * Expand the map if the map if the number of mappings to be added 1276 * is greater than or equal to threshold. This is conservative; the 1277 * obvious condition is (m.size() + size) >= threshold, but this 1278 * condition could result in a map with twice the appropriate capacity, 1279 * if the keys to be added overlap with the keys already in this map. 
1280 * By using the conservative calculation, we subject ourself 1281 * to at most one extra resize. 1282 */ 1283 if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) { 1284 resize(table.length * 2); 1285 } 1286 1287 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) 1288 put(e.getKey(), e.getValue()); 1289 } 1290 1291 /** 1292 * Removes the mapping for the specified key from this map if present. 1293 * 1294 * @param key key whose mapping is to be removed from the map 1295 * @return the previous value associated with <tt>key</tt>, or 1296 * <tt>null</tt> if there was no mapping for <tt>key</tt>. 1297 * (A <tt>null</tt> return can also indicate that the map 1298 * previously associated <tt>null</tt> with <tt>key</tt>.) 1299 */ 1300 public V remove(Object key) { 1301 Entry<K,V> e = removeEntryForKey(key); 1302 return (e == null ? null : e.value); 1303 } 1304 1305 // optimized implementations of default methods in Map 1306 1307 @Override 1308 public V putIfAbsent(K key, V value) { 1309 if (table == EMPTY_TABLE) { 1310 inflateTable(threshold); 1311 } 1312 if (key == null) { 1313 if (nullKeyEntry == null || nullKeyEntry.value == null) { 1314 putForNullKey(value); 1315 return null; 1316 } else { 1317 return nullKeyEntry.value; 1318 } 1319 } 1320 int hash = hash(key); 1321 int i = indexFor(hash, table.length); 1322 boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 1323 1324 if (table[i] instanceof Entry) { 1325 int listSize = 0; 1326 Entry<K,V> e = (Entry<K,V>) table[i]; 1327 for (; e != null; e = (Entry<K,V>)e.next) { 1328 if (e.hash == hash && Objects.equals(e.key, key)) { 1329 if (e.value != null) { 1330 return e.value; 1331 } 1332 e.value = value; 1333 e.recordAccess(this); 1334 return null; 1335 } 1336 listSize++; 1337 } 1338 // Didn't find, so fall through and call addEntry() to add the 1339 // Entry and check for TreeBin conversion. 
1340 checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; 1341 } else if (table[i] != null) { 1342 TreeBin e = (TreeBin)table[i]; 1343 TreeNode p = e.putTreeNode(hash, key, value, null); 1344 if (p == null) { // not found, putTreeNode() added a new node 1345 modCount++; 1346 size++; 1347 if (size >= threshold) { 1348 resize(2 * table.length); 1349 } 1350 return null; 1351 } else { // putTreeNode() found an existing node 1352 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1353 V oldVal = pEntry.value; 1354 if (oldVal == null) { // only replace if maps to null 1355 pEntry.value = value; 1356 pEntry.recordAccess(this); 1357 } 1358 return oldVal; 1359 } 1360 } 1361 modCount++; 1362 addEntry(hash, key, value, i, checkIfNeedTree); 1363 return null; 1364 } 1365 1366 @Override 1367 public boolean remove(Object key, Object value) { 1368 if (isEmpty()) { 1369 return false; 1370 } 1371 if (key == null) { 1372 if (nullKeyEntry != null && 1373 Objects.equals(nullKeyEntry.value, value)) { 1374 removeNullKey(); 1375 return true; 1376 } 1377 return false; 1378 } 1379 int hash = hash(key); 1380 int i = indexFor(hash, table.length); 1381 1382 if (table[i] instanceof Entry) { 1383 @SuppressWarnings("unchecked") 1384 Entry<K,V> prev = (Entry<K,V>) table[i]; 1385 Entry<K,V> e = prev; 1386 while (e != null) { 1387 @SuppressWarnings("unchecked") 1388 Entry<K,V> next = (Entry<K,V>) e.next; 1389 if (e.hash == hash && Objects.equals(e.key, key)) { 1390 if (!Objects.equals(e.value, value)) { 1391 return false; 1392 } 1393 modCount++; 1394 size--; 1395 if (prev == e) 1396 table[i] = next; 1397 else 1398 prev.next = next; 1399 e.recordRemoval(this); 1400 return true; 1401 } 1402 prev = e; 1403 e = next; 1404 } 1405 } else if (table[i] != null) { 1406 TreeBin tb = ((TreeBin) table[i]); 1407 TreeNode p = tb.getTreeNode(hash, (K)key); 1408 if (p != null) { 1409 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1410 // assert pEntry.key.equals(key); 1411 if (Objects.equals(pEntry.value, value)) { 1412 
modCount++; 1413 size--; 1414 tb.deleteTreeNode(p); 1415 pEntry.recordRemoval(this); 1416 if (tb.root == null || tb.first == null) { 1417 // assert tb.root == null && tb.first == null : 1418 // "TreeBin.first and root should both be null"; 1419 // TreeBin is now empty, we should blank this bin 1420 table[i] = null; 1421 } 1422 return true; 1423 } 1424 } 1425 } 1426 return false; 1427 } 1428 1429 @Override 1430 public boolean replace(K key, V oldValue, V newValue) { 1431 if (isEmpty()) { 1432 return false; 1433 } 1434 if (key == null) { 1435 if (nullKeyEntry != null && 1436 Objects.equals(nullKeyEntry.value, oldValue)) { 1437 putForNullKey(newValue); 1438 return true; 1439 } 1440 return false; 1441 } 1442 int hash = hash(key); 1443 int i = indexFor(hash, table.length); 1444 1445 if (table[i] instanceof Entry) { 1446 @SuppressWarnings("unchecked") 1447 Entry<K,V> e = (Entry<K,V>) table[i]; 1448 for (; e != null; e = (Entry<K,V>)e.next) { 1449 if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { 1450 e.value = newValue; 1451 e.recordAccess(this); 1452 return true; 1453 } 1454 } 1455 return false; 1456 } else if (table[i] != null) { 1457 TreeBin tb = ((TreeBin) table[i]); 1458 TreeNode p = tb.getTreeNode(hash, key); 1459 if (p != null) { 1460 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1461 // assert pEntry.key.equals(key); 1462 if (Objects.equals(pEntry.value, oldValue)) { 1463 pEntry.value = newValue; 1464 pEntry.recordAccess(this); 1465 return true; 1466 } 1467 } 1468 } 1469 return false; 1470 } 1471 1472 @Override 1473 public V replace(K key, V value) { 1474 if (isEmpty()) { 1475 return null; 1476 } 1477 if (key == null) { 1478 if (nullKeyEntry != null) { 1479 return putForNullKey(value); 1480 } 1481 return null; 1482 } 1483 int hash = hash(key); 1484 int i = indexFor(hash, table.length); 1485 if (table[i] instanceof Entry) { 1486 @SuppressWarnings("unchecked") 1487 Entry<K,V> e = (Entry<K,V>)table[i]; 1488 for (; e != null; e = 
(Entry<K,V>)e.next) { 1489 if (e.hash == hash && Objects.equals(e.key, key)) { 1490 V oldValue = e.value; 1491 e.value = value; 1492 e.recordAccess(this); 1493 return oldValue; 1494 } 1495 } 1496 1497 return null; 1498 } else if (table[i] != null) { 1499 TreeBin tb = ((TreeBin) table[i]); 1500 TreeNode p = tb.getTreeNode(hash, key); 1501 if (p != null) { 1502 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1503 // assert pEntry.key.equals(key); 1504 V oldValue = pEntry.value; 1505 pEntry.value = value; 1506 pEntry.recordAccess(this); 1507 return oldValue; 1508 } 1509 } 1510 return null; 1511 } 1512 1513 @Override 1514 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { 1515 if (table == EMPTY_TABLE) { 1516 inflateTable(threshold); 1517 } 1518 if (key == null) { 1519 if (nullKeyEntry == null || nullKeyEntry.value == null) { 1520 V newValue = mappingFunction.apply(key); 1521 if (newValue != null) { 1522 putForNullKey(newValue); 1523 } 1524 return newValue; 1525 } 1526 return nullKeyEntry.value; 1527 } 1528 int hash = hash(key); 1529 int i = indexFor(hash, table.length); 1530 boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
1531 1532 if (table[i] instanceof Entry) { 1533 int listSize = 0; 1534 @SuppressWarnings("unchecked") 1535 Entry<K,V> e = (Entry<K,V>)table[i]; 1536 for (; e != null; e = (Entry<K,V>)e.next) { 1537 if (e.hash == hash && Objects.equals(e.key, key)) { 1538 V oldValue = e.value; 1539 if (oldValue == null) { 1540 V newValue = mappingFunction.apply(key); 1541 if (newValue != null) { 1542 e.value = newValue; 1543 e.recordAccess(this); 1544 } 1545 return newValue; 1546 } 1547 return oldValue; 1548 } 1549 listSize++; 1550 } 1551 // Didn't find, fall through to call the mapping function 1552 checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; 1553 } else if (table[i] != null) { 1554 TreeBin e = (TreeBin)table[i]; 1555 V value = mappingFunction.apply(key); 1556 if (value == null) { // Return the existing value, if any 1557 TreeNode p = e.getTreeNode(hash, key); 1558 if (p != null) { 1559 return (V) p.entry.value; 1560 } 1561 return null; 1562 } else { // Put the new value into the Tree, if absent 1563 TreeNode p = e.putTreeNode(hash, key, value, null); 1564 if (p == null) { // not found, new node was added 1565 modCount++; 1566 size++; 1567 if (size >= threshold) { 1568 resize(2 * table.length); 1569 } 1570 return value; 1571 } else { // putTreeNode() found an existing node 1572 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1573 V oldVal = pEntry.value; 1574 if (oldVal == null) { // only replace if maps to null 1575 pEntry.value = value; 1576 pEntry.recordAccess(this); 1577 return value; 1578 } 1579 return oldVal; 1580 } 1581 } 1582 } 1583 V newValue = mappingFunction.apply(key); 1584 if (newValue != null) { // add Entry and check for TreeBin conversion 1585 modCount++; 1586 addEntry(hash, key, newValue, i, checkIfNeedTree); 1587 } 1588 1589 return newValue; 1590 } 1591 1592 @Override 1593 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { 1594 if (isEmpty()) { 1595 return null; 1596 } 1597 if (key == null) { 1598 V oldValue; 1599 if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) { 1600 V newValue = remappingFunction.apply(key, oldValue); 1601 if (newValue != null ) { 1602 putForNullKey(newValue); 1603 return newValue; 1604 } else { 1605 removeNullKey(); 1606 } 1607 } 1608 return null; 1609 } 1610 int hash = hash(key); 1611 int i = indexFor(hash, table.length); 1612 if (table[i] instanceof Entry) { 1613 @SuppressWarnings("unchecked") 1614 Entry<K,V> prev = (Entry<K,V>)table[i]; 1615 Entry<K,V> e = prev; 1616 while (e != null) { 1617 Entry<K,V> next = (Entry<K,V>)e.next; 1618 if (e.hash == hash && Objects.equals(e.key, key)) { 1619 V oldValue = e.value; 1620 if (oldValue == null) 1621 break; 1622 V newValue = remappingFunction.apply(key, oldValue); 1623 if (newValue == null) { 1624 modCount++; 1625 size--; 1626 if (prev == e) 1627 table[i] = next; 1628 else 1629 prev.next = next; 1630 e.recordRemoval(this); 1631 } else { 1632 e.value = newValue; 1633 e.recordAccess(this); 1634 } 1635 return newValue; 1636 } 1637 prev = e; 1638 e = next; 1639 } 1640 } else if (table[i] != null) { 1641 TreeBin tb = (TreeBin)table[i]; 1642 TreeNode p = tb.getTreeNode(hash, key); 1643 if (p != null) { 1644 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1645 // assert pEntry.key.equals(key); 1646 V oldValue = pEntry.value; 1647 if (oldValue != null) { 1648 V newValue = remappingFunction.apply(key, oldValue); 1649 if (newValue == null) { // remove mapping 1650 modCount++; 1651 size--; 1652 tb.deleteTreeNode(p); 1653 pEntry.recordRemoval(this); 1654 if (tb.root == null || tb.first == null) { 1655 // assert tb.root == null && tb.first == null : 1656 // "TreeBin.first and root should both be null"; 1657 // TreeBin is now empty, we should blank this bin 1658 table[i] = null; 1659 } 1660 } else { 1661 pEntry.value = newValue; 1662 pEntry.recordAccess(this); 1663 } 1664 return 
newValue; 1665 } 1666 } 1667 } 1668 return null; 1669 } 1670 1671 @Override 1672 public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1673 if (table == EMPTY_TABLE) { 1674 inflateTable(threshold); 1675 } 1676 if (key == null) { 1677 V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; 1678 V newValue = remappingFunction.apply(key, oldValue); 1679 if (newValue != oldValue) { 1680 if (newValue == null) { 1681 removeNullKey(); 1682 } else { 1683 putForNullKey(newValue); 1684 } 1685 } 1686 return newValue; 1687 } 1688 int hash = hash(key); 1689 int i = indexFor(hash, table.length); 1690 boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 1691 1692 if (table[i] instanceof Entry) { 1693 int listSize = 0; 1694 @SuppressWarnings("unchecked") 1695 Entry<K,V> prev = (Entry<K,V>)table[i]; 1696 Entry<K,V> e = prev; 1697 1698 while (e != null) { 1699 Entry<K,V> next = (Entry<K,V>)e.next; 1700 if (e.hash == hash && Objects.equals(e.key, key)) { 1701 V oldValue = e.value; 1702 V newValue = remappingFunction.apply(key, oldValue); 1703 if (newValue != oldValue) { 1704 if (newValue == null) { 1705 modCount++; 1706 size--; 1707 if (prev == e) 1708 table[i] = next; 1709 else 1710 prev.next = next; 1711 e.recordRemoval(this); 1712 } else { 1713 e.value = newValue; 1714 e.recordAccess(this); 1715 } 1716 } 1717 return newValue; 1718 } 1719 prev = e; 1720 e = next; 1721 listSize++; 1722 } 1723 checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; 1724 } else if (table[i] != null) { 1725 TreeBin tb = (TreeBin)table[i]; 1726 TreeNode p = tb.getTreeNode(hash, key); 1727 V oldValue = p == null ? 
null : (V)p.entry.value; 1728 V newValue = remappingFunction.apply(key, oldValue); 1729 if (newValue != oldValue) { 1730 if (newValue == null) { 1731 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1732 modCount++; 1733 size--; 1734 tb.deleteTreeNode(p); 1735 pEntry.recordRemoval(this); 1736 if (tb.root == null || tb.first == null) { 1737 // assert tb.root == null && tb.first == null : 1738 // "TreeBin.first and root should both be null"; 1739 // TreeBin is now empty, we should blank this bin 1740 table[i] = null; 1741 } 1742 } else { 1743 if (p != null) { // just update the value 1744 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1745 pEntry.value = newValue; 1746 pEntry.recordAccess(this); 1747 } else { // need to put new node 1748 p = tb.putTreeNode(hash, key, newValue, null); 1749 // assert p == null; // should have added a new node 1750 modCount++; 1751 size++; 1752 if (size >= threshold) { 1753 resize(2 * table.length); 1754 } 1755 } 1756 } 1757 } 1758 return newValue; 1759 } 1760 1761 V newValue = remappingFunction.apply(key, null); 1762 if (newValue != null) { 1763 modCount++; 1764 addEntry(hash, key, newValue, i, checkIfNeedTree); 1765 } 1766 1767 return newValue; 1768 } 1769 1770 @Override 1771 public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { 1772 if (table == EMPTY_TABLE) { 1773 inflateTable(threshold); 1774 } 1775 if (key == null) { 1776 V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; 1777 V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value); 1778 if (newValue != null) { 1779 putForNullKey(newValue); 1780 } else if (nullKeyEntry != null) { 1781 removeNullKey(); 1782 } 1783 return newValue; 1784 } 1785 int hash = hash(key); 1786 int i = indexFor(hash, table.length); 1787 boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
1788 1789 if (table[i] instanceof Entry) { 1790 int listSize = 0; 1791 @SuppressWarnings("unchecked") 1792 Entry<K,V> prev = (Entry<K,V>)table[i]; 1793 Entry<K,V> e = prev; 1794 1795 while (e != null) { 1796 Entry<K,V> next = (Entry<K,V>)e.next; 1797 if (e.hash == hash && Objects.equals(e.key, key)) { 1798 V oldValue = e.value; 1799 V newValue = (oldValue == null) ? value : 1800 remappingFunction.apply(oldValue, value); 1801 if (newValue == null) { 1802 modCount++; 1803 size--; 1804 if (prev == e) 1805 table[i] = next; 1806 else 1807 prev.next = next; 1808 e.recordRemoval(this); 1809 } else { 1810 e.value = newValue; 1811 e.recordAccess(this); 1812 } 1813 return newValue; 1814 } 1815 prev = e; 1816 e = next; 1817 listSize++; 1818 } 1819 // Didn't find, so fall through and (maybe) call addEntry() to add 1820 // the Entry and check for TreeBin conversion. 1821 checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; 1822 } else if (table[i] != null) { 1823 TreeBin tb = (TreeBin)table[i]; 1824 TreeNode p = tb.getTreeNode(hash, key); 1825 V oldValue = p == null ? null : (V)p.entry.value; 1826 V newValue = (oldValue == null) ? 
value : 1827 remappingFunction.apply(oldValue, value); 1828 if (newValue == null) { 1829 if (p != null) { 1830 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1831 modCount++; 1832 size--; 1833 tb.deleteTreeNode(p); 1834 pEntry.recordRemoval(this); 1835 1836 if (tb.root == null || tb.first == null) { 1837 // assert tb.root == null && tb.first == null : 1838 // "TreeBin.first and root should both be null"; 1839 // TreeBin is now empty, we should blank this bin 1840 table[i] = null; 1841 } 1842 } 1843 return null; 1844 } else if (newValue != oldValue) { 1845 if (p != null) { // just update the value 1846 Entry<K,V> pEntry = (Entry<K,V>)p.entry; 1847 pEntry.value = newValue; 1848 pEntry.recordAccess(this); 1849 } else { // need to put new node 1850 p = tb.putTreeNode(hash, key, newValue, null); 1851 // assert p == null; // should have added a new node 1852 modCount++; 1853 size++; 1854 if (size >= threshold) { 1855 resize(2 * table.length); 1856 } 1857 } 1858 } 1859 return newValue; 1860 } 1861 if (value != null) { 1862 modCount++; 1863 addEntry(hash, key, value, i, checkIfNeedTree); 1864 } 1865 return value; 1866 } 1867 1868 // end of optimized implementations of default methods in Map 1869 1870 /** 1871 * Removes and returns the entry associated with the specified key 1872 * in the HashMap. Returns null if the HashMap contains no mapping 1873 * for this key. 1874 * 1875 * We don't bother converting TreeBins back to Entry lists if the bin falls 1876 * back below TREE_THRESHOLD, but we do clear bins when removing the last 1877 * TreeNode in a TreeBin. 
 */
final Entry<K,V> removeEntryForKey(Object key) {
    if (isEmpty()) {
        return null;
    }
    if (key == null) {
        // Null key is stored out-of-table; handled separately.
        if (nullKeyEntry != null) {
            return removeNullKey();
        }
        return null;
    }
    int hash = hash(key);
    int i = indexFor(hash, table.length);

    if (table[i] instanceof Entry) {
        @SuppressWarnings("unchecked")
        Entry<K,V> prev = (Entry<K,V>)table[i];
        Entry<K,V> e = prev;

        while (e != null) {
            @SuppressWarnings("unchecked")
            Entry<K,V> next = (Entry<K,V>) e.next;
            if (e.hash == hash && Objects.equals(e.key, key)) {
                modCount++;
                size--;
                // Unlink e: if it is the head, the bin points at its
                // successor; otherwise bypass it in the list.
                if (prev == e)
                    table[i] = next;
                else
                    prev.next = next;
                e.recordRemoval(this);
                return e;
            }
            prev = e;
            e = next;
        }
    } else if (table[i] != null) {
        TreeBin tb = ((TreeBin) table[i]);
        TreeNode p = tb.getTreeNode(hash, (K)key);
        if (p != null) {
            Entry<K,V> pEntry = (Entry<K,V>)p.entry;
            // assert pEntry.key.equals(key);
            modCount++;
            size--;
            tb.deleteTreeNode(p);
            pEntry.recordRemoval(this);
            if (tb.root == null || tb.first == null) {
                // assert tb.root == null && tb.first == null :
                //   "TreeBin.first and root should both be null";
                // TreeBin is now empty, we should blank this bin
                table[i] = null;
            }
            return pEntry;
        }
    }
    return null;
}

/**
 * Special version of remove for EntrySet using {@code Map.Entry.equals()}
 * for matching.
 */
final Entry<K,V> removeMapping(Object o) {
    if (isEmpty() || !(o instanceof Map.Entry))
        return null;

    Map.Entry<?,?> entry = (Map.Entry<?,?>) o;
    Object key = entry.getKey();

    if (key == null) {
        // Full Map.Entry equality check (key AND value) for the null key.
        if (entry.equals(nullKeyEntry)) {
            return removeNullKey();
        }
        return null;
    }

    int hash = hash(key);
    int i = indexFor(hash, table.length);

    if (table[i] instanceof Entry) {
        @SuppressWarnings("unchecked")
        Entry<K,V> prev = (Entry<K,V>)table[i];
        Entry<K,V> e = prev;

        while (e != null) {
            @SuppressWarnings("unchecked")
            Entry<K,V> next = (Entry<K,V>)e.next;
            // Match on Map.Entry.equals(), i.e. both key and value.
            if (e.hash == hash && e.equals(entry)) {
                modCount++;
                size--;
                if (prev == e)
                    table[i] = next;
                else
                    prev.next = next;
                e.recordRemoval(this);
                return e;
            }
            prev = e;
            e = next;
        }
    } else if (table[i] != null) {
        TreeBin tb = ((TreeBin) table[i]);
        TreeNode p = tb.getTreeNode(hash, (K)key);
        if (p != null && p.entry.equals(entry)) {
            @SuppressWarnings("unchecked")
            Entry<K,V> pEntry = (Entry<K,V>)p.entry;
            // assert pEntry.key.equals(key);
            modCount++;
            size--;
            tb.deleteTreeNode(p);
            pEntry.recordRemoval(this);
            if (tb.root == null || tb.first == null) {
                // assert tb.root == null && tb.first == null :
                //   "TreeBin.first and root should both be null";
                // TreeBin is now empty, we should blank this bin
                table[i] = null;
            }
            return pEntry;
        }
    }
    return null;
}

/*
 * Remove the mapping for the null key, and update internal accounting
 * (size, modcount, recordRemoval, etc).
 *
 * Assumes nullKeyEntry is non-null.
 */
private Entry<K,V> removeNullKey() {
    // assert nullKeyEntry != null;
    Entry<K,V> retVal = nullKeyEntry;
    modCount++;
    size--;
    retVal.recordRemoval(this);
    nullKeyEntry = null;
    return retVal;
}

/**
 * Removes all of the mappings from this map.
 * The map will be empty after this call returns.
 */
public void clear() {
    modCount++;
    if (nullKeyEntry != null) {
        nullKeyEntry = null;
    }
    Arrays.fill(table, null);
    size = 0;
}

/**
 * Returns <tt>true</tt> if this map maps one or more keys to the
 * specified value.
 *
 * @param value value whose presence in this map is to be tested
 * @return <tt>true</tt> if this map maps one or more keys to the
 *         specified value
 */
public boolean containsValue(Object value) {
    if (value == null) {
        return containsNullValue();
    }
    Object[] tab = table;
    // Scan every bin: linked lists directly, TreeBins via their 'first'
    // chain (TreeNode entries are also linked like a list).
    for (int i = 0; i < tab.length; i++) {
        if (tab[i] instanceof Entry) {
            Entry<?,?> e = (Entry<?,?>)tab[i];
            for (; e != null; e = (Entry<?,?>)e.next) {
                if (value.equals(e.value)) {
                    return true;
                }
            }
        } else if (tab[i] != null) {
            TreeBin e = (TreeBin)tab[i];
            TreeNode p = e.first;
            for (; p != null; p = (TreeNode) p.entry.next) {
                if (value == p.entry.value || value.equals(p.entry.value)) {
                    return true;
                }
            }
        }
    }
    // Didn't find value in table - could be in nullKeyEntry
    return (nullKeyEntry != null && (value == nullKeyEntry.value ||
                                     value.equals(nullKeyEntry.value)));
}

/**
 * Special-case code for containsValue with null argument
 */
private boolean containsNullValue() {
    Object[] tab = table;
    for (int i = 0; i < tab.length; i++) {
        if (tab[i] instanceof Entry) {
            Entry<K,V> e = (Entry<K,V>)tab[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.value == null) {
                    return true;
                }
            }
        } else if (tab[i] != null) {
            TreeBin e = (TreeBin)tab[i];
            TreeNode p = e.first;
            for (; p != null; p = (TreeNode) p.entry.next) {
                if (p.entry.value == null) {
                    return true;
                }
            }
        }
    }
    // Didn't find value in table - could be in nullKeyEntry
    return (nullKeyEntry != null && nullKeyEntry.value == null);
}

/**
 * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
 * values themselves are not cloned.
 *
 * @return a shallow copy of this map
 */
@SuppressWarnings("unchecked")
public Object clone() {
    HashMap<K,V> result = null;
    try {
        result = (HashMap<K,V>)super.clone();
    } catch (CloneNotSupportedException e) {
        // assert false; // we implement Cloneable, so this cannot happen
    }
    if (result.table != EMPTY_TABLE) {
        // Size the clone's table for the current number of mappings,
        // capped both by MAXIMUM_CAPACITY and the source table length.
        result.inflateTable(Math.min(
            (int) Math.min(
                size * Math.min(1 / loadFactor, 4.0f),
                // we have limits...
                HashMap.MAXIMUM_CAPACITY),
            table.length));
    }
    // Reset the clone's state, then repopulate it entry by entry.
    result.entrySet = null;
    result.modCount = 0;
    result.size = 0;
    result.nullKeyEntry = null;
    result.init();
    result.putAllForCreate(this);

    return result;
}

// Linked-list node for ordinary (non-tree) bins; also wrapped by TreeNode.
static class Entry<K,V> implements Map.Entry<K,V> {
    final K key;
    V value;
    Object next; // an Entry, or a TreeNode
    final int hash;

    /**
     * Creates new entry.
     */
    Entry(int h, K k, V v, Object n) {
        value = v;
        next = n;
        key = k;
        hash = h;
    }

    public final K getKey() {
        return key;
    }

    public final V getValue() {
        return value;
    }

    public final V setValue(V newValue) {
        V oldValue = value;
        value = newValue;
        return oldValue;
    }

    public final boolean equals(Object o) {
        if (!(o instanceof Map.Entry))
            return false;
        Map.Entry<?,?> e = (Map.Entry<?,?>)o;
        Object k1 = getKey();
        Object k2 = e.getKey();
        if (k1 == k2 || (k1 != null && k1.equals(k2))) {
            Object v1 = getValue();
            Object v2 = e.getValue();
            if (v1 == v2 || (v1 != null && v1.equals(v2)))
                return true;
        }
        return false;
    }

    public final int hashCode() {
        return Objects.hashCode(getKey()) ^ Objects.hashCode(getValue());
    }

    public final String toString() {
        return getKey() + "=" + getValue();
    }

    /**
     * This method is invoked whenever the value in an entry is
     * overwritten for a key that's already in the HashMap.
     */
    void recordAccess(HashMap<K,V> m) {
    }

    /**
     * This method is invoked whenever the entry is
     * removed from the table.
     */
    void recordRemoval(HashMap<K,V> m) {
    }
}

// Convenience overload: always check for possible TreeBin conversion.
void addEntry(int hash, K key, V value, int bucketIndex) {
    addEntry(hash, key, value, bucketIndex, true);
}

/**
 * Adds a new entry with the specified key, value and hash code to
 * the specified bucket.  It is the responsibility of this
 * method to resize the table if appropriate.  The new entry is then
 * created by calling createEntry().
 *
 * Subclass overrides this to alter the behavior of put method.
 *
 * If checkIfNeedTree is false, it is known that this bucket will not need
 * to be converted to a TreeBin, so don't bother checking.
     *
     * Assumes key is not null.
     */
    void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        // assert key != null;
        if ((size >= threshold) && (null != table[bucketIndex])) {
            // Grow the table, then recompute the hash and target bucket for
            // the enlarged table before inserting.
            resize(2 * table.length);
            hash = hash(key);
            bucketIndex = indexFor(hash, table.length);
        }
        createEntry(hash, key, value, bucketIndex, checkIfNeedTree);
    }

    /**
     * Called by addEntry(), and also used when creating entries
     * as part of Map construction or "pseudo-construction" (cloning,
     * deserialization).  This version does not check for resizing of the table.
     *
     * This method is responsible for converting a bucket to a TreeBin once
     * TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known
     * that this bucket will not need to be converted to a TreeBin, so don't
     * bother checking.  The new entry is constructed by calling newEntry().
     *
     * Assumes key is not null.
     *
     * Note: buckets already converted to a TreeBin don't call this method, but
     * instead call TreeBin.putTreeNode() to create new entries.
     */
    void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        // assert key != null;
        @SuppressWarnings("unchecked")
        Entry<K,V> e = (Entry<K,V>)table[bucketIndex];
        // Prepend the new entry to the bucket's list.
        table[bucketIndex] = newEntry(hash, key, value, e);
        size++;

        if (checkIfNeedTree) {
            // Count the bucket's list; convert to a TreeBin at the threshold.
            int listSize = 0;
            for (e = (Entry<K,V>) table[bucketIndex]; e != null; e = (Entry<K,V>)e.next) {
                listSize++;
                if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin
                    // Only convert if the key's class is usable for comparison
                    // (comparableClassFor); otherwise leave the bucket a list.
                    if (comparableClassFor(key) != null) {
                        TreeBin t = new TreeBin();
                        t.populate((Entry)table[bucketIndex]);
                        table[bucketIndex] = t;
                    }
                    break;
                }
            }
        }
    }

    /*
     * Factory method to create a new Entry object.
     */
    Entry<K,V> newEntry(int hash, K key, V value, Object next) {
        return new HashMap.Entry<>(hash, key, value, next);
    }


    /**
     * Common machinery for the key/value/entry iterators.  Delivers the
     * null-key mapping (if any) first, then walks the table bin by bin.
     */
    private abstract class HashIterator<E> implements Iterator<E> {
        Object next;            // next entry to return, an Entry or a TreeNode
        int expectedModCount;   // For fast-fail
        int index;              // current slot
        Object current;         // current entry, an Entry or a TreeNode

        HashIterator() {
            expectedModCount = modCount;
            if (size > 0) { // advance to first entry
                if (nullKeyEntry != null) {
                    // assert nullKeyEntry.next == null;
                    // This works with nextEntry(): nullKeyEntry is an Entry, and
                    // e.next will be null, so we'll hit the findNextBin() call.
                    next = nullKeyEntry;
                } else {
                    findNextBin();
                }
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        @SuppressWarnings("unchecked")
        final Entry<K,V> nextEntry() {
            if (modCount != expectedModCount) {
                throw new ConcurrentModificationException();
            }
            Object e = next;
            Entry<K,V> retVal;

            if (e == null)
                throw new NoSuchElementException();

            if (e instanceof Entry) {
                retVal = (Entry<K,V>)e;
                next = ((Entry<K,V>)e).next;
            } else { // TreeBin
                // 'e' is a TreeNode; its payload Entry carries the mapping.
                retVal = (Entry<K,V>)((TreeNode)e).entry;
                next = retVal.next;
            }

            if (next == null) { // Move to next bin
                findNextBin();
            }
            current = e;
            return retVal;
        }

        public void remove() {
            if (current == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            K k;

            if (current instanceof Entry) {
                k = ((Entry<K,V>)current).key;
            } else {
                k = ((Entry<K,V>)((TreeNode)current).entry).key;
            }
            current = null;
            HashMap.this.removeEntryForKey(k);
            // Removal through the map bumps modCount; resync so this
            // iterator does not trip its own fast-fail check.
            expectedModCount = modCount;
        }

        /*
         * Set 'next' to the
first entry of the next non-empty bin in the table
         */
        private void findNextBin() {
            // assert next == null;
            Object[] t = table;

            // Scan forward to the next non-null slot, if any.
            while (index < t.length && (next = t[index++]) == null)
                ;
            if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode
                next = ((TreeBin) next).first;
                // assert next != null; // There should be no empty TreeBins
            }
        }
    }

    private final class ValueIterator extends HashIterator<V> {
        public V next() {
            return nextEntry().value;
        }
    }

    private final class KeyIterator extends HashIterator<K> {
        public K next() {
            return nextEntry().getKey();
        }
    }

    private final class EntryIterator extends HashIterator<Map.Entry<K,V>> {
        public Map.Entry<K,V> next() {
            return nextEntry();
        }
    }

    // Subclass overrides these to alter behavior of views' iterator() method
    Iterator<K> newKeyIterator() {
        return new KeyIterator();
    }
    Iterator<V> newValueIterator() {
        return new ValueIterator();
    }
    Iterator<Map.Entry<K,V>> newEntryIterator() {
        return new EntryIterator();
    }


    // Views

    /** Cached entry-set view; created lazily by entrySet0(). */
    private transient Set<Map.Entry<K,V>> entrySet = null;

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
     * operations.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        Set<K> ks = keySet;
        return (ks != null ? ks : (keySet = new KeySet()));
    }

    private final class KeySet extends AbstractSet<K> {
        public Iterator<K> iterator() {
            return newKeyIterator();
        }
        public int size() {
            return size;
        }
        public boolean contains(Object o) {
            return containsKey(o);
        }
        public boolean remove(Object o) {
            return HashMap.this.removeEntryForKey(o) != null;
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<K> spliterator() {
            // Use the optimized spliterator only for HashMap proper;
            // subclasses may iterate differently, so fall back to the
            // iterator-based one for them.
            if (HashMap.this.getClass() == HashMap.class)
                return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                    (this, Spliterator.SIZED | Spliterator.DISTINCT);
        }
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own <tt>remove</tt> operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a collection view of the values contained in this map
     */
    public Collection<V> values() {
        Collection<V> vs = values;
        return (vs != null ? vs : (values = new Values()));
    }

    private final class Values extends AbstractCollection<V> {
        public Iterator<V> iterator() {
            return newValueIterator();
        }
        public int size() {
            return size;
        }
        public boolean contains(Object o) {
            return containsValue(o);
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<V> spliterator() {
            // See KeySet.spliterator() for why this checks the exact class.
            if (HashMap.this.getClass() == HashMap.class)
                return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                    (this, Spliterator.SIZED);
        }
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation, or through the
     * <tt>setValue</tt> operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
     * <tt>clear</tt> operations.  It does not support the
     * <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        return entrySet0();
    }

    private Set<Map.Entry<K,V>> entrySet0() {
        Set<Map.Entry<K,V>> es = entrySet;
        return es != null ? es : (entrySet = new EntrySet());
    }

    private final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public Iterator<Map.Entry<K,V>> iterator() {
            return newEntryIterator();
        }
        public boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            // Both key and value must match the stored mapping.
            Entry<K,V> candidate = getEntry(e.getKey());
            return candidate != null && candidate.equals(e);
        }
        public boolean remove(Object o) {
            return removeMapping(o) != null;
        }
        public int size() {
            return size;
        }
        public void clear() {
            HashMap.this.clear();
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            // See KeySet.spliterator() for why this checks the exact class.
            if (HashMap.this.getClass() == HashMap.class)
                return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
            else
                return Spliterators.spliterator
                    (this, Spliterator.SIZED | Spliterator.DISTINCT);
        }
    }

    /**
     * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
     * serialize it).
     *
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException
    {
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();

        // Write out number of buckets
        if (table==EMPTY_TABLE) {
            // Table was never inflated; emit the capacity it would have had.
            s.writeInt(roundUpToPowerOf2(threshold));
        } else {
            s.writeInt(table.length);
        }

        // Write out size (number of Mappings)
        s.writeInt(size);

        // Write out keys and values (alternating)
        if (size > 0) {
            for(Map.Entry<K,V> e : entrySet0()) {
                s.writeObject(e.getKey());
                s.writeObject(e.getValue());
            }
        }
    }

    private static final long serialVersionUID = 362498820763181265L;

    /**
     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException
    {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        // Validate loadFactor: serialized data is untrusted input.
        if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        }

        // set other fields that need values
        if (Holder.USE_HASHSEED) {
            // Install a fresh per-instance hash seed (never 0).
            int seed = ThreadLocalRandom.current().nextInt();
            Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET,
                                         (seed != 0) ? seed : 1);
        }
        table = EMPTY_TABLE;

        // Read in number of buckets
        s.readInt(); // ignored.

        // Read number of mappings
        int mappings = s.readInt();
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);

        // capacity chosen by number of mappings and desired load (if >= 0.25)
        int capacity = (int) Math.min(
                mappings * Math.min(1 / loadFactor, 4.0f),
                // we have limits...
                HashMap.MAXIMUM_CAPACITY);

        // allocate the bucket array;
        if (mappings > 0) {
            inflateTable(capacity);
        } else {
            // No mappings: defer inflation, remembering the desired capacity.
            threshold = capacity;
        }

        init();  // Give subclass a chance to do its thing.

        // Read the keys and values, and put the mappings in the HashMap
        for (int i=0; i<mappings; i++) {
            @SuppressWarnings("unchecked")
            K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V value = (V) s.readObject();
            putForCreate(key, value);
        }
    }

    // These methods are used when serializing HashSets
    int capacity() { return table.length; }
    float loadFactor() { return loadFactor; }

    /**
     * Standin until HM overhaul; based loosely on Weak and Identity HM.
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Object current;         // current node, can be Entry or TreeNode
        int index;              // current index, modified on advance/split
        int fence;              // one past last index
        int est;                // size estimate
        int expectedModCount;   // for comodification checks
        boolean acceptedNull;   // Have we accepted the null key?
                                // Without this, we can't distinguish
                                // between being at the very beginning (and
                                // needing to accept null), or being at the
                                // end of the list in bin 0. In both cases,
                                // current == null && index == 0.

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
            this.acceptedNull = false;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                hi = fence = m.table.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }

    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            // Split the remaining bin range [lo, hi) in half; refuse when
            // the range is tiny or we're mid-bin (current != null).
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            if (lo >= mid || current != null) {
                return null;
            } else {
                KeySpliterator<K,V> retVal = new KeySpliterator<K,V>(map, lo,
                                     index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            // Deliver the null-key mapping first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry.key);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        // Advance to the next bin; a TreeBin is entered at
                        // its first TreeNode.
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry.key);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                // Comodification is checked once, after the whole traversal.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            // Deliver the null-key mapping first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry.key);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (current instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)current;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)current).entry;
                        }
                        // Capture the key before advancing, then check for
                        // comodification after invoking the action.
                        K k = entry.key;
                        current = entry.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                   Spliterator.DISTINCT;
        }
    }

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            // Same splitting policy as KeySpliterator.trySplit().
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            if (lo >= mid || current != null) {
                return null;
            } else {
                ValueSpliterator<K,V> retVal = new ValueSpliterator<K,V>(map,
                                 lo, index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            // Deliver the null-key mapping's value first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry.value);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        // Advance to the next bin; a TreeBin is entered at
                        // its first TreeNode.
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry.value);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                // Comodification is checked once, after the whole traversal.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            // Deliver the null-key mapping's value first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry.value);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (current instanceof HashMap.Entry) {
                            entry = (Entry<K,V>)current;
                        } else {
                            entry = (Entry<K,V>)((TreeNode)current).entry;
                        }
                        // Capture the value before advancing, then check for
                        // comodification after invoking the action.
                        V v = entry.value;
                        current = entry.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            // Same splitting policy as KeySpliterator.trySplit().
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            if (lo >= mid || current != null) {
                return null;
            } else {
                EntrySpliterator<K,V> retVal = new EntrySpliterator<K,V>(map,
                                 lo, index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }

        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            // Deliver the null-key entry first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        // Advance to the next bin; a TreeBin is entered at
                        // its first TreeNode.
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                // Comodification is checked once, after the whole traversal.
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            // Deliver the null-key entry first, at most once.
            if (!acceptedNull) {
                acceptedNull = true;
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> e;
                        if (current instanceof HashMap.Entry) {
                            e = (Entry<K,V>)current;
                        } else {
                            e = (Entry<K,V>)((TreeNode)current).entry;
                        }
                        // Advance before invoking the action, then check for
                        // comodification.
                        current = e.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                   Spliterator.DISTINCT;
        }
    }
}