1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.IOException; 29 import java.io.InvalidObjectException; 30 import java.io.Serializable; 31 import java.lang.reflect.ParameterizedType; 32 import java.lang.reflect.Type; 33 import java.util.function.BiConsumer; 34 import java.util.function.BiFunction; 35 import java.util.function.Consumer; 36 import java.util.function.Function; 37 import jdk.internal.misc.SharedSecrets; 38 39 /** 40 * Hash table based implementation of the {@code Map} interface. This 41 * implementation provides all of the optional map operations, and permits 42 * {@code null} values and the {@code null} key. 
(The {@code HashMap} 43 * class is roughly equivalent to {@code Hashtable}, except that it is 44 * unsynchronized and permits nulls.) This class makes no guarantees as to 45 * the order of the map; in particular, it does not guarantee that the order 46 * will remain constant over time. 47 * 48 * <p>This implementation provides constant-time performance for the basic 49 * operations ({@code get} and {@code put}), assuming the hash function 50 * disperses the elements properly among the buckets. Iteration over 51 * collection views requires time proportional to the "capacity" of the 52 * {@code HashMap} instance (the number of buckets) plus its size (the number 53 * of key-value mappings). Thus, it's very important not to set the initial 54 * capacity too high (or the load factor too low) if iteration performance is 55 * important. 56 * 57 * <p>An instance of {@code HashMap} has two parameters that affect its 58 * performance: <i>initial capacity</i> and <i>load factor</i>. The 59 * <i>capacity</i> is the number of buckets in the hash table, and the initial 60 * capacity is simply the capacity at the time the hash table is created. The 61 * <i>load factor</i> is a measure of how full the hash table is allowed to 62 * get before its capacity is automatically increased. When the number of 63 * entries in the hash table exceeds the product of the load factor and the 64 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 65 * structures are rebuilt) so that the hash table has approximately twice the 66 * number of buckets. 67 * 68 * <p>As a general rule, the default load factor (.75) offers a good 69 * tradeoff between time and space costs. Higher values decrease the 70 * space overhead but increase the lookup cost (reflected in most of 71 * the operations of the {@code HashMap} class, including 72 * {@code get} and {@code put}). 
The expected number of entries in 73 * the map and its load factor should be taken into account when 74 * setting its initial capacity, so as to minimize the number of 75 * rehash operations. If the initial capacity is greater than the 76 * maximum number of entries divided by the load factor, no rehash 77 * operations will ever occur. 78 * 79 * <p>If many mappings are to be stored in a {@code HashMap} 80 * instance, creating it with a sufficiently large capacity will allow 81 * the mappings to be stored more efficiently than letting it perform 82 * automatic rehashing as needed to grow the table. Note that using 83 * many keys with the same {@code hashCode()} is a sure way to slow 84 * down performance of any hash table. To ameliorate impact, when keys 85 * are {@link Comparable}, this class may use comparison order among 86 * keys to help break ties. 87 * 88 * <p><strong>Note that this implementation is not synchronized.</strong> 89 * If multiple threads access a hash map concurrently, and at least one of 90 * the threads modifies the map structurally, it <i>must</i> be 91 * synchronized externally. (A structural modification is any operation 92 * that adds or deletes one or more mappings; merely changing the value 93 * associated with a key that an instance already contains is not a 94 * structural modification.) This is typically accomplished by 95 * synchronizing on some object that naturally encapsulates the map. 96 * 97 * If no such object exists, the map should be "wrapped" using the 98 * {@link Collections#synchronizedMap Collections.synchronizedMap} 99 * method. 
This is best done at creation time, to prevent accidental
 * unsynchronized access to the map:<pre>
 *   Map m = Collections.synchronizedMap(new HashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods"
 * are <i>fail-fast</i>: if the map is structurally modified at any time after
 * the iterator is created, in any way except through the iterator's own
 * {@code remove} method, the iterator will throw a
 * {@link ConcurrentModificationException}.  Thus, in the face of concurrent
 * modification, the iterator fails quickly and cleanly, rather than risking
 * arbitrary, non-deterministic behavior at an undetermined time in the
 * future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification.  Fail-fast iterators
 * throw {@code ConcurrentModificationException} on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node).  Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>"
     * type, then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor.)  The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable.  Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins.  In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used.  Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node.  However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use and transitions among plain vs tree modes is
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Basic hash bin node, used for most entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;     // cached spread hash of the key; fixed at construction
        final K key;
        V value;
        Node<K,V> next;     // next node in the same bucket's chain

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        // XOR of key and value hash codes, per the Map.Entry.hashCode spec.
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        // Equal to any Map.Entry with an equal key and an equal value.
        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }

    /* ---------------- Static utilities -------------- */

    /**
     * Computes key.hashCode() and spreads (XORs) higher bits of hash
     * to lower.  Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading.
Because many common sets of hashes 330 * are already reasonably distributed (so don't benefit from 331 * spreading), and because we use trees to handle large sets of 332 * collisions in bins, we just XOR some shifted bits in the 333 * cheapest possible way to reduce systematic lossage, as well as 334 * to incorporate impact of the highest bits that would otherwise 335 * never be used in index calculations because of table bounds. 336 */ 337 static final int hash(Object key) { 338 int h; 339 return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); 340 } 341 342 /** 343 * Returns x's Class if it is of the form "class C implements 344 * Comparable<C>", else null. 345 */ 346 static Class<?> comparableClassFor(Object x) { 347 if (x instanceof Comparable) { 348 Class<?> c; Type[] ts, as; ParameterizedType p; 349 if ((c = x.getClass()) == String.class) // bypass checks 350 return c; 351 if ((ts = c.getGenericInterfaces()) != null) { 352 for (Type t : ts) { 353 if ((t instanceof ParameterizedType) && 354 ((p = (ParameterizedType) t).getRawType() == 355 Comparable.class) && 356 (as = p.getActualTypeArguments()) != null && 357 as.length == 1 && as[0] == c) // type arg is c 358 return c; 359 } 360 } 361 } 362 return null; 363 } 364 365 /** 366 * Returns k.compareTo(x) if x matches kc (k's screened comparable 367 * class), else 0. 368 */ 369 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable 370 static int compareComparables(Class<?> kc, Object k, Object x) { 371 return (x == null || x.getClass() != kc ? 0 : 372 ((Comparable)k).compareTo(x)); 373 } 374 375 /** 376 * Returns a power of two size for the given target capacity. 377 */ 378 static final int tableSizeFor(int cap) { 379 int n = cap - 1; 380 n |= n >>> 1; 381 n |= n >>> 2; 382 n |= n >>> 4; 383 n |= n >>> 8; 384 n |= n >>> 16; 385 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? 
MAXIMUM_CAPACITY : n + 1; 386 } 387 388 /* ---------------- Fields -------------- */ 389 390 /** 391 * The table, initialized on first use, and resized as 392 * necessary. When allocated, length is always a power of two. 393 * (We also tolerate length zero in some operations to allow 394 * bootstrapping mechanics that are currently not needed.) 395 */ 396 transient Node<K,V>[] table; 397 398 /** 399 * Holds cached entrySet(). Note that AbstractMap fields are used 400 * for keySet() and values(). 401 */ 402 transient Set<Map.Entry<K,V>> entrySet; 403 404 /** 405 * The number of key-value mappings contained in this map. 406 */ 407 transient int size; 408 409 /** 410 * The number of times this HashMap has been structurally modified 411 * Structural modifications are those that change the number of mappings in 412 * the HashMap or otherwise modify its internal structure (e.g., 413 * rehash). This field is used to make iterators on Collection-views of 414 * the HashMap fail-fast. (See ConcurrentModificationException). 415 */ 416 transient int modCount; 417 418 /** 419 * The next size value at which to resize (capacity * load factor). 420 * 421 * @serial 422 */ 423 // (The javadoc description is true upon serialization. 424 // Additionally, if the table array has not been allocated, this 425 // field holds the initial array capacity, or zero signifying 426 // DEFAULT_INITIAL_CAPACITY.) 427 int threshold; 428 429 /** 430 * The load factor for the hash table. 431 * 432 * @serial 433 */ 434 final float loadFactor; 435 436 /* ---------------- Public operations -------------- */ 437 438 /** 439 * Constructs an empty {@code HashMap} with the specified initial 440 * capacity and load factor. 
     *
     * @param  initialCapacity the initial capacity
     * @param  loadFactor      the load factor
     * @throws IllegalArgumentException if the initial capacity is negative
     *         or the load factor is nonpositive
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // The table is allocated lazily; until then, threshold holds the
        // power-of-two-rounded initial capacity (see the threshold field).
        this.threshold = tableSizeFor(initialCapacity);
    }

    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param  initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs an empty {@code HashMap} with the default initial capacity
     * (16) and the default load factor (0.75).
     */
    public HashMap() {
        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
    }

    /**
     * Constructs a new {@code HashMap} with the same mappings as the
     * specified {@code Map}.  The {@code HashMap} is created with
     * default load factor (0.75) and an initial capacity sufficient to
     * hold the mappings in the specified {@code Map}.
     *
     * @param   m the map whose mappings are to be placed in this map
     * @throws  NullPointerException if the specified map is null
     */
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        putMapEntries(m, false);
    }

    /**
     * Implements Map.putAll and Map constructor.
     *
     * @param m the map
     * @param evict false when initially constructing this map, else
     * true (relayed to method afterNodeInsertion).
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                // Smallest capacity keeping s entries under the load
                // factor; +1.0F compensates for the (int) truncation.
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            }
            else if (s > threshold)
                resize();
            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }

    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        return size;
    }

    /**
     * Returns {@code true} if this map contains no key-value mappings.
     *
     * @return {@code true} if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return size == 0;
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
     * key.equals(k))}, then this method returns {@code v}; otherwise
     * it returns {@code null}.  (There can be at most one such mapping.)
     *
     * <p>A return value of {@code null} does not <i>necessarily</i>
     * indicate that the map contains no mapping for the key; it's also
     * possible that the map explicitly maps the key to {@code null}.
     * The {@link #containsKey containsKey} operation may be used to
     * distinguish these two cases.
     *
     * @see #put(Object, Object)
     */
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                if (first instanceof TreeNode)
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Plain bin: linear scan of the linked chain.
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    /**
     * Returns {@code true} if this map contains a mapping for the
     * specified key.
     *
     * @param   key   The key whose presence in this map is to be tested
     * @return {@code true} if this map contains a mapping for the specified
     * key.
     */
    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }

    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
     */
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }

    /**
     * Implements Map.put and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to put
     * @param onlyIfAbsent if true, don't change existing value
     * @param evict if false, the table is in creation mode.
     * @return previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;   // lazy table allocation
        if ((p = tab[i = (n - 1) & hash]) == null)
            tab[i] = newNode(hash, key, value, null);   // empty bin
        else {
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                // Walk the chain; append at the tail if the key is absent,
                // treeifying once the bin grows past TREEIFY_THRESHOLD.
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);
                return oldValue;
            }
        }
        ++modCount;
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict);
        return null;
    }

    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                // Cannot grow further; disable future resizing.
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            // Rehash: each old bin splits into a "lo" list (same index j)
            // and a "hi" list (index j + oldCap), chosen by the single
            // hash bit that the doubled mask newly exposes.
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }

    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            // Convert the chain to a doubly-linked list of TreeNodes in
            // the same order, then let TreeNode.treeify build the tree.
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }

    /**
     * Copies all of the mappings from the specified map to this map.
     * These mappings will replace any mappings that this map had for
     * any of the keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     * @throws NullPointerException if the specified map is null
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        putMapEntries(m, true);
    }

    /**
     * Removes the mapping for the specified key from this map if present.
     *
     * @param  key key whose mapping is to be removed from the map
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
     */
    public V remove(Object key) {
        Node<K,V> e;
        return (e = removeNode(hash(key), key, null, false, true)) == null ?
            null : e.value;
    }

    /**
     * Implements Map.remove and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to match if matchValue, else ignored
     * @param matchValue if true only remove if value is equal
     * @param movable if false do not move other nodes while removing
     * @return the node, or null if none
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // First locate the matching node: bin head, tree lookup, or
            // linear chain scan (p tracks the predecessor).
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            // Then unlink it, unless matchValue demands an equal value.
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;   // node was the bin head
                else
                    p.next = node.next;       // p is node's predecessor
                ++modCount;
                --size;
                afterNodeRemoval(node);
                return node;
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     * The map will be empty after this call returns.
     */
    public void clear() {
        Node<K,V>[] tab;
        modCount++;
        if ((tab = table) != null && size > 0) {
            size = 0;
            for (int i = 0; i < tab.length; ++i)
                tab[i] = null;
        }
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     */
    public boolean containsValue(Object value) {
        Node<K,V>[] tab; V v;
        if ((tab = table) != null && size > 0) {
            // Scan every bin; tree bins remain traversable through their
            // 'next' links, so one loop handles both bin shapes.
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next) {
                    if ((v = e.value) == value ||
                        (value != null && value.equals(v)))
                        return true;
                }
            }
        }
        return false;
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own {@code remove} operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or {@code addAll}
     * operations.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        // Lazily create and cache the view; the view itself is stateless,
        // so recreating it on a race would be harmless.
        Set<K> ks = keySet;
        if (ks == null) {
            ks = new KeySet();
            keySet = ks;
        }
        return ks;
    }

    // Key view: all queries/removals delegate to the enclosing map.
    final class KeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<K> iterator()     { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super K> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount; // fail fast if the action mutates the map
                for (Node<K,V> e : tab) {
                    for (; e != null; e = e.next)
                        action.accept(e.key);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own {@code remove} operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll} and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * @return a view of the values contained in this map
     */
    public Collection<V> values() {
        // Same lazy caching idiom as keySet().
        Collection<V> vs = values;
        if (vs == null) {
            vs = new Values();
            values = vs;
        }
        return vs;
    }

    // Value view; note remove(Object) is inherited from AbstractCollection.
    final class Values extends AbstractCollection<V> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<V> iterator()     { return new ValueIterator(); }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super V> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount; // fail fast if the action mutates the map
                for (Node<K,V> e : tab) {
                    for (; e != null; e = e.next)
                        action.accept(e.value);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own {@code remove} operation, or through the
     * {@code setValue} operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the {@code Iterator.remove},
     * {@code Set.remove}, {@code removeAll}, {@code retainAll} and
     * {@code clear} operations.  It does not support the
     * {@code add} or {@code addAll} operations.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        Set<Map.Entry<K,V>> es;
        return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
    }

    // Entry view: membership requires both key and value to match.
    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Node<K,V> candidate = getNode(hash(key), key);
            // Node.equals compares both key and value against the entry.
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                // matchValue=true: remove only if the stored value matches.
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount; // fail fast if the action mutates the map
                for (Node<K,V> e : tab) {
                    for (; e != null; e = e.next)
                        action.accept(e);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    // Overrides of JDK8 Map extension methods

    @Override
    public V getOrDefault(Object key, V defaultValue) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
    }

    @Override
    public V putIfAbsent(K key, V value) {
        // onlyIfAbsent=true: existing non-null mappings are left untouched.
        return putVal(hash(key), key, value, true, true);
    }

    @Override
    public boolean remove(Object key, Object value) {
        return removeNode(hash(key), key, value, true, true) != null;
    }

    @Override
    public boolean replace(K key, V oldValue, V newValue) {
        Node<K,V> e; V v;
        // Replace only when the key is present AND its value equals oldValue.
        if ((e = getNode(hash(key), key)) != null &&
            ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
            e.value = newValue;
            afterNodeAccess(e); // LinkedHashMap access-order callback
            return true;
        }
        return false;
    }

    @Override
    public V replace(K key, V value) {
        Node<K,V> e;
        if ((e = getNode(hash(key), key)) != null) {
            V oldValue = e.value;
            e.value = value;
            afterNodeAccess(e); // LinkedHashMap access-order callback
            return oldValue;
        }
        return null;
    }

    /**
     * {@inheritDoc}
     *
     * <p>This method will, on a best-effort basis, throw a
     * {@link ConcurrentModificationException} if it is detected that the
     * mapping function modifies this map during computation.
     *
     * @throws ConcurrentModificationException if it is detected that the
     * mapping function modified this map
     */
    @Override
    public V computeIfAbsent(K key,
                             Function<? super K, ? extends V> mappingFunction) {
        if (mappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize eagerly so a subsequent insert has room in this table.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Search the target bin for an existing mapping (tree or list).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
            V oldValue;
            // A mapped null value counts as absent; only a non-null value
            // short-circuits without invoking the mapping function.
            if (old != null && (oldValue = old.value) != null) {
                afterNodeAccess(old);
                return oldValue;
            }
        }
        int mc = modCount;
        V v = mappingFunction.apply(key);
        // Best-effort detection of the function structurally modifying the map.
        if (mc != modCount) { throw new ConcurrentModificationException(); }
        if (v == null) {
            return null;
        } else if (old != null) {
            old.value = v;
            afterNodeAccess(old);
            return v;
        }
        else if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        // Restore the pre-call count plus exactly this one insertion.
        modCount = mc + 1;
        ++size;
        afterNodeInsertion(true);
        return v;
    }

    /**
     * {@inheritDoc}
     *
     * <p>This method will, on a best-effort basis, throw a
     * {@link ConcurrentModificationException} if it is detected that the
     * remapping function modifies this map during computation.
     *
     * @throws ConcurrentModificationException if it is detected that the
     * remapping function modified this map
     */
    @Override
    public V computeIfPresent(K key,
                              BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        Node<K,V> e; V oldValue;
        int hash = hash(key);
        // "Present" means mapped to a non-null value.
        if ((e = getNode(hash, key)) != null &&
            (oldValue = e.value) != null) {
            int mc = modCount;
            V v = remappingFunction.apply(key, oldValue);
            if (mc != modCount) { throw new ConcurrentModificationException(); }
            if (v != null) {
                e.value = v;
                afterNodeAccess(e);
                return v;
            }
            else
                // A null result removes the mapping entirely.
                removeNode(hash, key, null, false, true);
        }
        return null;
    }

    /**
     * {@inheritDoc}
     *
     * <p>This method will, on a best-effort basis, throw a
     * {@link ConcurrentModificationException} if it is detected that the
     * remapping function modifies this map during computation.
     *
     * @throws ConcurrentModificationException if it is detected that the
     * remapping function modified this map
     */
    @Override
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize eagerly so a subsequent insert has room in this table.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Search the target bin for an existing mapping (tree or list).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        V oldValue = (old == null) ? null : old.value;
        int mc = modCount;
        V v = remappingFunction.apply(key, oldValue);
        if (mc != modCount) { throw new ConcurrentModificationException(); }
        if (old != null) {
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else
                // A null result removes the existing mapping.
                removeNode(hash, key, null, false, true);
        }
        else if (v != null) {
            if (t != null)
                t.putTreeVal(this, tab, hash, key, v);
            else {
                tab[i] = newNode(hash, key, v, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            // Restore the pre-call count plus exactly this one insertion.
            modCount = mc + 1;
            ++size;
            afterNodeInsertion(true);
        }
        return v;
    }

    /**
     * {@inheritDoc}
     *
     * <p>This method will, on a best-effort basis, throw a
     * {@link ConcurrentModificationException} if it is detected that the
     * remapping function modifies this map during computation.
     *
     * @throws ConcurrentModificationException if it is detected that the
     * remapping function modified this map
     */
    @Override
    public V merge(K key, V value,
                   BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (value == null)
            throw new NullPointerException();
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize eagerly so a subsequent insert has room in this table.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Search the target bin for an existing mapping (tree or list).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        if (old != null) {
            V v;
            if (old.value != null) {
                int mc = modCount;
                v = remappingFunction.apply(old.value, value);
                if (mc != modCount) {
                    throw new ConcurrentModificationException();
                }
            } else {
                // Existing mapping to null: treat the argument as the merge
                // result without invoking the remapping function.
                v = value;
            }
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else
                removeNode(hash, key, null, false, true);
            return v;
        }
        if (value != null) { // always true here: value was null-checked above
            if (t != null)
                t.putTreeVal(this, tab, hash, key, value);
            else {
                tab[i] = newNode(hash, key, value, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true);
        }
        return value;
    }

    @Override
    public void forEach(BiConsumer<? super K, ? super V> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount; // fail fast if the action mutates the map
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next)
                    action.accept(e.key, e.value);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    @Override
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        Node<K,V>[] tab;
        if (function == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount; // value writes are not structural; count
                               // changes only if the function mutates the map
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next) {
                    e.value = function.apply(e.key, e.value);
                }
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    /* ------------------------------------------------------------ */
    // Cloning and serialization

    /**
     * Returns a shallow copy of this {@code HashMap} instance: the keys and
     * values themselves are not cloned.
     *
     * @return a shallow copy of this map
     */
    @SuppressWarnings("unchecked")
    @Override
    public Object clone() {
        HashMap<K,V> result;
        try {
            result = (HashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError(e);
        }
        // Drop the shared table/view references, then re-insert all entries.
        result.reinitialize();
        result.putMapEntries(this, false);
        return result;
    }

    // These methods are also used when serializing HashSets
    final float loadFactor() { return loadFactor; }
    final int capacity() {
        // Fall back to the deferred threshold (pre-allocation) or the default.
        return (table != null) ? table.length :
            (threshold > 0) ? threshold :
            DEFAULT_INITIAL_CAPACITY;
    }

    /**
     * Saves this map to a stream (that is, serializes it).
     *
     * @param s the stream
     * @throws IOException if an I/O error occurs
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);
        s.writeInt(size);
        internalWriteEntries(s);
    }

    /**
     * Reconstitutes this map from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);

            // Check Map.Entry[].class since it's the nearest public type to
            // what we're actually creating.
            // Validates the computed capacity against the stream's limits
            // before allocating, hardening against hostile stream data.
            SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                // evict=false: do not trigger LinkedHashMap eviction here.
                putVal(hash(key), key, value, false, false);
            }
        }
    }

    /* ------------------------------------------------------------ */
    // iterators

    // Common machinery for the key/value/entry iterators: walks bins in
    // order, pre-fetching the next node so hasNext() is a null check.
    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot

        HashIterator() {
            expectedModCount = modCount;
            Node<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                do {} while (index < t.length && (next = t[index++]) == null);
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        final Node<K,V> nextNode() {
            Node<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            // Advance within the bin, or skip ahead to the next non-empty bin.
            if ((next = (current = e).next) == null && (t = table) != null) {
                do {} while (index < t.length && (next = t[index++]) == null);
            }
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            // movable=false keeps tree nodes in place during iteration.
            removeNode(p.hash, p.key, null, false, false);
            // Re-sync so our own removal does not trip the fast-fail check.
            expectedModCount = modCount;
        }
    }

    final class KeyIterator extends HashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().key; }
    }

    final class ValueIterator extends HashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }

    final class EntryIterator extends HashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }

    /* ------------------------------------------------------------ */
    // spliterators

    // Shared state for the key/value/entry spliterators; fence and estimate
    // are initialized lazily on first use (fence < 0 means uninitialized).
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Node<K,V> current;          // current node
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                Node<K,V>[] tab = m.table;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }

    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            // Split the remaining bin range in half and halve the estimate;
            // refuse to split mid-bin (current != null).
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                     expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only before any split (fence < 0) or when est is exact.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // Values may repeat, so no DISTINCT here.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    /* ------------------------------------------------------------ */
    // LinkedHashMap support


    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */

    // Create a regular (non-tree) node
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }

    // For conversion from TreeNodes to plain nodes
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }

    // Create a tree bin node
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }

    // For treeifyBin
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }

    /**
     * Reset to initial default state.  Called by clone and readObject.
     */
    void reinitialize() {
        table = null;
        entrySet = null;
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }

    // Callbacks to allow LinkedHashMap post-actions; no-ops here.
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }

    // Called only from writeObject, to ensure compatible ordering.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        }
    }

    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            // Follow parent links until a node with no parent is found.
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    // Splice root out of the next/prev list, then relink it
                    // ahead of the old first node; the bin slot points at root.
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                // Navigate by hash first, then key equality, then (when
                // available) Comparable ordering; with no usable ordering,
                // search the right subtree recursively and the left by loop.
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            // Order by class name first; fall back to identity hash codes
            // (never returns 0, so the result is always a usable direction).
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            // Insert each list-linked node into a growing red-black tree.
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false; // root is always black
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        // Order by hash, then Comparable, then tie-break.
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            // Rebuild a plain singly-linked list in the existing next order.
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p; // existing mapping found; caller updates value
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // No usable ordering: scan both subtrees once for an
                    // equal key before falling back to the tie-break order.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Insert as a leaf, maintaining both the tree links and
                    // the next/prev traversal list, then rebalance.
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null; // null signals a new node was inserted
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
2081 */ 2082 final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, 2083 boolean movable) { 2084 int n; 2085 if (tab == null || (n = tab.length) == 0) 2086 return; 2087 int index = (n - 1) & hash; 2088 TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; 2089 TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; 2090 if (pred == null) 2091 tab[index] = first = succ; 2092 else 2093 pred.next = succ; 2094 if (succ != null) 2095 succ.prev = pred; 2096 if (first == null) 2097 return; 2098 if (root.parent != null) 2099 root = root.root(); 2100 if (root == null 2101 || (movable 2102 && (root.right == null 2103 || (rl = root.left) == null 2104 || rl.left == null))) { 2105 tab[index] = first.untreeify(map); // too small 2106 return; 2107 } 2108 TreeNode<K,V> p = this, pl = left, pr = right, replacement; 2109 if (pl != null && pr != null) { 2110 TreeNode<K,V> s = pr, sl; 2111 while ((sl = s.left) != null) // find successor 2112 s = sl; 2113 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 2114 TreeNode<K,V> sr = s.right; 2115 TreeNode<K,V> pp = p.parent; 2116 if (s == pr) { // p was s's direct parent 2117 p.parent = s; 2118 s.right = p; 2119 } 2120 else { 2121 TreeNode<K,V> sp = s.parent; 2122 if ((p.parent = sp) != null) { 2123 if (s == sp.left) 2124 sp.left = p; 2125 else 2126 sp.right = p; 2127 } 2128 if ((s.right = pr) != null) 2129 pr.parent = s; 2130 } 2131 p.left = null; 2132 if ((p.right = sr) != null) 2133 sr.parent = p; 2134 if ((s.left = pl) != null) 2135 pl.parent = s; 2136 if ((s.parent = pp) == null) 2137 root = s; 2138 else if (p == pp.left) 2139 pp.left = s; 2140 else 2141 pp.right = s; 2142 if (sr != null) 2143 replacement = sr; 2144 else 2145 replacement = p; 2146 } 2147 else if (pl != null) 2148 replacement = pl; 2149 else if (pr != null) 2150 replacement = pr; 2151 else 2152 replacement = p; 2153 if (replacement != p) { 2154 TreeNode<K,V> pp = replacement.parent = p.parent; 2155 if (pp == null) 2156 root = replacement; 
2157 else if (p == pp.left) 2158 pp.left = replacement; 2159 else 2160 pp.right = replacement; 2161 p.left = p.right = p.parent = null; 2162 } 2163 2164 TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement); 2165 2166 if (replacement == p) { // detach 2167 TreeNode<K,V> pp = p.parent; 2168 p.parent = null; 2169 if (pp != null) { 2170 if (p == pp.left) 2171 pp.left = null; 2172 else if (p == pp.right) 2173 pp.right = null; 2174 } 2175 } 2176 if (movable) 2177 moveRootToFront(tab, r); 2178 } 2179 2180 /** 2181 * Splits nodes in a tree bin into lower and upper tree bins, 2182 * or untreeifies if now too small. Called only from resize; 2183 * see above discussion about split bits and indices. 2184 * 2185 * @param map the map 2186 * @param tab the table for recording bin heads 2187 * @param index the index of the table being split 2188 * @param bit the bit of hash to split on 2189 */ 2190 final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) { 2191 TreeNode<K,V> b = this; 2192 // Relink into lo and hi lists, preserving order 2193 TreeNode<K,V> loHead = null, loTail = null; 2194 TreeNode<K,V> hiHead = null, hiTail = null; 2195 int lc = 0, hc = 0; 2196 for (TreeNode<K,V> e = b, next; e != null; e = next) { 2197 next = (TreeNode<K,V>)e.next; 2198 e.next = null; 2199 if ((e.hash & bit) == 0) { 2200 if ((e.prev = loTail) == null) 2201 loHead = e; 2202 else 2203 loTail.next = e; 2204 loTail = e; 2205 ++lc; 2206 } 2207 else { 2208 if ((e.prev = hiTail) == null) 2209 hiHead = e; 2210 else 2211 hiTail.next = e; 2212 hiTail = e; 2213 ++hc; 2214 } 2215 } 2216 2217 if (loHead != null) { 2218 if (lc <= UNTREEIFY_THRESHOLD) 2219 tab[index] = loHead.untreeify(map); 2220 else { 2221 tab[index] = loHead; 2222 if (hiHead != null) // (else is already treeified) 2223 loHead.treeify(tab); 2224 } 2225 } 2226 if (hiHead != null) { 2227 if (hc <= UNTREEIFY_THRESHOLD) 2228 tab[index + bit] = hiHead.untreeify(map); 2229 else { 2230 tab[index + bit] = hiHead; 2231 
if (loHead != null) 2232 hiHead.treeify(tab); 2233 } 2234 } 2235 } 2236 2237 /* ------------------------------------------------------------ */ 2238 // Red-black tree methods, all adapted from CLR 2239 2240 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 2241 TreeNode<K,V> p) { 2242 TreeNode<K,V> r, pp, rl; 2243 if (p != null && (r = p.right) != null) { 2244 if ((rl = p.right = r.left) != null) 2245 rl.parent = p; 2246 if ((pp = r.parent = p.parent) == null) 2247 (root = r).red = false; 2248 else if (pp.left == p) 2249 pp.left = r; 2250 else 2251 pp.right = r; 2252 r.left = p; 2253 p.parent = r; 2254 } 2255 return root; 2256 } 2257 2258 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 2259 TreeNode<K,V> p) { 2260 TreeNode<K,V> l, pp, lr; 2261 if (p != null && (l = p.left) != null) { 2262 if ((lr = p.left = l.right) != null) 2263 lr.parent = p; 2264 if ((pp = l.parent = p.parent) == null) 2265 (root = l).red = false; 2266 else if (pp.right == p) 2267 pp.right = l; 2268 else 2269 pp.left = l; 2270 l.right = p; 2271 p.parent = l; 2272 } 2273 return root; 2274 } 2275 2276 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 2277 TreeNode<K,V> x) { 2278 x.red = true; 2279 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 2280 if ((xp = x.parent) == null) { 2281 x.red = false; 2282 return x; 2283 } 2284 else if (!xp.red || (xpp = xp.parent) == null) 2285 return root; 2286 if (xp == (xppl = xpp.left)) { 2287 if ((xppr = xpp.right) != null && xppr.red) { 2288 xppr.red = false; 2289 xp.red = false; 2290 xpp.red = true; 2291 x = xpp; 2292 } 2293 else { 2294 if (x == xp.right) { 2295 root = rotateLeft(root, x = xp); 2296 xpp = (xp = x.parent) == null ? 
null : xp.parent; 2297 } 2298 if (xp != null) { 2299 xp.red = false; 2300 if (xpp != null) { 2301 xpp.red = true; 2302 root = rotateRight(root, xpp); 2303 } 2304 } 2305 } 2306 } 2307 else { 2308 if (xppl != null && xppl.red) { 2309 xppl.red = false; 2310 xp.red = false; 2311 xpp.red = true; 2312 x = xpp; 2313 } 2314 else { 2315 if (x == xp.left) { 2316 root = rotateRight(root, x = xp); 2317 xpp = (xp = x.parent) == null ? null : xp.parent; 2318 } 2319 if (xp != null) { 2320 xp.red = false; 2321 if (xpp != null) { 2322 xpp.red = true; 2323 root = rotateLeft(root, xpp); 2324 } 2325 } 2326 } 2327 } 2328 } 2329 } 2330 2331 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 2332 TreeNode<K,V> x) { 2333 for (TreeNode<K,V> xp, xpl, xpr;;) { 2334 if (x == null || x == root) 2335 return root; 2336 else if ((xp = x.parent) == null) { 2337 x.red = false; 2338 return x; 2339 } 2340 else if (x.red) { 2341 x.red = false; 2342 return root; 2343 } 2344 else if ((xpl = xp.left) == x) { 2345 if ((xpr = xp.right) != null && xpr.red) { 2346 xpr.red = false; 2347 xp.red = true; 2348 root = rotateLeft(root, xp); 2349 xpr = (xp = x.parent) == null ? null : xp.right; 2350 } 2351 if (xpr == null) 2352 x = xp; 2353 else { 2354 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 2355 if ((sr == null || !sr.red) && 2356 (sl == null || !sl.red)) { 2357 xpr.red = true; 2358 x = xp; 2359 } 2360 else { 2361 if (sr == null || !sr.red) { 2362 if (sl != null) 2363 sl.red = false; 2364 xpr.red = true; 2365 root = rotateRight(root, xpr); 2366 xpr = (xp = x.parent) == null ? 2367 null : xp.right; 2368 } 2369 if (xpr != null) { 2370 xpr.red = (xp == null) ? 
false : xp.red; 2371 if ((sr = xpr.right) != null) 2372 sr.red = false; 2373 } 2374 if (xp != null) { 2375 xp.red = false; 2376 root = rotateLeft(root, xp); 2377 } 2378 x = root; 2379 } 2380 } 2381 } 2382 else { // symmetric 2383 if (xpl != null && xpl.red) { 2384 xpl.red = false; 2385 xp.red = true; 2386 root = rotateRight(root, xp); 2387 xpl = (xp = x.parent) == null ? null : xp.left; 2388 } 2389 if (xpl == null) 2390 x = xp; 2391 else { 2392 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 2393 if ((sl == null || !sl.red) && 2394 (sr == null || !sr.red)) { 2395 xpl.red = true; 2396 x = xp; 2397 } 2398 else { 2399 if (sl == null || !sl.red) { 2400 if (sr != null) 2401 sr.red = false; 2402 xpl.red = true; 2403 root = rotateLeft(root, xpl); 2404 xpl = (xp = x.parent) == null ? 2405 null : xp.left; 2406 } 2407 if (xpl != null) { 2408 xpl.red = (xp == null) ? false : xp.red; 2409 if ((sl = xpl.left) != null) 2410 sl.red = false; 2411 } 2412 if (xp != null) { 2413 xp.red = false; 2414 root = rotateRight(root, xp); 2415 } 2416 x = root; 2417 } 2418 } 2419 } 2420 } 2421 } 2422 2423 /** 2424 * Recursive invariant check 2425 */ 2426 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 2427 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 2428 tb = t.prev, tn = (TreeNode<K,V>)t.next; 2429 if (tb != null && tb.next != t) 2430 return false; 2431 if (tn != null && tn.prev != t) 2432 return false; 2433 if (tp != null && t != tp.left && t != tp.right) 2434 return false; 2435 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 2436 return false; 2437 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 2438 return false; 2439 if (t.red && tl != null && tl.red && tr != null && tr.red) 2440 return false; 2441 if (tl != null && !checkInvariants(tl)) 2442 return false; 2443 if (tr != null && !checkInvariants(tr)) 2444 return false; 2445 return true; 2446 } 2447 } 2448 2449 }