1 /* 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. Oracle designates this 7 * particular file as subject to the "Classpath" exception as provided 8 * by Oracle in the LICENSE file that accompanied this code. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 /* 26 * This file is available under and governed by the GNU General Public 27 * License version 2 only, as published by the Free Software Foundation. 
28 * However, the following notice accompanied the original version of this 29 * file: 30 * 31 * Written by Doug Lea with assistance from members of JCP JSR-166 32 * Expert Group and released to the public domain, as explained at 33 * http://creativecommons.org/publicdomain/zero/1.0/ 34 */ 35 36 package java.util.concurrent; 37 38 import java.io.ObjectStreamField; 39 import java.io.Serializable; 40 import java.lang.reflect.ParameterizedType; 41 import java.lang.reflect.Type; 42 import java.util.AbstractMap; 43 import java.util.Arrays; 44 import java.util.Collection; 45 import java.util.Enumeration; 46 import java.util.HashMap; 47 import java.util.Hashtable; 48 import java.util.Iterator; 49 import java.util.Map; 50 import java.util.NoSuchElementException; 51 import java.util.Set; 52 import java.util.Spliterator; 53 import java.util.concurrent.atomic.AtomicReference; 54 import java.util.concurrent.locks.LockSupport; 55 import java.util.concurrent.locks.ReentrantLock; 56 import java.util.function.BiConsumer; 57 import java.util.function.BiFunction; 58 import java.util.function.Consumer; 59 import java.util.function.DoubleBinaryOperator; 60 import java.util.function.Function; 61 import java.util.function.IntBinaryOperator; 62 import java.util.function.LongBinaryOperator; 63 import java.util.function.Predicate; 64 import java.util.function.ToDoubleBiFunction; 65 import java.util.function.ToDoubleFunction; 66 import java.util.function.ToIntBiFunction; 67 import java.util.function.ToIntFunction; 68 import java.util.function.ToLongBiFunction; 69 import java.util.function.ToLongFunction; 70 import java.util.stream.Stream; 71 import jdk.internal.misc.Unsafe; 72 73 /** 74 * A hash table supporting full concurrency of retrievals and 75 * high expected concurrency for updates. This class obeys the 76 * same functional specification as {@link java.util.Hashtable}, and 77 * includes versions of methods corresponding to each method of 78 * {@code Hashtable}. 
However, even though all operations are 79 * thread-safe, retrieval operations do <em>not</em> entail locking, 80 * and there is <em>not</em> any support for locking the entire table 81 * in a way that prevents all access. This class is fully 82 * interoperable with {@code Hashtable} in programs that rely on its 83 * thread safety but not on its synchronization details. 84 * 85 * <p>Retrieval operations (including {@code get}) generally do not 86 * block, so may overlap with update operations (including {@code put} 87 * and {@code remove}). Retrievals reflect the results of the most 88 * recently <em>completed</em> update operations holding upon their 89 * onset. (More formally, an update operation for a given key bears a 90 * <em>happens-before</em> relation with any (non-null) retrieval for 91 * that key reporting the updated value.) For aggregate operations 92 * such as {@code putAll} and {@code clear}, concurrent retrievals may 93 * reflect insertion or removal of only some entries. Similarly, 94 * Iterators, Spliterators and Enumerations return elements reflecting the 95 * state of the hash table at some point at or since the creation of the 96 * iterator/enumeration. They do <em>not</em> throw {@link 97 * java.util.ConcurrentModificationException ConcurrentModificationException}. 98 * However, iterators are designed to be used by only one thread at a time. 99 * Bear in mind that the results of aggregate status methods including 100 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically 101 * useful only when a map is not undergoing concurrent updates in other threads. 102 * Otherwise the results of these methods reflect transient states 103 * that may be adequate for monitoring or estimation purposes, but not 104 * for program control. 
105 * 106 * <p>The table is dynamically expanded when there are too many 107 * collisions (i.e., keys that have distinct hash codes but fall into 108 * the same slot modulo the table size), with the expected average 109 * effect of maintaining roughly two bins per mapping (corresponding 110 * to a 0.75 load factor threshold for resizing). There may be much 111 * variance around this average as mappings are added and removed, but 112 * overall, this maintains a commonly accepted time/space tradeoff for 113 * hash tables. However, resizing this or any other kind of hash 114 * table may be a relatively slow operation. When possible, it is a 115 * good idea to provide a size estimate as an optional {@code 116 * initialCapacity} constructor argument. An additional optional 117 * {@code loadFactor} constructor argument provides a further means of 118 * customizing initial table capacity by specifying the table density 119 * to be used in calculating the amount of space to allocate for the 120 * given number of elements. Also, for compatibility with previous 121 * versions of this class, constructors may optionally specify an 122 * expected {@code concurrencyLevel} as an additional hint for 123 * internal sizing. Note that using many keys with exactly the same 124 * {@code hashCode()} is a sure way to slow down performance of any 125 * hash table. To ameliorate impact, when keys are {@link Comparable}, 126 * this class may use comparison order among keys to help break ties. 127 * 128 * <p>A {@link Set} projection of a ConcurrentHashMap may be created 129 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed 130 * (using {@link #keySet(Object)} when only keys are of interest, and the 131 * mapped values are (perhaps transiently) not used or all take the 132 * same mapping value. 
133 * 134 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a 135 * form of histogram or multiset) by using {@link 136 * java.util.concurrent.atomic.LongAdder} values and initializing via 137 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count 138 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use 139 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();} 140 * 141 * <p>This class and its views and iterators implement all of the 142 * <em>optional</em> methods of the {@link Map} and {@link Iterator} 143 * interfaces. 144 * 145 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class 146 * does <em>not</em> allow {@code null} to be used as a key or value. 147 * 148 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk 149 * operations that, unlike most {@link Stream} methods, are designed 150 * to be safely, and often sensibly, applied even with maps that are 151 * being concurrently updated by other threads; for example, when 152 * computing a snapshot summary of the values in a shared registry. 153 * There are three kinds of operation, each with four forms, accepting 154 * functions with keys, values, entries, and (key, value) pairs as 155 * arguments and/or return values. Because the elements of a 156 * ConcurrentHashMap are not ordered in any particular way, and may be 157 * processed in different orders in different parallel executions, the 158 * correctness of supplied functions should not depend on any 159 * ordering, or on any other objects or values that may transiently 160 * change while computation is in progress; and except for forEach 161 * actions, should ideally be side-effect-free. Bulk operations on 162 * {@link java.util.Map.Entry} objects do not support method {@code 163 * setValue}. 164 * 165 * <ul> 166 * <li>forEach: Performs a given action on each element. 
167 * A variant form applies a given transformation on each element 168 * before performing the action. 169 * 170 * <li>search: Returns the first available non-null result of 171 * applying a given function on each element; skipping further 172 * search when a result is found. 173 * 174 * <li>reduce: Accumulates each element. The supplied reduction 175 * function cannot rely on ordering (more formally, it should be 176 * both associative and commutative). There are five variants: 177 * 178 * <ul> 179 * 180 * <li>Plain reductions. (There is not a form of this method for 181 * (key, value) function arguments since there is no corresponding 182 * return type.) 183 * 184 * <li>Mapped reductions that accumulate the results of a given 185 * function applied to each element. 186 * 187 * <li>Reductions to scalar doubles, longs, and ints, using a 188 * given basis value. 189 * 190 * </ul> 191 * </ul> 192 * 193 * <p>These bulk operations accept a {@code parallelismThreshold} 194 * argument. Methods proceed sequentially if the current map size is 195 * estimated to be less than the given threshold. Using a value of 196 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value 197 * of {@code 1} results in maximal parallelism by partitioning into 198 * enough subtasks to fully utilize the {@link 199 * ForkJoinPool#commonPool()} that is used for all parallel 200 * computations. Normally, you would initially choose one of these 201 * extreme values, and then measure performance of using in-between 202 * values that trade off overhead versus throughput. 203 * 204 * <p>The concurrency properties of bulk operations follow 205 * from those of ConcurrentHashMap: Any non-null result returned 206 * from {@code get(key)} and related access methods bears a 207 * happens-before relation with the associated insertion or 208 * update. 
The result of any bulk operation reflects the 209 * composition of these per-element relations (but is not 210 * necessarily atomic with respect to the map as a whole unless it 211 * is somehow known to be quiescent). Conversely, because keys 212 * and values in the map are never null, null serves as a reliable 213 * atomic indicator of the current lack of any result. To 214 * maintain this property, null serves as an implicit basis for 215 * all non-scalar reduction operations. For the double, long, and 216 * int versions, the basis should be one that, when combined with 217 * any other value, returns that other value (more formally, it 218 * should be the identity element for the reduction). Most common 219 * reductions have these properties; for example, computing a sum 220 * with basis 0 or a minimum with basis MAX_VALUE. 221 * 222 * <p>Search and transformation functions provided as arguments 223 * should similarly return null to indicate the lack of any result 224 * (in which case it is not used). In the case of mapped 225 * reductions, this also enables transformations to serve as 226 * filters, returning null (or, in the case of primitive 227 * specializations, the identity basis) if the element should not 228 * be combined. You can create compound transformations and 229 * filterings by composing them yourself under this "null means 230 * there is nothing there now" rule before using them in search or 231 * reduce operations. 232 * 233 * <p>Methods accepting and/or returning Entry arguments maintain 234 * key-value associations. They may be useful for example when 235 * finding the key for the greatest value. Note that "plain" Entry 236 * arguments can be supplied using {@code new 237 * AbstractMap.SimpleEntry(k,v)}. 238 * 239 * <p>Bulk operations may complete abruptly, throwing an 240 * exception encountered in the application of a supplied 241 * function. 
Bear in mind when handling such exceptions that other 242 * concurrently executing functions could also have thrown 243 * exceptions, or would have done so if the first exception had 244 * not occurred. 245 * 246 * <p>Speedups for parallel compared to sequential forms are common 247 * but not guaranteed. Parallel operations involving brief functions 248 * on small maps may execute more slowly than sequential forms if the 249 * underlying work to parallelize the computation is more expensive 250 * than the computation itself. Similarly, parallelization may not 251 * lead to much actual parallelism if all processors are busy 252 * performing unrelated tasks. 253 * 254 * <p>All arguments to all task methods must be non-null. 255 * 256 * <p>This class is a member of the 257 * <a href="{@docRoot}/java/util/package-summary.html#CollectionsFramework"> 258 * Java Collections Framework</a>. 259 * 260 * @since 1.5 261 * @author Doug Lea 262 * @param <K> the type of keys maintained by this map 263 * @param <V> the type of mapped values 264 */ 265 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> 266 implements ConcurrentMap<K,V>, Serializable { 267 private static final long serialVersionUID = 7249069246763182397L; 268 269 /* 270 * Overview: 271 * 272 * The primary design goal of this hash table is to maintain 273 * concurrent readability (typically method get(), but also 274 * iterators and related methods) while minimizing update 275 * contention. Secondary goals are to keep space consumption about 276 * the same or better than java.util.HashMap, and to support high 277 * initial insertion rates on an empty table by many threads. 278 * 279 * This map usually acts as a binned (bucketed) hash table. Each 280 * key-value mapping is held in a Node. Most nodes are instances 281 * of the basic Node class with hash, key, value, and next 282 * fields. However, various subclasses exist: TreeNodes are 283 * arranged in balanced trees, not lists. 
TreeBins hold the roots 284 * of sets of TreeNodes. ForwardingNodes are placed at the heads 285 * of bins during resizing. ReservationNodes are used as 286 * placeholders while establishing values in computeIfAbsent and 287 * related methods. The types TreeBin, ForwardingNode, and 288 * ReservationNode do not hold normal user keys, values, or 289 * hashes, and are readily distinguishable during search etc 290 * because they have negative hash fields and null key and value 291 * fields. (These special nodes are either uncommon or transient, 292 * so the impact of carrying around some unused fields is 293 * insignificant.) 294 * 295 * The table is lazily initialized to a power-of-two size upon the 296 * first insertion. Each bin in the table normally contains a 297 * list of Nodes (most often, the list has only zero or one Node). 298 * Table accesses require volatile/atomic reads, writes, and 299 * CASes. Because there is no other way to arrange this without 300 * adding further indirections, we use intrinsics 301 * (jdk.internal.misc.Unsafe) operations. 302 * 303 * We use the top (sign) bit of Node hash fields for control 304 * purposes -- it is available anyway because of addressing 305 * constraints. Nodes with negative hash fields are specially 306 * handled or ignored in map methods. 307 * 308 * Insertion (via put or its variants) of the first node in an 309 * empty bin is performed by just CASing it to the bin. This is 310 * by far the most common case for put operations under most 311 * key/hash distributions. Other update operations (insert, 312 * delete, and replace) require locks. We do not want to waste 313 * the space required to associate a distinct lock object with 314 * each bin, so instead use the first node of a bin list itself as 315 * a lock. Locking support for these locks relies on builtin 316 * "synchronized" monitors. 
*
     * Using the first node of a list as a lock does not by itself
     * suffice though: When a node is locked, any update must first
     * validate that it is still the first node after locking it, and
     * retry if not. Because new nodes are always appended to lists,
     * once a node is first in a bin, it remains first until deleted
     * or the bin becomes invalidated (upon resizing).
     *
     * The main disadvantage of per-bin locks is that other update
     * operations on other nodes in a bin list protected by the same
     * lock can stall, for example when user equals() or mapping
     * functions take a long time.  However, statistically, under
     * random hash codes, this is not a common problem.  Ideally, the
     * frequency of nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average, given the resizing threshold
     * of 0.75, although with a large variance because of resizing
     * granularity. Ignoring variance, the expected occurrences of
     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
     * first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * Lock contention probability for two threads accessing distinct
     * elements is roughly 1 / (8 * #elements) under random hashes.
     *
     * Actual hash code distributions encountered in practice
     * sometimes deviate significantly from uniform randomness.  This
     * includes the case when N > (1<<30), so some keys MUST collide.
     * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes or ones that differ only
     * in masked-out high bits.
So we use a secondary strategy that 358 * applies when the number of nodes in a bin exceeds a 359 * threshold. These TreeBins use a balanced tree to hold nodes (a 360 * specialized form of red-black trees), bounding search time to 361 * O(log N). Each search step in a TreeBin is at least twice as 362 * slow as in a regular list, but given that N cannot exceed 363 * (1<<64) (before running out of addresses) this bounds search 364 * steps, lock hold times, etc, to reasonable constants (roughly 365 * 100 nodes inspected per operation worst case) so long as keys 366 * are Comparable (which is very common -- String, Long, etc). 367 * TreeBin nodes (TreeNodes) also maintain the same "next" 368 * traversal pointers as regular nodes, so can be traversed in 369 * iterators in the same way. 370 * 371 * The table is resized when occupancy exceeds a percentage 372 * threshold (nominally, 0.75, but see below). Any thread 373 * noticing an overfull bin may assist in resizing after the 374 * initiating thread allocates and sets up the replacement array. 375 * However, rather than stalling, these other threads may proceed 376 * with insertions etc. The use of TreeBins shields us from the 377 * worst case effects of overfilling while resizes are in 378 * progress. Resizing proceeds by transferring bins, one by one, 379 * from the table to the next table. However, threads claim small 380 * blocks of indices to transfer (via field transferIndex) before 381 * doing so, reducing contention. A generation stamp in field 382 * sizeCtl ensures that resizings do not overlap. Because we are 383 * using power-of-two expansion, the elements from each bin must 384 * either stay at same index, or move with a power of two 385 * offset. We eliminate unnecessary node creation by catching 386 * cases where old nodes can be reused because their next fields 387 * won't change. On average, only about one-sixth of them need 388 * cloning when a table doubles. 
The nodes they replace will be 389 * garbage collectable as soon as they are no longer referenced by 390 * any reader thread that may be in the midst of concurrently 391 * traversing table. Upon transfer, the old table bin contains 392 * only a special forwarding node (with hash field "MOVED") that 393 * contains the next table as its key. On encountering a 394 * forwarding node, access and update operations restart, using 395 * the new table. 396 * 397 * Each bin transfer requires its bin lock, which can stall 398 * waiting for locks while resizing. However, because other 399 * threads can join in and help resize rather than contend for 400 * locks, average aggregate waits become shorter as resizing 401 * progresses. The transfer operation must also ensure that all 402 * accessible bins in both the old and new table are usable by any 403 * traversal. This is arranged in part by proceeding from the 404 * last bin (table.length - 1) up towards the first. Upon seeing 405 * a forwarding node, traversals (see class Traverser) arrange to 406 * move to the new table without revisiting nodes. To ensure that 407 * no intervening nodes are skipped even when moved out of order, 408 * a stack (see class TableStack) is created on first encounter of 409 * a forwarding node during a traversal, to maintain its place if 410 * later processing the current table. The need for these 411 * save/restore mechanics is relatively rare, but when one 412 * forwarding node is encountered, typically many more will be. 413 * So Traversers use a simple caching scheme to avoid creating so 414 * many new TableStack nodes. (Thanks to Peter Levart for 415 * suggesting use of a stack here.) 416 * 417 * The traversal scheme also applies to partial traversals of 418 * ranges of bins (via an alternate Traverser constructor) 419 * to support partitioned aggregate operations. 
Also, read-only 420 * operations give up if ever forwarded to a null table, which 421 * provides support for shutdown-style clearing, which is also not 422 * currently implemented. 423 * 424 * Lazy table initialization minimizes footprint until first use, 425 * and also avoids resizings when the first operation is from a 426 * putAll, constructor with map argument, or deserialization. 427 * These cases attempt to override the initial capacity settings, 428 * but harmlessly fail to take effect in cases of races. 429 * 430 * The element count is maintained using a specialization of 431 * LongAdder. We need to incorporate a specialization rather than 432 * just use a LongAdder in order to access implicit 433 * contention-sensing that leads to creation of multiple 434 * CounterCells. The counter mechanics avoid contention on 435 * updates but can encounter cache thrashing if read too 436 * frequently during concurrent access. To avoid reading so often, 437 * resizing under contention is attempted only upon adding to a 438 * bin already holding two or more nodes. Under uniform hash 439 * distributions, the probability of this occurring at threshold 440 * is around 13%, meaning that only about 1 in 8 puts check 441 * threshold (and after resizing, many fewer do so). 442 * 443 * TreeBins use a special form of comparison for search and 444 * related operations (which is the main reason we cannot use 445 * existing collections such as TreeMaps). TreeBins contain 446 * Comparable elements, but may contain others, as well as 447 * elements that are Comparable but not necessarily Comparable for 448 * the same T, so we cannot invoke compareTo among them. To handle 449 * this, the tree is ordered primarily by hash value, then by 450 * Comparable.compareTo order if applicable. On lookup at a node, 451 * if elements are not comparable or compare as 0 then both left 452 * and right children may need to be searched in the case of tied 453 * hash values. 
(This corresponds to the full list search that 454 * would be necessary if all elements were non-Comparable and had 455 * tied hashes.) On insertion, to keep a total ordering (or as 456 * close as is required here) across rebalancings, we compare 457 * classes and identityHashCodes as tie-breakers. The red-black 458 * balancing code is updated from pre-jdk-collections 459 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 460 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 461 * Algorithms" (CLR). 462 * 463 * TreeBins also require an additional locking mechanism. While 464 * list traversal is always possible by readers even during 465 * updates, tree traversal is not, mainly because of tree-rotations 466 * that may change the root node and/or its linkages. TreeBins 467 * include a simple read-write lock mechanism parasitic on the 468 * main bin-synchronization strategy: Structural adjustments 469 * associated with an insertion or removal are already bin-locked 470 * (and so cannot conflict with other writers) but must wait for 471 * ongoing readers to finish. Since there can be only one such 472 * waiter, we use a simple scheme using a single "waiter" field to 473 * block writers. However, readers need never block. If the root 474 * lock is held, they proceed along the slow traversal path (via 475 * next-pointers) until the lock becomes available or the list is 476 * exhausted, whichever comes first. These cases are not fast, but 477 * maximize aggregate expected throughput. 478 * 479 * Maintaining API and serialization compatibility with previous 480 * versions of this class introduces several oddities. Mainly: We 481 * leave untouched but unused constructor arguments referring to 482 * concurrencyLevel. We accept a loadFactor constructor argument, 483 * but apply it only to initial table capacity (which is the only 484 * time that we can guarantee to honor it.) 
We also declare an
     * unused "Segment" class that is instantiated in minimal form
     * only when serializing.
     *
     * Also, solely for compatibility with previous versions of this
     * class, it extends AbstractMap, even though all of its methods
     * are overridden, so it is just useless baggage.
     *
     * This file is organized to make things a little easier to follow
     * while reading than they might otherwise: First the main static
     * declarations and utilities, then fields, then main public
     * methods (with a few factorings of multiple public methods into
     * internal ones), then sizing methods, trees, traversers, and
     * bulk operations.
     */

    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.  The 8-word slack below
     * Integer.MAX_VALUE leaves room for VM array header words.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold (i.e., 0.75 * n).
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.
     */
    private static final int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; // 65535 with RESIZE_STAMP_BITS == 16

    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; // 16 with RESIZE_STAMP_BITS == 16

    /*
     * Encodings for Node hash fields.
See above for explanation.
     */
    // Control hashes are negative; spread() masks user hashes with
    // HASH_BITS (clearing the sign bit) so they can never collide
    // with these values.
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /**
     * Serialized pseudo-fields, provided only for jdk7 compatibility.
     * @serialField segments Segment[]
     *   The segments, each of which is a specialized hash table.
     * @serialField segmentMask int
     *   Mask value for indexing into segments. The upper bits of a
     *   key's hash code are used to choose the segment.
     * @serialField segmentShift int
     *   Shift value for indexing within segments.
     */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE),
    };

    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
625 */ 626 static class Node<K,V> implements Map.Entry<K,V> { 627 final int hash; 628 final K key; 629 volatile V val; 630 volatile Node<K,V> next; 631 632 Node(int hash, K key, V val) { 633 this.hash = hash; 634 this.key = key; 635 this.val = val; 636 } 637 638 Node(int hash, K key, V val, Node<K,V> next) { 639 this(hash, key, val); 640 this.next = next; 641 } 642 643 public final K getKey() { return key; } 644 public final V getValue() { return val; } 645 public final int hashCode() { return key.hashCode() ^ val.hashCode(); } 646 public final String toString() { 647 return Helpers.mapEntryToString(key, val); 648 } 649 public final V setValue(V value) { 650 throw new UnsupportedOperationException(); 651 } 652 653 public final boolean equals(Object o) { 654 Object k, v, u; Map.Entry<?,?> e; 655 return ((o instanceof Map.Entry) && 656 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 657 (v = e.getValue()) != null && 658 (k == key || k.equals(key)) && 659 (v == (u = val) || v.equals(u))); 660 } 661 662 /** 663 * Virtualized support for map.get(); overridden in subclasses. 664 */ 665 Node<K,V> find(int h, Object k) { 666 Node<K,V> e = this; 667 if (k != null) { 668 do { 669 K ek; 670 if (e.hash == h && 671 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 672 return e; 673 } while ((e = e.next) != null); 674 } 675 return null; 676 } 677 } 678 679 /* ---------------- Static utilities -------------- */ 680 681 /** 682 * Spreads (XORs) higher bits of hash to lower and also forces top 683 * bit to 0. Because the table uses power-of-two masking, sets of 684 * hashes that vary only in bits above the current mask will 685 * always collide. (Among known examples are sets of Float keys 686 * holding consecutive whole numbers in small tables.) So we 687 * apply a transform that spreads the impact of higher bits 688 * downward. There is a tradeoff between speed, utility, and 689 * quality of bit-spreading. 
     * Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    /**
     * Returns a power of two table size for the given desired capacity.
     * See Hackers Delight, sec 3.2
     */
    private static final int tableSizeFor(int c) {
        // Smear the highest set bit of (c - 1) into every lower position,
        // so n + 1 is the next power of two >= c.
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /* ---------------- Table element access -------------- */

    /*
     * Atomic access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  All uses of
     * the tab arguments must be null checked by callers.  All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index.  Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so require only release ordering.
     */

    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSetObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }

    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views (lazily created; benign race if two threads create one — see keySet())
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;


    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
    }

    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Size for ~1.5x the requested element count so that the default
        // 0.75 resize threshold is not hit while inserting initialCapacity
        // elements.
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }

    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads.  The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }

    // Original (since JDK1.2) Map methods

    /**
     * {@inheritDoc}
     */
    public int size() {
        long n = sumCount();
        // Clamp the long cell-sum into the int range required by Map.size().
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }

    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Common case: first node in bin is the match.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Special node (forwarding, tree bin, or reservation):
                // delegate to its virtualized find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain bin: scan the remainder of the linked list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        return get(key) != null;
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Table is being resized; help move bins, then retry on nextTable.
                tab = helpTransfer(tab, f);
            else if (onlyIfAbsent // check first node without acquiring lock
                     && fh == hash
                     && ((fk = f.key) == key || (fk != null && key.equals(fk)))
                     && (fv = f.val) != null)
                return fv;
            else {
                V oldVal = null;
                // Lock the bin's first node; re-check it is still first
                // before mutating, since it may have been removed/resized.
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {       // plain linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key, value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            // A computeIfAbsent-style mapping function tried to
                            // update the map re-entrantly.
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;       // replaced: count unchanged
                    break;
                }
            }
        }
        // New mapping added: bump the count and possibly trigger a resize.
        addCount(1L, binCount);
        return null;
    }

    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;                       // empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // resizing in progress: help, retry
            else {
                V oldVal = null;
                boolean validated = false;   // bin head unchanged while locked
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {       // plain linked-list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // cv == null means unconditional match
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;       // replace
                                        else if (pred != null)
                                            pred.next = e.next;  // unlink interior
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree became too small: revert to list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // removal: decrement count
                        return oldVal;
                    }
                    break;
                }
                // else: bin head changed under us — retry the outer loop
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count the nodes being discarded in this bin.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        // Lazily created; a race here is benign (extra view object at worst).
        KeySetView<K,V> ks;
        if ((ks = keySet) != null) return ks;
        return keySet = new KeySetView<K,V>(this, null);
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.
     * The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
     * and {@link Spliterator#NONNULL}.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        // Lazily created; a race here is benign (extra view object at worst).
        ValuesView<K,V> vs;
        if ((vs = values) != null) return vs;
        return values = new ValuesView<K,V>(this);
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        EntrySetView<K,V> es;
        if ((es = entrySet) != null) return es;
        return entrySet = new EntrySetView<K,V>(this);
    }

    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }

    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = p.key;
                V v = p.val;
                // Guard against self-referential maps (map used as its own key/value).
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }

    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // First pass: every mapping in this map must be present in m.
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Second pass: every mapping in m must be present here.
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the serialized fields, followed by the key (Object) and value
     * (Object) for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked")
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
        streamFields.put("segments", segments);
        streamFields.put("segmentShift", segmentShift);
        streamFields.put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // Null key/value pair terminates the mapping stream.
        s.writeObject(null);
        s.writeObject(null);
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        // Read all mappings into a temporary prepended list (p is its head).
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked")
            K k = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break; // null pair terminates the stream
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L; // number of distinct mappings actually inserted
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already treeified: insert into the tree;
                        // putTreeVal returns null only for a new key.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan the bin for a duplicate key (streams may
                        // contain duplicates after concurrent serialization).
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin too long: convert it (including p) to a tree.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // next resize threshold (0.75 * n)
            baseCount = added;
        }
    }

    // ConcurrentMap methods

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V putIfAbsent(K key, V value) {
        return putVal(key, value, true);
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object key, Object value) {
        if (key == null)
            throw new NullPointerException();
        return value != null && replaceNode(key, null, value) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if any of the arguments are null
     */
    public boolean replace(K key, V oldValue, V newValue) {
        if (key == null || oldValue == null || newValue == null)
            throw new NullPointerException();
        return replaceNode(key, newValue, oldValue) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V replace(K key, V value) {
        if (key == null || value == null)
            throw new NullPointerException();
        return replaceNode(key, value, null);
    }

    // Overrides of JDK8+ Map extension method defaults

    /**
     * Returns the value to which the specified key is mapped, or the
     * given default value if this map contains no mapping for the
     * key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the value to return if this map contains
     * no mapping for the given key
     * @return the mapping for the key, if present; else the default value
     * @throws NullPointerException if the specified key is null
     */
    public V getOrDefault(Object key, V defaultValue) {
        V v;
        return (v = get(key)) == null ? defaultValue : v;
    }

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                action.accept(p.key, p.val);
            }
        }
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // Retry loop: the CAS-style replaceNode may fail if another
                // thread changed the value; re-read and recompute until the
                // replacement succeeds or the mapping disappears.
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }

    /**
     * Helper method for EntrySetView.removeIf.
     */
    boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                // Snapshot the entry; remove only if the value is still v,
                // so a concurrent update is not clobbered.
                Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
                if (function.test(e) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * Helper method for ValuesView.removeIf.
     */
    boolean removeValueIf(Predicate<? super V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                // Conditional removal (match on v) avoids racing with updates.
                if (function.test(v) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}.  The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key.  Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: publish a ReservationNode place-holder so other
                // threads block on it while the mapping function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val);
                        } finally {
                            // Replace reservation with real node, or clear it
                            // if the function returned null or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else if (fh == h    // check first node without acquiring lock
                     && ((fk = f.key) == key || (fk != null && key.equals(fk)))
                     && (fv = f.val) != null)
                return fv;
            else {
                boolean added = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {                 // regular linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        // pred.next changed => mappingFunction
                                        // updated this map re-entrantly.
                                        if (pred.next != null)
                                            throw new IllegalStateException("Recursive update");
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val;
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);
        return val;
    }

    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value. The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;          // change in element count: -1, 0
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;          // key absent; nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {                 // linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // null result removes the mapping
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFunction<? super K, ? super V, ?
                     extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;          // change in element count: -1, 0, or 1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: install a reservation while the function runs,
                // as in computeIfAbsent.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val);
                            }
                        } finally {
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {                 // linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: compute with null oldValue
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        if (pred.next != null)
                                            throw new IllegalStateException("Recursive update");
                                        delta = 1;
                                        pred.next = new Node<K,V>(h, key, val);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically.  Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ?
                   extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;          // change in element count: -1, 0, or 1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: no remapping needed, plain CAS insert suffices
                // (the function is only applied when a value is present).
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {                 // linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next = new Node<K,V>(h, key, val);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    // Hashtable legacy methods

    /**
     * Tests if some key maps into the specified value in this table.
     *
     * <p>Note that this method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections Framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }

    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table
     * @see #keySet()
     */
    public Enumeration<K> keys() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new KeyIterator<K,V>(t, f, 0, f, this);
    }

    /**
     * Returns an enumeration of the values in this table.
     *
     * @return an enumeration of the values in this table
     * @see #values()
     */
    public Enumeration<V> elements() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new ValueIterator<K,V>(t, f, 0, f, this);
    }

    // ConcurrentHashMap-only methods

    /**
     * Returns the number of mappings. This method should be used
     * instead of {@link #size} because a ConcurrentHashMap may
     * contain more mappings than can be represented as an int. The
     * value returned is an estimate; the actual count may differ if
     * there are concurrent insertions or removals.
     *
     * @return the number of mappings
     * @since 1.8
     */
    public long mappingCount() {
        long n = sumCount();
        return (n < 0L) ? 0L : n; // ignore transient negative values
    }

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param <K> the element type of the returned set
     * @return the new set
     * @since 1.8
     */
    public static <K> KeySetView<K,Boolean> newKeySet() {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
    }

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param initialCapacity The implementation performs internal
     *        sizing to accommodate this many elements.
     * @param <K> the element type of the returned set
     * @return the new set
     * @throws IllegalArgumentException if the initial capacity of
     *         elements is negative
     * @since 1.8
     */
    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
    }

    /**
     * Returns a {@link Set} view of the keys in this map, using the
     * given common mapped value for any additions (i.e., {@link
     * Collection#add} and {@link Collection#addAll(Collection)}).
     * This is of course only appropriate if it is acceptable to use
     * the same value for all additions from this view.
     *
     * @param mappedValue the mapped value to use for any additions
     * @return the set view
     * @throws NullPointerException if the mappedValue is null
     */
    public KeySetView<K,V> keySet(V mappedValue) {
        if (mappedValue == null)
            throw new NullPointerException();
        return new KeySetView<K,V>(this, mappedValue);
    }

    /* ---------------- Special Nodes -------------- */

    /**
     * A node inserted at head of bins during transfer operations.
2237 */ 2238 static final class ForwardingNode<K,V> extends Node<K,V> { 2239 final Node<K,V>[] nextTable; 2240 ForwardingNode(Node<K,V>[] tab) { 2241 super(MOVED, null, null); 2242 this.nextTable = tab; 2243 } 2244 2245 Node<K,V> find(int h, Object k) { 2246 // loop to avoid arbitrarily deep recursion on forwarding nodes 2247 outer: for (Node<K,V>[] tab = nextTable;;) { 2248 Node<K,V> e; int n; 2249 if (k == null || tab == null || (n = tab.length) == 0 || 2250 (e = tabAt(tab, (n - 1) & h)) == null) 2251 return null; 2252 for (;;) { 2253 int eh; K ek; 2254 if ((eh = e.hash) == h && 2255 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2256 return e; 2257 if (eh < 0) { 2258 if (e instanceof ForwardingNode) { 2259 tab = ((ForwardingNode<K,V>)e).nextTable; 2260 continue outer; 2261 } 2262 else 2263 return e.find(h, k); 2264 } 2265 if ((e = e.next) == null) 2266 return null; 2267 } 2268 } 2269 } 2270 } 2271 2272 /** 2273 * A place-holder node used in computeIfAbsent and compute. 2274 */ 2275 static final class ReservationNode<K,V> extends Node<K,V> { 2276 ReservationNode() { 2277 super(RESERVED, null, null); 2278 } 2279 2280 Node<K,V> find(int h, Object k) { 2281 return null; 2282 } 2283 } 2284 2285 /* ---------------- Table Initialization and Resizing -------------- */ 2286 2287 /** 2288 * Returns the stamp bits for resizing a table of size n. 2289 * Must be negative when shifted left by RESIZE_STAMP_SHIFT. 2290 */ 2291 static final int resizeStamp(int n) { 2292 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); 2293 } 2294 2295 /** 2296 * Initializes table, using the size recorded in sizeCtl. 
2297 */ 2298 private final Node<K,V>[] initTable() { 2299 Node<K,V>[] tab; int sc; 2300 while ((tab = table) == null || tab.length == 0) { 2301 if ((sc = sizeCtl) < 0) 2302 Thread.yield(); // lost initialization race; just spin 2303 else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) { 2304 try { 2305 if ((tab = table) == null || tab.length == 0) { 2306 int n = (sc > 0) ? sc : DEFAULT_CAPACITY; 2307 @SuppressWarnings("unchecked") 2308 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2309 table = tab = nt; 2310 sc = n - (n >>> 2); 2311 } 2312 } finally { 2313 sizeCtl = sc; 2314 } 2315 break; 2316 } 2317 } 2318 return tab; 2319 } 2320 2321 /** 2322 * Adds to count, and if table is too small and not already 2323 * resizing, initiates transfer. If already resizing, helps 2324 * perform transfer if work is available. Rechecks occupancy 2325 * after a transfer to see if another resize is already needed 2326 * because resizings are lagging additions. 2327 * 2328 * @param x the count to add 2329 * @param check if <0, don't check resize, if <= 1 only check if uncontended 2330 */ 2331 private final void addCount(long x, int check) { 2332 CounterCell[] as; long b, s; 2333 if ((as = counterCells) != null || 2334 !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) { 2335 CounterCell a; long v; int m; 2336 boolean uncontended = true; 2337 if (as == null || (m = as.length - 1) < 0 || 2338 (a = as[ThreadLocalRandom.getProbe() & m]) == null || 2339 !(uncontended = 2340 U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))) { 2341 fullAddCount(x, uncontended); 2342 return; 2343 } 2344 if (check <= 1) 2345 return; 2346 s = sumCount(); 2347 } 2348 if (check >= 0) { 2349 Node<K,V>[] tab, nt; int n, sc; 2350 while (s >= (long)(sc = sizeCtl) && (tab = table) != null && 2351 (n = tab.length) < MAXIMUM_CAPACITY) { 2352 int rs = resizeStamp(n); 2353 if (sc < 0) { 2354 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2355 sc == rs + MAX_RESIZERS || (nt = nextTable) == 
null || 2356 transferIndex <= 0) 2357 break; 2358 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) 2359 transfer(tab, nt); 2360 } 2361 else if (U.compareAndSetInt(this, SIZECTL, sc, 2362 (rs << RESIZE_STAMP_SHIFT) + 2)) 2363 transfer(tab, null); 2364 s = sumCount(); 2365 } 2366 } 2367 } 2368 2369 /** 2370 * Helps transfer if a resize is in progress. 2371 */ 2372 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) { 2373 Node<K,V>[] nextTab; int sc; 2374 if (tab != null && (f instanceof ForwardingNode) && 2375 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { 2376 int rs = resizeStamp(tab.length); 2377 while (nextTab == nextTable && table == tab && 2378 (sc = sizeCtl) < 0) { 2379 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2380 sc == rs + MAX_RESIZERS || transferIndex <= 0) 2381 break; 2382 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) { 2383 transfer(tab, nextTab); 2384 break; 2385 } 2386 } 2387 return nextTab; 2388 } 2389 return table; 2390 } 2391 2392 /** 2393 * Tries to presize table to accommodate the given number of elements. 2394 * 2395 * @param size number of elements (doesn't need to be perfectly accurate) 2396 */ 2397 private final void tryPresize(int size) { 2398 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : 2399 tableSizeFor(size + (size >>> 1) + 1); 2400 int sc; 2401 while ((sc = sizeCtl) >= 0) { 2402 Node<K,V>[] tab = table; int n; 2403 if (tab == null || (n = tab.length) == 0) { 2404 n = (sc > c) ? 
sc : c; 2405 if (U.compareAndSetInt(this, SIZECTL, sc, -1)) { 2406 try { 2407 if (table == tab) { 2408 @SuppressWarnings("unchecked") 2409 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2410 table = nt; 2411 sc = n - (n >>> 2); 2412 } 2413 } finally { 2414 sizeCtl = sc; 2415 } 2416 } 2417 } 2418 else if (c <= sc || n >= MAXIMUM_CAPACITY) 2419 break; 2420 else if (tab == table) { 2421 int rs = resizeStamp(n); 2422 if (U.compareAndSetInt(this, SIZECTL, sc, 2423 (rs << RESIZE_STAMP_SHIFT) + 2)) 2424 transfer(tab, null); 2425 } 2426 } 2427 } 2428 2429 /** 2430 * Moves and/or copies the nodes in each bin to new table. See 2431 * above for explanation. 2432 */ 2433 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) { 2434 int n = tab.length, stride; 2435 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) 2436 stride = MIN_TRANSFER_STRIDE; // subdivide range 2437 if (nextTab == null) { // initiating 2438 try { 2439 @SuppressWarnings("unchecked") 2440 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; 2441 nextTab = nt; 2442 } catch (Throwable ex) { // try to cope with OOME 2443 sizeCtl = Integer.MAX_VALUE; 2444 return; 2445 } 2446 nextTable = nextTab; 2447 transferIndex = n; 2448 } 2449 int nextn = nextTab.length; 2450 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); 2451 boolean advance = true; 2452 boolean finishing = false; // to ensure sweep before committing nextTab 2453 for (int i = 0, bound = 0;;) { 2454 Node<K,V> f; int fh; 2455 while (advance) { 2456 int nextIndex, nextBound; 2457 if (--i >= bound || finishing) 2458 advance = false; 2459 else if ((nextIndex = transferIndex) <= 0) { 2460 i = -1; 2461 advance = false; 2462 } 2463 else if (U.compareAndSetInt 2464 (this, TRANSFERINDEX, nextIndex, 2465 nextBound = (nextIndex > stride ? 
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Commit: swap in the new table; new threshold is 0.75 * 2n
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);
                    return;
                }
                if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    // Last worker (sc back to stamp + 2) re-sweeps and commits
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        Node<K,V> ln, hn;   // low/high heads for bins i, i + n
                        if (fh >= 0) {
                            // Reuse the trailing run of nodes that all land in
                            // the same new bin; clone only the preceding ones.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Split tree bin into low/high lists, then
                            // untreeify, re-tree, or reuse as size dictates.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }

    /* ---------------- Counter support -------------- */

    /**
     * A padded cell for distributing counts. Adapted from LongAdder
     * and Striped64. See their internal docs for explanation.
     */
    @jdk.internal.vm.annotation.Contended static final class CounterCell {
        volatile long value;
        CounterCell(long x) { value = x; }
    }

    /**
     * Sums base count plus all counter cells; may be transiently
     * inaccurate under concurrent updates.
     */
    final long sumCount() {
        CounterCell[] as = counterCells; CounterCell a;
        long sum = baseCount;
        if (as != null) {
            for (int i = 0; i < as.length; ++i) {
                if ((a = as[i]) != null)
                    sum += a.value;
            }
        }
        return sum;
    }

    // See LongAdder version for explanation
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) >
                                    0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }

    /* ---------------- Conversion from/to TreeBins -------------- */

    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);     // grow instead of treeifying
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    if (tabAt(tab, index) == b) {
                        // Build doubly-linked TreeNode list, then wrap in TreeBin
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }

    /**
     * Returns a list of non-TreeNodes replacing those in given list.
     */
    static <K,V> Node<K,V> untreeify(Node<K,V> b) {
        Node<K,V> hd = null, tl = null;
        for (Node<K,V> q = b; q != null; q = q.next) {
            Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
            if (tl == null)
                hd = p;
            else
                tl.next = p;
            tl = p;
        }
        return hd;
    }

    /* ---------------- TreeNodes -------------- */

    /**
     * Nodes for use in TreeBins.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
2744 */ 2745 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) { 2746 if (k != null) { 2747 TreeNode<K,V> p = this; 2748 do { 2749 int ph, dir; K pk; TreeNode<K,V> q; 2750 TreeNode<K,V> pl = p.left, pr = p.right; 2751 if ((ph = p.hash) > h) 2752 p = pl; 2753 else if (ph < h) 2754 p = pr; 2755 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2756 return p; 2757 else if (pl == null) 2758 p = pr; 2759 else if (pr == null) 2760 p = pl; 2761 else if ((kc != null || 2762 (kc = comparableClassFor(k)) != null) && 2763 (dir = compareComparables(kc, k, pk)) != 0) 2764 p = (dir < 0) ? pl : pr; 2765 else if ((q = pr.findTreeNode(h, k, kc)) != null) 2766 return q; 2767 else 2768 p = pl; 2769 } while (p != null); 2770 } 2771 return null; 2772 } 2773 } 2774 2775 /* ---------------- TreeBins -------------- */ 2776 2777 /** 2778 * TreeNodes used at the heads of bins. TreeBins do not hold user 2779 * keys or values, but instead point to list of TreeNodes and 2780 * their root. They also maintain a parasitic read-write lock 2781 * forcing writers (who hold bin lock) to wait for readers (who do 2782 * not) to complete before tree restructuring operations. 2783 */ 2784 static final class TreeBin<K,V> extends Node<K,V> { 2785 TreeNode<K,V> root; 2786 volatile TreeNode<K,V> first; 2787 volatile Thread waiter; 2788 volatile int lockState; 2789 // values for lockState 2790 static final int WRITER = 1; // set while holding write lock 2791 static final int WAITER = 2; // set when waiting for write lock 2792 static final int READER = 4; // increment value for setting read lock 2793 2794 /** 2795 * Tie-breaking utility for ordering insertions when equal 2796 * hashCodes and non-comparable. We don't require a total 2797 * order, just a consistent insertion rule to maintain 2798 * equivalence across rebalancings. Tie-breaking further than 2799 * necessary simplifies testing a bit. 
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Creates bin with initial set of nodes headed by b.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null);
            this.first = b;
            TreeNode<K,V> r = null;
            // Insert each node of the linked list into the red-black tree.
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (r == null) {
                    x.parent = null;
                    x.red = false; // root is black
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }

        /**
         * Acquires write lock for tree restructuring.
         */
        private final void lockRoot() {
            if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }

        /**
         * Releases write lock for tree restructuring.
         */
        private final void unlockRoot() {
            lockState = 0;
        }

        /**
         * Possibly blocks awaiting root lock.
         */
        private final void contendedLock() {
            boolean waiting = false;
            for (int s;;) {
                if (((s = lockState) & ~WAITER) == 0) {
                    // No readers and no writer: try to take the write lock.
                    if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiting)
                            waiter = null;
                        return;
                    }
                }
                else if ((s & WAITER) == 0) {
                    // Announce ourselves as the waiter before parking.
                    if (U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                }
                else if (waiting)
                    LockSupport.park(this);
            }
        }

        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; ) {
                    int s; K ek;
                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
                        // A writer is active or pending: fall back to the
                        // linear "next" list, which stays valid during
                        // tree restructuring.
                        if (e.hash == h &&
                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
                            return e;
                        e = e.next;
                    }
                    else if (U.compareAndSetInt(this, LOCKSTATE, s,
                                                s + READER)) {
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            // Drop read lock; if we were the last reader a
                            // writer was waiting for, wake it up.
                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                                (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }

        /**
         * Finds or adds a node.
         * @return null if added
         */
        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if (p == null) {
                    first = root = new TreeNode<K,V>(h, k, v, null, null);
                    break;
                }
                else if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    if (!searched) {
                        // One-time exhaustive scan of both subtrees for an
                        // existing equal key before resorting to tie-break.
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    TreeNode<K,V> x, f = first;
                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
                    if (f != null)
                        f.prev = x;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    if (!xp.red)
                        // Red child under black parent: no rebalancing, so
                        // no write lock needed.
                        x.red = true;
                    else {
                        lockRoot();
                        try {
                            root = balanceInsertion(root, x);
                        } finally {
                            unlockRoot();
                        }
                    }
                    break;
                }
            }
            assert checkInvariants(root);
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call. This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         *
         * @return true if now too small, so should be untreeified
         */
        final boolean removeTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
            TreeNode<K,V> r, rl;
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            if (first == null) {
                root = null;
                return true;
            }
            if ((r = root) == null || r.right == null || // too small
                (rl = r.left) == null || rl.left == null)
                return true;
            lockRoot();
            try {
                TreeNode<K,V> replacement;
                TreeNode<K,V> pl = p.left;
                TreeNode<K,V> pr = p.right;
                if (pl != null && pr != null) {
                    TreeNode<K,V> s = pr, sl;
                    while ((sl = s.left) != null) // find successor
                        s = sl;
                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                    TreeNode<K,V> sr = s.right;
                    TreeNode<K,V> pp = p.parent;
                    // Swap tree linkages of p and its successor s (rather
                    // than swapping contents, which the "next" pointers pin).
                    if (s == pr) { // p was s's direct parent
                        p.parent = s;
                        s.right = p;
                    }
                    else {
                        TreeNode<K,V> sp = s.parent;
                        if ((p.parent = sp) != null) {
                            if (s == sp.left)
                                sp.left = p;
                            else
                                sp.right = p;
                        }
                        if ((s.right = pr) != null)
                            pr.parent = s;
                    }
                    p.left = null;
                    if ((p.right = sr) != null)
                        sr.parent = p;
                    if ((s.left = pl) != null)
                        pl.parent = s;
                    if ((s.parent = pp) == null)
                        r = s;
                    else if (p == pp.left)
                        pp.left = s;
                    else
                        pp.right = s;
                    if (sr != null)
                        replacement = sr;
                    else
                        replacement = p;
                }
                else if (pl != null)
                    replacement = pl;
                else if (pr != null)
                    replacement = pr;
                else
                    replacement = p;
                if (replacement != p) {
                    TreeNode<K,V> pp = replacement.parent = p.parent;
                    if (pp == null)
                        r = replacement;
                    else if (p == pp.left)
                        pp.left = replacement;
                    else
                        pp.right = replacement;
                    p.left = p.right = p.parent = null;
                }

                root = (p.red) ? r : balanceDeletion(r, replacement);

                if (p == replacement) {  // detach pointers
                    TreeNode<K,V> pp;
                    if ((pp = p.parent) != null) {
                        if (p == pp.left)
                            pp.left = null;
                        else if (p == pp.right)
                            pp.right = null;
                        p.parent = null;
                    }
                }
            } finally {
                unlockRoot();
            }
            assert checkInvariants(root);
            return false;
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Checks invariants recursively for the tree of Nodes rooted at t.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            // Doubly-linked list consistency
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            // Tree linkage and BST-by-hash ordering
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            // No red node with two red children
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }

        private static final Unsafe U = Unsafe.getUnsafe();
        private static final long LOCKSTATE;
        static {
            LOCKSTATE = U.objectFieldOffset
                (TreeBin.class, "lockState");
        }
    }

    /* ----------------Table Traversal -------------- */

    /**
     * Records the table, its length, and current traversal index for a
     * traverser that must process a region of a forwarded table before
     * proceeding with current table.
     */
    static final class TableStack<K,V> {
        int length;
        int index;
        Node<K,V>[] tab;
        TableStack<K,V> next;
    }

    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses.  Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds checks fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        // Bin was moved by a resize: follow into the next
                        // table, saving state to resume here afterwards.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }

    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove.
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMap<K,V> map;
        Node<K,V> lastReturned;
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance();
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            // null value means "remove" in replaceNode
            map.replaceNode(p.key, null, null);
        }
    }

    static final class KeyIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<K>, Enumeration<K> {
        KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit, map);
        }

        public final K next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            lastReturned = p;
            advance();
            return k;
        }

        public final K nextElement() { return next(); }
    }

    static final class ValueIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<V>, Enumeration<V> {
        ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit, map);
        }

        public final V next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            V v = p.val;
            lastReturned = p;
            advance();
            return v;
        }

        public final V nextElement() { return next(); }
    }

    static final class EntryIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<Map.Entry<K,V>> {
        EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit, map);
        }

        public final Map.Entry<K,V> next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            V v = p.val;
            lastReturned = p;
            advance();
            return new MapEntry<K,V>(k, v, map);
        }
    }

    /**
     * Exported Entry for EntryIterator.
     */
    static final class MapEntry<K,V> implements Map.Entry<K,V> {
        final K key; // non-null
        V val;       // non-null
        final ConcurrentHashMap<K,V> map;
        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
            this.key = key;
            this.val = val;
            this.map = map;
        }
        public K getKey()        { return key; }
        public V getValue()      { return val; }
        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
        public String toString() {
            return Helpers.mapEntryToString(key, val);
        }

        public boolean equals(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == val || v.equals(val)));
        }

        /**
         * Sets our entry's value and writes through to the map. The
         * value to return is somewhat arbitrary here. Since we do not
         * necessarily track asynchronous changes, the most recent
         * "previous" value could be different from what we return (or
         * could even have been removed, in which case the put will
         * re-establish). We do not and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = val;
            val = value;
            map.put(key, value); // write-through to the backing map
            return v;
        }
    }

    static final class KeySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<K> {
        long est; // size estimate
        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                       long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public KeySpliterator<K,V> trySplit() {
            int i, f, h;
            // Split remaining base-index range at midpoint; halve estimate.
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                        f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.key);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    static final class ValueSpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<V> {
        long est; // size estimate
        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public ValueSpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.val);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            // values may repeat, so no DISTINCT
            return Spliterator.CONCURRENT | Spliterator.NONNULL;
        }
    }

    static final class EntrySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        final ConcurrentHashMap<K,V> map; // To export MapEntry
        long est; // size estimate
        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est, ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            this.est = est;
        }

        public EntrySpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1, map);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(new MapEntry<K,V>(p.key, p.val, map));
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    // Parallel bulk operations

    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     */
    final int batchFor(long b) {
        long n;
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }

    /**
     * Performs the given action for each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEach(long parallelismThreshold,
                        BiConsumer<? super K,? super V> action) {
        if (action == null) throw new NullPointerException();
        new ForEachMappingTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEach(long parallelismThreshold,
                            BiFunction<? super K, ? super V, ? extends U> transformer,
                            Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedMappingTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each (key, value), or null if none. Upon
     * success, further element processing is suppressed and the
     * results of any other parallel invocations of the search
     * function are ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each (key, value), or null if none
     * @since 1.8
     */
    public <U> U search(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public <U> U reduce(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> transformer,
                        BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public double reduceToDouble(long parallelismThreshold,
                                 ToDoubleBiFunction<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public long reduceToLong(long parallelismThreshold,
                             ToLongBiFunction<? super K, ? super V> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public int reduceToInt(long parallelismThreshold,
                           ToIntBiFunction<? super K, ? super V> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachKey(long parallelismThreshold,
                           Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        new ForEachKeyTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachKey(long parallelismThreshold,
                               Function<? super K, ? extends U> transformer,
                               Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedKeyTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     * @since 1.8
     */
    public <U> U searchKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     * @since 1.8
     */
    public K reduceKeys(long parallelismThreshold,
                        BiFunction<? super K, ? super K, ? extends K> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceKeysTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public <U> U reduceKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> transformer,
                            BiFunction<? super U, ? super U, ?
extends U> reducer) { 3974 if (transformer == null || reducer == null) 3975 throw new NullPointerException(); 3976 return new MapReduceKeysTask<K,V,U> 3977 (null, batchFor(parallelismThreshold), 0, 0, table, 3978 null, transformer, reducer).invoke(); 3979 } 3980 3981 /** 3982 * Returns the result of accumulating the given transformation 3983 * of all keys using the given reducer to combine values, and 3984 * the given basis as an identity value. 3985 * 3986 * @param parallelismThreshold the (estimated) number of elements 3987 * needed for this operation to be executed in parallel 3988 * @param transformer a function returning the transformation 3989 * for an element 3990 * @param basis the identity (initial default value) for the reduction 3991 * @param reducer a commutative associative combining function 3992 * @return the result of accumulating the given transformation 3993 * of all keys 3994 * @since 1.8 3995 */ 3996 public double reduceKeysToDouble(long parallelismThreshold, 3997 ToDoubleFunction<? super K> transformer, 3998 double basis, 3999 DoubleBinaryOperator reducer) { 4000 if (transformer == null || reducer == null) 4001 throw new NullPointerException(); 4002 return new MapReduceKeysToDoubleTask<K,V> 4003 (null, batchFor(parallelismThreshold), 0, 0, table, 4004 null, transformer, basis, reducer).invoke(); 4005 } 4006 4007 /** 4008 * Returns the result of accumulating the given transformation 4009 * of all keys using the given reducer to combine values, and 4010 * the given basis as an identity value. 
4011 * 4012 * @param parallelismThreshold the (estimated) number of elements 4013 * needed for this operation to be executed in parallel 4014 * @param transformer a function returning the transformation 4015 * for an element 4016 * @param basis the identity (initial default value) for the reduction 4017 * @param reducer a commutative associative combining function 4018 * @return the result of accumulating the given transformation 4019 * of all keys 4020 * @since 1.8 4021 */ 4022 public long reduceKeysToLong(long parallelismThreshold, 4023 ToLongFunction<? super K> transformer, 4024 long basis, 4025 LongBinaryOperator reducer) { 4026 if (transformer == null || reducer == null) 4027 throw new NullPointerException(); 4028 return new MapReduceKeysToLongTask<K,V> 4029 (null, batchFor(parallelismThreshold), 0, 0, table, 4030 null, transformer, basis, reducer).invoke(); 4031 } 4032 4033 /** 4034 * Returns the result of accumulating the given transformation 4035 * of all keys using the given reducer to combine values, and 4036 * the given basis as an identity value. 4037 * 4038 * @param parallelismThreshold the (estimated) number of elements 4039 * needed for this operation to be executed in parallel 4040 * @param transformer a function returning the transformation 4041 * for an element 4042 * @param basis the identity (initial default value) for the reduction 4043 * @param reducer a commutative associative combining function 4044 * @return the result of accumulating the given transformation 4045 * of all keys 4046 * @since 1.8 4047 */ 4048 public int reduceKeysToInt(long parallelismThreshold, 4049 ToIntFunction<? 
super K> transformer, 4050 int basis, 4051 IntBinaryOperator reducer) { 4052 if (transformer == null || reducer == null) 4053 throw new NullPointerException(); 4054 return new MapReduceKeysToIntTask<K,V> 4055 (null, batchFor(parallelismThreshold), 0, 0, table, 4056 null, transformer, basis, reducer).invoke(); 4057 } 4058 4059 /** 4060 * Performs the given action for each value. 4061 * 4062 * @param parallelismThreshold the (estimated) number of elements 4063 * needed for this operation to be executed in parallel 4064 * @param action the action 4065 * @since 1.8 4066 */ 4067 public void forEachValue(long parallelismThreshold, 4068 Consumer<? super V> action) { 4069 if (action == null) 4070 throw new NullPointerException(); 4071 new ForEachValueTask<K,V> 4072 (null, batchFor(parallelismThreshold), 0, 0, table, 4073 action).invoke(); 4074 } 4075 4076 /** 4077 * Performs the given action for each non-null transformation 4078 * of each value. 4079 * 4080 * @param parallelismThreshold the (estimated) number of elements 4081 * needed for this operation to be executed in parallel 4082 * @param transformer a function returning the transformation 4083 * for an element, or null if there is no transformation (in 4084 * which case the action is not applied) 4085 * @param action the action 4086 * @param <U> the return type of the transformer 4087 * @since 1.8 4088 */ 4089 public <U> void forEachValue(long parallelismThreshold, 4090 Function<? super V, ? extends U> transformer, 4091 Consumer<? super U> action) { 4092 if (transformer == null || action == null) 4093 throw new NullPointerException(); 4094 new ForEachTransformedValueTask<K,V,U> 4095 (null, batchFor(parallelismThreshold), 0, 0, table, 4096 transformer, action).invoke(); 4097 } 4098 4099 /** 4100 * Returns a non-null result from applying the given search 4101 * function on each value, or null if none. 
Upon success, 4102 * further element processing is suppressed and the results of 4103 * any other parallel invocations of the search function are 4104 * ignored. 4105 * 4106 * @param parallelismThreshold the (estimated) number of elements 4107 * needed for this operation to be executed in parallel 4108 * @param searchFunction a function returning a non-null 4109 * result on success, else null 4110 * @param <U> the return type of the search function 4111 * @return a non-null result from applying the given search 4112 * function on each value, or null if none 4113 * @since 1.8 4114 */ 4115 public <U> U searchValues(long parallelismThreshold, 4116 Function<? super V, ? extends U> searchFunction) { 4117 if (searchFunction == null) throw new NullPointerException(); 4118 return new SearchValuesTask<K,V,U> 4119 (null, batchFor(parallelismThreshold), 0, 0, table, 4120 searchFunction, new AtomicReference<U>()).invoke(); 4121 } 4122 4123 /** 4124 * Returns the result of accumulating all values using the 4125 * given reducer to combine values, or null if none. 4126 * 4127 * @param parallelismThreshold the (estimated) number of elements 4128 * needed for this operation to be executed in parallel 4129 * @param reducer a commutative associative combining function 4130 * @return the result of accumulating all values 4131 * @since 1.8 4132 */ 4133 public V reduceValues(long parallelismThreshold, 4134 BiFunction<? super V, ? super V, ? extends V> reducer) { 4135 if (reducer == null) throw new NullPointerException(); 4136 return new ReduceValuesTask<K,V> 4137 (null, batchFor(parallelismThreshold), 0, 0, table, 4138 null, reducer).invoke(); 4139 } 4140 4141 /** 4142 * Returns the result of accumulating the given transformation 4143 * of all values using the given reducer to combine values, or 4144 * null if none. 
4145 * 4146 * @param parallelismThreshold the (estimated) number of elements 4147 * needed for this operation to be executed in parallel 4148 * @param transformer a function returning the transformation 4149 * for an element, or null if there is no transformation (in 4150 * which case it is not combined) 4151 * @param reducer a commutative associative combining function 4152 * @param <U> the return type of the transformer 4153 * @return the result of accumulating the given transformation 4154 * of all values 4155 * @since 1.8 4156 */ 4157 public <U> U reduceValues(long parallelismThreshold, 4158 Function<? super V, ? extends U> transformer, 4159 BiFunction<? super U, ? super U, ? extends U> reducer) { 4160 if (transformer == null || reducer == null) 4161 throw new NullPointerException(); 4162 return new MapReduceValuesTask<K,V,U> 4163 (null, batchFor(parallelismThreshold), 0, 0, table, 4164 null, transformer, reducer).invoke(); 4165 } 4166 4167 /** 4168 * Returns the result of accumulating the given transformation 4169 * of all values using the given reducer to combine values, 4170 * and the given basis as an identity value. 4171 * 4172 * @param parallelismThreshold the (estimated) number of elements 4173 * needed for this operation to be executed in parallel 4174 * @param transformer a function returning the transformation 4175 * for an element 4176 * @param basis the identity (initial default value) for the reduction 4177 * @param reducer a commutative associative combining function 4178 * @return the result of accumulating the given transformation 4179 * of all values 4180 * @since 1.8 4181 */ 4182 public double reduceValuesToDouble(long parallelismThreshold, 4183 ToDoubleFunction<? 
super V> transformer, 4184 double basis, 4185 DoubleBinaryOperator reducer) { 4186 if (transformer == null || reducer == null) 4187 throw new NullPointerException(); 4188 return new MapReduceValuesToDoubleTask<K,V> 4189 (null, batchFor(parallelismThreshold), 0, 0, table, 4190 null, transformer, basis, reducer).invoke(); 4191 } 4192 4193 /** 4194 * Returns the result of accumulating the given transformation 4195 * of all values using the given reducer to combine values, 4196 * and the given basis as an identity value. 4197 * 4198 * @param parallelismThreshold the (estimated) number of elements 4199 * needed for this operation to be executed in parallel 4200 * @param transformer a function returning the transformation 4201 * for an element 4202 * @param basis the identity (initial default value) for the reduction 4203 * @param reducer a commutative associative combining function 4204 * @return the result of accumulating the given transformation 4205 * of all values 4206 * @since 1.8 4207 */ 4208 public long reduceValuesToLong(long parallelismThreshold, 4209 ToLongFunction<? super V> transformer, 4210 long basis, 4211 LongBinaryOperator reducer) { 4212 if (transformer == null || reducer == null) 4213 throw new NullPointerException(); 4214 return new MapReduceValuesToLongTask<K,V> 4215 (null, batchFor(parallelismThreshold), 0, 0, table, 4216 null, transformer, basis, reducer).invoke(); 4217 } 4218 4219 /** 4220 * Returns the result of accumulating the given transformation 4221 * of all values using the given reducer to combine values, 4222 * and the given basis as an identity value. 
4223 * 4224 * @param parallelismThreshold the (estimated) number of elements 4225 * needed for this operation to be executed in parallel 4226 * @param transformer a function returning the transformation 4227 * for an element 4228 * @param basis the identity (initial default value) for the reduction 4229 * @param reducer a commutative associative combining function 4230 * @return the result of accumulating the given transformation 4231 * of all values 4232 * @since 1.8 4233 */ 4234 public int reduceValuesToInt(long parallelismThreshold, 4235 ToIntFunction<? super V> transformer, 4236 int basis, 4237 IntBinaryOperator reducer) { 4238 if (transformer == null || reducer == null) 4239 throw new NullPointerException(); 4240 return new MapReduceValuesToIntTask<K,V> 4241 (null, batchFor(parallelismThreshold), 0, 0, table, 4242 null, transformer, basis, reducer).invoke(); 4243 } 4244 4245 /** 4246 * Performs the given action for each entry. 4247 * 4248 * @param parallelismThreshold the (estimated) number of elements 4249 * needed for this operation to be executed in parallel 4250 * @param action the action 4251 * @since 1.8 4252 */ 4253 public void forEachEntry(long parallelismThreshold, 4254 Consumer<? super Map.Entry<K,V>> action) { 4255 if (action == null) throw new NullPointerException(); 4256 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table, 4257 action).invoke(); 4258 } 4259 4260 /** 4261 * Performs the given action for each non-null transformation 4262 * of each entry. 
4263 * 4264 * @param parallelismThreshold the (estimated) number of elements 4265 * needed for this operation to be executed in parallel 4266 * @param transformer a function returning the transformation 4267 * for an element, or null if there is no transformation (in 4268 * which case the action is not applied) 4269 * @param action the action 4270 * @param <U> the return type of the transformer 4271 * @since 1.8 4272 */ 4273 public <U> void forEachEntry(long parallelismThreshold, 4274 Function<Map.Entry<K,V>, ? extends U> transformer, 4275 Consumer<? super U> action) { 4276 if (transformer == null || action == null) 4277 throw new NullPointerException(); 4278 new ForEachTransformedEntryTask<K,V,U> 4279 (null, batchFor(parallelismThreshold), 0, 0, table, 4280 transformer, action).invoke(); 4281 } 4282 4283 /** 4284 * Returns a non-null result from applying the given search 4285 * function on each entry, or null if none. Upon success, 4286 * further element processing is suppressed and the results of 4287 * any other parallel invocations of the search function are 4288 * ignored. 4289 * 4290 * @param parallelismThreshold the (estimated) number of elements 4291 * needed for this operation to be executed in parallel 4292 * @param searchFunction a function returning a non-null 4293 * result on success, else null 4294 * @param <U> the return type of the search function 4295 * @return a non-null result from applying the given search 4296 * function on each entry, or null if none 4297 * @since 1.8 4298 */ 4299 public <U> U searchEntries(long parallelismThreshold, 4300 Function<Map.Entry<K,V>, ? 
extends U> searchFunction) { 4301 if (searchFunction == null) throw new NullPointerException(); 4302 return new SearchEntriesTask<K,V,U> 4303 (null, batchFor(parallelismThreshold), 0, 0, table, 4304 searchFunction, new AtomicReference<U>()).invoke(); 4305 } 4306 4307 /** 4308 * Returns the result of accumulating all entries using the 4309 * given reducer to combine values, or null if none. 4310 * 4311 * @param parallelismThreshold the (estimated) number of elements 4312 * needed for this operation to be executed in parallel 4313 * @param reducer a commutative associative combining function 4314 * @return the result of accumulating all entries 4315 * @since 1.8 4316 */ 4317 public Map.Entry<K,V> reduceEntries(long parallelismThreshold, 4318 BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { 4319 if (reducer == null) throw new NullPointerException(); 4320 return new ReduceEntriesTask<K,V> 4321 (null, batchFor(parallelismThreshold), 0, 0, table, 4322 null, reducer).invoke(); 4323 } 4324 4325 /** 4326 * Returns the result of accumulating the given transformation 4327 * of all entries using the given reducer to combine values, 4328 * or null if none. 4329 * 4330 * @param parallelismThreshold the (estimated) number of elements 4331 * needed for this operation to be executed in parallel 4332 * @param transformer a function returning the transformation 4333 * for an element, or null if there is no transformation (in 4334 * which case it is not combined) 4335 * @param reducer a commutative associative combining function 4336 * @param <U> the return type of the transformer 4337 * @return the result of accumulating the given transformation 4338 * of all entries 4339 * @since 1.8 4340 */ 4341 public <U> U reduceEntries(long parallelismThreshold, 4342 Function<Map.Entry<K,V>, ? extends U> transformer, 4343 BiFunction<? super U, ? super U, ? 
extends U> reducer) { 4344 if (transformer == null || reducer == null) 4345 throw new NullPointerException(); 4346 return new MapReduceEntriesTask<K,V,U> 4347 (null, batchFor(parallelismThreshold), 0, 0, table, 4348 null, transformer, reducer).invoke(); 4349 } 4350 4351 /** 4352 * Returns the result of accumulating the given transformation 4353 * of all entries using the given reducer to combine values, 4354 * and the given basis as an identity value. 4355 * 4356 * @param parallelismThreshold the (estimated) number of elements 4357 * needed for this operation to be executed in parallel 4358 * @param transformer a function returning the transformation 4359 * for an element 4360 * @param basis the identity (initial default value) for the reduction 4361 * @param reducer a commutative associative combining function 4362 * @return the result of accumulating the given transformation 4363 * of all entries 4364 * @since 1.8 4365 */ 4366 public double reduceEntriesToDouble(long parallelismThreshold, 4367 ToDoubleFunction<Map.Entry<K,V>> transformer, 4368 double basis, 4369 DoubleBinaryOperator reducer) { 4370 if (transformer == null || reducer == null) 4371 throw new NullPointerException(); 4372 return new MapReduceEntriesToDoubleTask<K,V> 4373 (null, batchFor(parallelismThreshold), 0, 0, table, 4374 null, transformer, basis, reducer).invoke(); 4375 } 4376 4377 /** 4378 * Returns the result of accumulating the given transformation 4379 * of all entries using the given reducer to combine values, 4380 * and the given basis as an identity value. 
4381 * 4382 * @param parallelismThreshold the (estimated) number of elements 4383 * needed for this operation to be executed in parallel 4384 * @param transformer a function returning the transformation 4385 * for an element 4386 * @param basis the identity (initial default value) for the reduction 4387 * @param reducer a commutative associative combining function 4388 * @return the result of accumulating the given transformation 4389 * of all entries 4390 * @since 1.8 4391 */ 4392 public long reduceEntriesToLong(long parallelismThreshold, 4393 ToLongFunction<Map.Entry<K,V>> transformer, 4394 long basis, 4395 LongBinaryOperator reducer) { 4396 if (transformer == null || reducer == null) 4397 throw new NullPointerException(); 4398 return new MapReduceEntriesToLongTask<K,V> 4399 (null, batchFor(parallelismThreshold), 0, 0, table, 4400 null, transformer, basis, reducer).invoke(); 4401 } 4402 4403 /** 4404 * Returns the result of accumulating the given transformation 4405 * of all entries using the given reducer to combine values, 4406 * and the given basis as an identity value. 
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public int reduceEntriesToInt(long parallelismThreshold,
                                  ToIntFunction<Map.Entry<K,V>> transformer,
                                  int basis,
                                  IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }


    /* ----------------Views -------------- */

    /**
     * Base class for views.  A view is backed by the map: it has no
     * storage of its own, so mutations of the view write through to
     * the map and vice versa.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMap<K,V> map;   // the backing map; never null
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            // mappingCount() is only an estimate under concurrent updates,
            // so the array may need to grow while iterating below.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // grow by ~50% + 1, saturating at MAX_ARRAY_SIZE
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // trim if the estimate overshot the number of elements seen
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // same saturating ~50% growth policy as toArray()
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Use (c instanceof Set) as a hint that lookup in c is as
            // efficient as this view
            Node<K,V>[] t;
            if ((t = map.table) == null) {        // null table => empty map
                return false;
            } else if (c instanceof Set<?> && c.size() > t.length) {
                for (Iterator<?> it = iterator(); it.hasNext(); ) {
                    if (c.contains(it.next())) {
                        it.remove();
                        modified = true;
                    }
                }
            } else {
                for (Object e : c)
                    modified |= remove(e);
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        private final V value;      // default value for add(); null disables add
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // clamp the size estimate: sumCount() may transiently be negative
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.key);
            }
        }
    }

    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            if (o != null) {
                // removes at most one occurrence of the value
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        @Override public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public boolean removeIf(Predicate<? super V> filter) {
            return map.removeValueIf(filter);
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
     * entries.  This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
            return map.removeEntryIf(filter);
        }

        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }

    // -------------------------------------------------------

    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    @SuppressWarnings("serial")
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;
        TableStack<K,V> stack, spare;
        int index;
        int baseIndex;
        int baseLimit;
        final int baseSize;
        int batch;              // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                // Root task scans the whole table.
                this.baseSize = this.baseLimit = t.length;
            else {
                // Subtask inherits table size from parent, scans [i, f).
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: follow into the next
                        // table, remembering where to resume in this one.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        // Traverse the tree bin via its linked node list.
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex;
            }
        }

        // Saves traversal state upon encountering a forwarding node.
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse a spare frame if available
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        // Pops saved state when done with a forwarded (sub)table.
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }

    /*
     * Task classes. Coded in a regular but ugly format/style to
     * simplify checks that each variant differs in the right way from
     * others. The null screenings exist because compilers cannot tell
     * that we've already null-checked task arguments, so we force
     * simplest hoisted bypass to help avoid convoluted traps.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                // While split budget remains, halve the remaining range and
                // fork a subtask for the upper half; keep the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's own (leftmost) portion.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<?
 super V> action;
            if ((action = this.action) != null) {
                // Halve and fork the upper half while budget remains.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations are skipped, not passed on.
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ?
 extends U> searchFunction;
        final AtomicReference<U> result;  // shared across the task tree
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Stop splitting as soon as any task has found a result.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until exhausted or some task publishes a result.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First non-null answer wins; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;  // chain of forked right-hand subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ?
 r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // On completion, fold results of forked right-hand subtasks
                // into their parent, walking up the completer chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations are skipped, not reduced.
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ?
extends U> reducer; 5720 U result; 5721 MapReduceMappingsTask<K,V,U> rights, nextRight; 5722 MapReduceMappingsTask 5723 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5724 MapReduceMappingsTask<K,V,U> nextRight, 5725 BiFunction<? super K, ? super V, ? extends U> transformer, 5726 BiFunction<? super U, ? super U, ? extends U> reducer) { 5727 super(p, b, i, f, t); this.nextRight = nextRight; 5728 this.transformer = transformer; 5729 this.reducer = reducer; 5730 } 5731 public final U getRawResult() { return result; } 5732 public final void compute() { 5733 final BiFunction<? super K, ? super V, ? extends U> transformer; 5734 final BiFunction<? super U, ? super U, ? extends U> reducer; 5735 if ((transformer = this.transformer) != null && 5736 (reducer = this.reducer) != null) { 5737 for (int i = baseIndex, f, h; batch > 0 && 5738 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5739 addToPendingCount(1); 5740 (rights = new MapReduceMappingsTask<K,V,U> 5741 (this, batch >>>= 1, baseLimit = h, f, tab, 5742 rights, transformer, reducer)).fork(); 5743 } 5744 U r = null; 5745 for (Node<K,V> p; (p = advance()) != null; ) { 5746 U u; 5747 if ((u = transformer.apply(p.key, p.val)) != null) 5748 r = (r == null) ? u : reducer.apply(r, u); 5749 } 5750 result = r; 5751 CountedCompleter<?> c; 5752 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5753 @SuppressWarnings("unchecked") 5754 MapReduceMappingsTask<K,V,U> 5755 t = (MapReduceMappingsTask<K,V,U>)c, 5756 s = t.rights; 5757 while (s != null) { 5758 U tr, sr; 5759 if ((sr = s.result) != null) 5760 t.result = (((tr = t.result) == null) ? sr : 5761 reducer.apply(tr, sr)); 5762 s = t.rights = s.nextRight; 5763 } 5764 } 5765 } 5766 } 5767 } 5768 5769 @SuppressWarnings("serial") 5770 static final class MapReduceKeysToDoubleTask<K,V> 5771 extends BulkTask<K,V,Double> { 5772 final ToDoubleFunction<? 
                               super K> transformer;
        final DoubleBinaryOperator reducer;  // combines two partial results
        final double basis;                  // identity/initial accumulator value
        double result;                       // reduced value for this subtask's range
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork right-hand halves of the range while batch permits;
                // each subtask starts from the current accumulator r.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed keys in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of values to a double, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<?
                               super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed values in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of entries to a double, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;  // combines two partial results
        final double basis;                  // identity/initial accumulator value
        double result;
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed entries in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of (key, value) pairs to a double, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ?
                                 super V> transformer;
        final DoubleBinaryOperator reducer;  // combines two partial results
        final double basis;                  // identity/initial accumulator value
        double result;
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed mappings in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of keys to a long, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<?
                             super K> transformer;
        final LongBinaryOperator reducer;  // combines two partial results
        final long basis;                  // identity/initial accumulator value
        long result;                       // reduced value for this subtask's range
        MapReduceKeysToLongTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed keys in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of values to a long, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<?
                             super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed values in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of entries to a long, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;  // combines two partial results
        final long basis;                  // identity/initial accumulator value
        long result;
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?>
             p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed entries in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of (key, value) pairs to a long, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;  // combines two partial results
        final long basis;                  // identity/initial accumulator value
        long result;
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ?
                              super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed mappings in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of keys to an int, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;  // combines two partial results
        final int basis;                  // identity/initial accumulator value
        int result;
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<?
                           super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed keys in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of values to an int, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;  // combines two partial results
        final int basis;                  // identity/initial accumulator value
        int result;
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<?
                           super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed values in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of entries to an int, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;  // combines two partial results
        final int basis;                  // identity/initial accumulator value
        int result;
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed entries in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Parallel map-reduce of (key, value) pairs to an int, starting from basis.
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;  // combines two partial results
        final int basis;                  // identity/initial accumulator value
        int result;
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ?
                                  super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Fork right-hand halves of the range while batch permits.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce transformed mappings in this task's own range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Unsafe mechanics
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL;       // field offset of sizeCtl
    private static final long TRANSFERINDEX; // field offset of transferIndex
    private static final long BASECOUNT;     // field offset of baseCount
    private static final long CELLSBUSY;     // field offset of cellsBusy
    private static final long CELLVALUE;     // field offset of CounterCell.value
    private static final int ABASE;          // base offset of Node[] element storage
    private static final int ASHIFT;         // shift scaling a Node[] index to a byte offset

    static {
        SIZECTL = U.objectFieldOffset
            (ConcurrentHashMap.class, "sizeCtl");
        TRANSFERINDEX = U.objectFieldOffset
            (ConcurrentHashMap.class, "transferIndex");
        BASECOUNT = U.objectFieldOffset
            (ConcurrentHashMap.class, "baseCount");
        CELLSBUSY = U.objectFieldOffset
            (ConcurrentHashMap.class, "cellsBusy");

        CELLVALUE = U.objectFieldOffset
            (CounterCell.class, "value");

        ABASE = U.arrayBaseOffset(Node[].class);
        int scale = U.arrayIndexScale(Node[].class);
        if ((scale & (scale - 1)) != 0)
            throw new Error("array index scale not a power of two");
        // ASHIFT = log2(scale), valid because scale is a power of two.
        ASHIFT = 31 -
            Integer.numberOfLeadingZeros(scale);

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
}