1 /* 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. Oracle designates this 7 * particular file as subject to the "Classpath" exception as provided 8 * by Oracle in the LICENSE file that accompanied this code. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 /* 26 * This file is available under and governed by the GNU General Public 27 * License version 2 only, as published by the Free Software Foundation. 
 * However, the following notice accompanied the original version of this
 * file:
 *
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.Function;
import java.util.function.IntBinaryOperator;
import java.util.function.LongBinaryOperator;
import java.util.function.Predicate;
import java.util.function.ToDoubleBiFunction;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntBiFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongBiFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Stream;
import jdk.internal.misc.Unsafe;

/**
 * A hash table supporting full concurrency of retrievals and
 * high expected concurrency for updates.  This class obeys the
 * same functional specification as {@link java.util.Hashtable}, and
 * includes versions of methods corresponding to each method of
 * {@code Hashtable}.
However, even though all operations are 79 * thread-safe, retrieval operations do <em>not</em> entail locking, 80 * and there is <em>not</em> any support for locking the entire table 81 * in a way that prevents all access. This class is fully 82 * interoperable with {@code Hashtable} in programs that rely on its 83 * thread safety but not on its synchronization details. 84 * 85 * <p>Retrieval operations (including {@code get}) generally do not 86 * block, so may overlap with update operations (including {@code put} 87 * and {@code remove}). Retrievals reflect the results of the most 88 * recently <em>completed</em> update operations holding upon their 89 * onset. (More formally, an update operation for a given key bears a 90 * <em>happens-before</em> relation with any (non-null) retrieval for 91 * that key reporting the updated value.) For aggregate operations 92 * such as {@code putAll} and {@code clear}, concurrent retrievals may 93 * reflect insertion or removal of only some entries. Similarly, 94 * Iterators, Spliterators and Enumerations return elements reflecting the 95 * state of the hash table at some point at or since the creation of the 96 * iterator/enumeration. They do <em>not</em> throw {@link 97 * java.util.ConcurrentModificationException ConcurrentModificationException}. 98 * However, iterators are designed to be used by only one thread at a time. 99 * Bear in mind that the results of aggregate status methods including 100 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically 101 * useful only when a map is not undergoing concurrent updates in other threads. 102 * Otherwise the results of these methods reflect transient states 103 * that may be adequate for monitoring or estimation purposes, but not 104 * for program control. 
105 * 106 * <p>The table is dynamically expanded when there are too many 107 * collisions (i.e., keys that have distinct hash codes but fall into 108 * the same slot modulo the table size), with the expected average 109 * effect of maintaining roughly two bins per mapping (corresponding 110 * to a 0.75 load factor threshold for resizing). There may be much 111 * variance around this average as mappings are added and removed, but 112 * overall, this maintains a commonly accepted time/space tradeoff for 113 * hash tables. However, resizing this or any other kind of hash 114 * table may be a relatively slow operation. When possible, it is a 115 * good idea to provide a size estimate as an optional {@code 116 * initialCapacity} constructor argument. An additional optional 117 * {@code loadFactor} constructor argument provides a further means of 118 * customizing initial table capacity by specifying the table density 119 * to be used in calculating the amount of space to allocate for the 120 * given number of elements. Also, for compatibility with previous 121 * versions of this class, constructors may optionally specify an 122 * expected {@code concurrencyLevel} as an additional hint for 123 * internal sizing. Note that using many keys with exactly the same 124 * {@code hashCode()} is a sure way to slow down performance of any 125 * hash table. To ameliorate impact, when keys are {@link Comparable}, 126 * this class may use comparison order among keys to help break ties. 127 * 128 * <p>A {@link Set} projection of a ConcurrentHashMap may be created 129 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed 130 * (using {@link #keySet(Object)} when only keys are of interest, and the 131 * mapped values are (perhaps transiently) not used or all take the 132 * same mapping value. 
133 * 134 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a 135 * form of histogram or multiset) by using {@link 136 * java.util.concurrent.atomic.LongAdder} values and initializing via 137 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count 138 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use 139 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();} 140 * 141 * <p>This class and its views and iterators implement all of the 142 * <em>optional</em> methods of the {@link Map} and {@link Iterator} 143 * interfaces. 144 * 145 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class 146 * does <em>not</em> allow {@code null} to be used as a key or value. 147 * 148 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk 149 * operations that, unlike most {@link Stream} methods, are designed 150 * to be safely, and often sensibly, applied even with maps that are 151 * being concurrently updated by other threads; for example, when 152 * computing a snapshot summary of the values in a shared registry. 153 * There are three kinds of operation, each with four forms, accepting 154 * functions with keys, values, entries, and (key, value) pairs as 155 * arguments and/or return values. Because the elements of a 156 * ConcurrentHashMap are not ordered in any particular way, and may be 157 * processed in different orders in different parallel executions, the 158 * correctness of supplied functions should not depend on any 159 * ordering, or on any other objects or values that may transiently 160 * change while computation is in progress; and except for forEach 161 * actions, should ideally be side-effect-free. Bulk operations on 162 * {@link java.util.Map.Entry} objects do not support method {@code 163 * setValue}. 164 * 165 * <ul> 166 * <li>forEach: Performs a given action on each element. 
167 * A variant form applies a given transformation on each element 168 * before performing the action. 169 * 170 * <li>search: Returns the first available non-null result of 171 * applying a given function on each element; skipping further 172 * search when a result is found. 173 * 174 * <li>reduce: Accumulates each element. The supplied reduction 175 * function cannot rely on ordering (more formally, it should be 176 * both associative and commutative). There are five variants: 177 * 178 * <ul> 179 * 180 * <li>Plain reductions. (There is not a form of this method for 181 * (key, value) function arguments since there is no corresponding 182 * return type.) 183 * 184 * <li>Mapped reductions that accumulate the results of a given 185 * function applied to each element. 186 * 187 * <li>Reductions to scalar doubles, longs, and ints, using a 188 * given basis value. 189 * 190 * </ul> 191 * </ul> 192 * 193 * <p>These bulk operations accept a {@code parallelismThreshold} 194 * argument. Methods proceed sequentially if the current map size is 195 * estimated to be less than the given threshold. Using a value of 196 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value 197 * of {@code 1} results in maximal parallelism by partitioning into 198 * enough subtasks to fully utilize the {@link 199 * ForkJoinPool#commonPool()} that is used for all parallel 200 * computations. Normally, you would initially choose one of these 201 * extreme values, and then measure performance of using in-between 202 * values that trade off overhead versus throughput. 203 * 204 * <p>The concurrency properties of bulk operations follow 205 * from those of ConcurrentHashMap: Any non-null result returned 206 * from {@code get(key)} and related access methods bears a 207 * happens-before relation with the associated insertion or 208 * update. 
The result of any bulk operation reflects the 209 * composition of these per-element relations (but is not 210 * necessarily atomic with respect to the map as a whole unless it 211 * is somehow known to be quiescent). Conversely, because keys 212 * and values in the map are never null, null serves as a reliable 213 * atomic indicator of the current lack of any result. To 214 * maintain this property, null serves as an implicit basis for 215 * all non-scalar reduction operations. For the double, long, and 216 * int versions, the basis should be one that, when combined with 217 * any other value, returns that other value (more formally, it 218 * should be the identity element for the reduction). Most common 219 * reductions have these properties; for example, computing a sum 220 * with basis 0 or a minimum with basis MAX_VALUE. 221 * 222 * <p>Search and transformation functions provided as arguments 223 * should similarly return null to indicate the lack of any result 224 * (in which case it is not used). In the case of mapped 225 * reductions, this also enables transformations to serve as 226 * filters, returning null (or, in the case of primitive 227 * specializations, the identity basis) if the element should not 228 * be combined. You can create compound transformations and 229 * filterings by composing them yourself under this "null means 230 * there is nothing there now" rule before using them in search or 231 * reduce operations. 232 * 233 * <p>Methods accepting and/or returning Entry arguments maintain 234 * key-value associations. They may be useful for example when 235 * finding the key for the greatest value. Note that "plain" Entry 236 * arguments can be supplied using {@code new 237 * AbstractMap.SimpleEntry(k,v)}. 238 * 239 * <p>Bulk operations may complete abruptly, throwing an 240 * exception encountered in the application of a supplied 241 * function. 
 * Bear in mind when handling such exceptions that other
 * concurrently executing functions could also have thrown
 * exceptions, or would have done so if the first exception had
 * not occurred.
 *
 * <p>Speedups for parallel compared to sequential forms are common
 * but not guaranteed.  Parallel operations involving brief functions
 * on small maps may execute more slowly than sequential forms if the
 * underlying work to parallelize the computation is more expensive
 * than the computation itself.  Similarly, parallelization may not
 * lead to much actual parallelism if all processors are busy
 * performing unrelated tasks.
 *
 * <p>All arguments to all task methods must be non-null.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>.
 *
 * @since 1.5
 * @author Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
    private static final long serialVersionUID = 7249069246763182397L;

    /*
     * Overview:
     *
     * The primary design goal of this hash table is to maintain
     * concurrent readability (typically method get(), but also
     * iterators and related methods) while minimizing update
     * contention. Secondary goals are to keep space consumption about
     * the same or better than java.util.HashMap, and to support high
     * initial insertion rates on an empty table by many threads.
     *
     * This map usually acts as a binned (bucketed) hash table.  Each
     * key-value mapping is held in a Node.  Most nodes are instances
     * of the basic Node class with hash, key, value, and next
     * fields. However, various subclasses exist: TreeNodes are
     * arranged in balanced trees, not lists.
TreeBins hold the roots 284 * of sets of TreeNodes. ForwardingNodes are placed at the heads 285 * of bins during resizing. ReservationNodes are used as 286 * placeholders while establishing values in computeIfAbsent and 287 * related methods. The types TreeBin, ForwardingNode, and 288 * ReservationNode do not hold normal user keys, values, or 289 * hashes, and are readily distinguishable during search etc 290 * because they have negative hash fields and null key and value 291 * fields. (These special nodes are either uncommon or transient, 292 * so the impact of carrying around some unused fields is 293 * insignificant.) 294 * 295 * The table is lazily initialized to a power-of-two size upon the 296 * first insertion. Each bin in the table normally contains a 297 * list of Nodes (most often, the list has only zero or one Node). 298 * Table accesses require volatile/atomic reads, writes, and 299 * CASes. Because there is no other way to arrange this without 300 * adding further indirections, we use intrinsics 301 * (jdk.internal.misc.Unsafe) operations. 302 * 303 * We use the top (sign) bit of Node hash fields for control 304 * purposes -- it is available anyway because of addressing 305 * constraints. Nodes with negative hash fields are specially 306 * handled or ignored in map methods. 307 * 308 * Insertion (via put or its variants) of the first node in an 309 * empty bin is performed by just CASing it to the bin. This is 310 * by far the most common case for put operations under most 311 * key/hash distributions. Other update operations (insert, 312 * delete, and replace) require locks. We do not want to waste 313 * the space required to associate a distinct lock object with 314 * each bin, so instead use the first node of a bin list itself as 315 * a lock. Locking support for these locks relies on builtin 316 * "synchronized" monitors. 
317 * 318 * Using the first node of a list as a lock does not by itself 319 * suffice though: When a node is locked, any update must first 320 * validate that it is still the first node after locking it, and 321 * retry if not. Because new nodes are always appended to lists, 322 * once a node is first in a bin, it remains first until deleted 323 * or the bin becomes invalidated (upon resizing). 324 * 325 * The main disadvantage of per-bin locks is that other update 326 * operations on other nodes in a bin list protected by the same 327 * lock can stall, for example when user equals() or mapping 328 * functions take a long time. However, statistically, under 329 * random hash codes, this is not a common problem. Ideally, the 330 * frequency of nodes in bins follows a Poisson distribution 331 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a 332 * parameter of about 0.5 on average, given the resizing threshold 333 * of 0.75, although with a large variance because of resizing 334 * granularity. Ignoring variance, the expected occurrences of 335 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The 336 * first values are: 337 * 338 * 0: 0.60653066 339 * 1: 0.30326533 340 * 2: 0.07581633 341 * 3: 0.01263606 342 * 4: 0.00157952 343 * 5: 0.00015795 344 * 6: 0.00001316 345 * 7: 0.00000094 346 * 8: 0.00000006 347 * more: less than 1 in ten million 348 * 349 * Lock contention probability for two threads accessing distinct 350 * elements is roughly 1 / (8 * #elements) under random hashes. 351 * 352 * Actual hash code distributions encountered in practice 353 * sometimes deviate significantly from uniform randomness. This 354 * includes the case when N > (1<<30), so some keys MUST collide. 355 * Similarly for dumb or hostile usages in which multiple keys are 356 * designed to have identical hash codes or ones that differs only 357 * in masked-out high bits. 
So we use a secondary strategy that 358 * applies when the number of nodes in a bin exceeds a 359 * threshold. These TreeBins use a balanced tree to hold nodes (a 360 * specialized form of red-black trees), bounding search time to 361 * O(log N). Each search step in a TreeBin is at least twice as 362 * slow as in a regular list, but given that N cannot exceed 363 * (1<<64) (before running out of addresses) this bounds search 364 * steps, lock hold times, etc, to reasonable constants (roughly 365 * 100 nodes inspected per operation worst case) so long as keys 366 * are Comparable (which is very common -- String, Long, etc). 367 * TreeBin nodes (TreeNodes) also maintain the same "next" 368 * traversal pointers as regular nodes, so can be traversed in 369 * iterators in the same way. 370 * 371 * The table is resized when occupancy exceeds a percentage 372 * threshold (nominally, 0.75, but see below). Any thread 373 * noticing an overfull bin may assist in resizing after the 374 * initiating thread allocates and sets up the replacement array. 375 * However, rather than stalling, these other threads may proceed 376 * with insertions etc. The use of TreeBins shields us from the 377 * worst case effects of overfilling while resizes are in 378 * progress. Resizing proceeds by transferring bins, one by one, 379 * from the table to the next table. However, threads claim small 380 * blocks of indices to transfer (via field transferIndex) before 381 * doing so, reducing contention. A generation stamp in field 382 * sizeCtl ensures that resizings do not overlap. Because we are 383 * using power-of-two expansion, the elements from each bin must 384 * either stay at same index, or move with a power of two 385 * offset. We eliminate unnecessary node creation by catching 386 * cases where old nodes can be reused because their next fields 387 * won't change. On average, only about one-sixth of them need 388 * cloning when a table doubles. 
The nodes they replace will be 389 * garbage collectable as soon as they are no longer referenced by 390 * any reader thread that may be in the midst of concurrently 391 * traversing table. Upon transfer, the old table bin contains 392 * only a special forwarding node (with hash field "MOVED") that 393 * contains the next table as its key. On encountering a 394 * forwarding node, access and update operations restart, using 395 * the new table. 396 * 397 * Each bin transfer requires its bin lock, which can stall 398 * waiting for locks while resizing. However, because other 399 * threads can join in and help resize rather than contend for 400 * locks, average aggregate waits become shorter as resizing 401 * progresses. The transfer operation must also ensure that all 402 * accessible bins in both the old and new table are usable by any 403 * traversal. This is arranged in part by proceeding from the 404 * last bin (table.length - 1) up towards the first. Upon seeing 405 * a forwarding node, traversals (see class Traverser) arrange to 406 * move to the new table without revisiting nodes. To ensure that 407 * no intervening nodes are skipped even when moved out of order, 408 * a stack (see class TableStack) is created on first encounter of 409 * a forwarding node during a traversal, to maintain its place if 410 * later processing the current table. The need for these 411 * save/restore mechanics is relatively rare, but when one 412 * forwarding node is encountered, typically many more will be. 413 * So Traversers use a simple caching scheme to avoid creating so 414 * many new TableStack nodes. (Thanks to Peter Levart for 415 * suggesting use of a stack here.) 416 * 417 * The traversal scheme also applies to partial traversals of 418 * ranges of bins (via an alternate Traverser constructor) 419 * to support partitioned aggregate operations. 
Also, read-only 420 * operations give up if ever forwarded to a null table, which 421 * provides support for shutdown-style clearing, which is also not 422 * currently implemented. 423 * 424 * Lazy table initialization minimizes footprint until first use, 425 * and also avoids resizings when the first operation is from a 426 * putAll, constructor with map argument, or deserialization. 427 * These cases attempt to override the initial capacity settings, 428 * but harmlessly fail to take effect in cases of races. 429 * 430 * The element count is maintained using a specialization of 431 * LongAdder. We need to incorporate a specialization rather than 432 * just use a LongAdder in order to access implicit 433 * contention-sensing that leads to creation of multiple 434 * CounterCells. The counter mechanics avoid contention on 435 * updates but can encounter cache thrashing if read too 436 * frequently during concurrent access. To avoid reading so often, 437 * resizing under contention is attempted only upon adding to a 438 * bin already holding two or more nodes. Under uniform hash 439 * distributions, the probability of this occurring at threshold 440 * is around 13%, meaning that only about 1 in 8 puts check 441 * threshold (and after resizing, many fewer do so). 442 * 443 * TreeBins use a special form of comparison for search and 444 * related operations (which is the main reason we cannot use 445 * existing collections such as TreeMaps). TreeBins contain 446 * Comparable elements, but may contain others, as well as 447 * elements that are Comparable but not necessarily Comparable for 448 * the same T, so we cannot invoke compareTo among them. To handle 449 * this, the tree is ordered primarily by hash value, then by 450 * Comparable.compareTo order if applicable. On lookup at a node, 451 * if elements are not comparable or compare as 0 then both left 452 * and right children may need to be searched in the case of tied 453 * hash values. 
(This corresponds to the full list search that 454 * would be necessary if all elements were non-Comparable and had 455 * tied hashes.) On insertion, to keep a total ordering (or as 456 * close as is required here) across rebalancings, we compare 457 * classes and identityHashCodes as tie-breakers. The red-black 458 * balancing code is updated from pre-jdk-collections 459 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 460 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 461 * Algorithms" (CLR). 462 * 463 * TreeBins also require an additional locking mechanism. While 464 * list traversal is always possible by readers even during 465 * updates, tree traversal is not, mainly because of tree-rotations 466 * that may change the root node and/or its linkages. TreeBins 467 * include a simple read-write lock mechanism parasitic on the 468 * main bin-synchronization strategy: Structural adjustments 469 * associated with an insertion or removal are already bin-locked 470 * (and so cannot conflict with other writers) but must wait for 471 * ongoing readers to finish. Since there can be only one such 472 * waiter, we use a simple scheme using a single "waiter" field to 473 * block writers. However, readers need never block. If the root 474 * lock is held, they proceed along the slow traversal path (via 475 * next-pointers) until the lock becomes available or the list is 476 * exhausted, whichever comes first. These cases are not fast, but 477 * maximize aggregate expected throughput. 478 * 479 * Maintaining API and serialization compatibility with previous 480 * versions of this class introduces several oddities. Mainly: We 481 * leave untouched but unused constructor arguments referring to 482 * concurrencyLevel. We accept a loadFactor constructor argument, 483 * but apply it only to initial table capacity (which is the only 484 * time that we can guarantee to honor it.) 
     * We also declare an
     * unused "Segment" class that is instantiated in minimal form
     * only when serializing.
     *
     * Also, solely for compatibility with previous versions of this
     * class, it extends AbstractMap, even though all of its methods
     * are overridden, so it is just useless baggage.
     *
     * This file is organized to make things a little easier to follow
     * while reading than they might otherwise: First the main static
     * declarations and utilities, then fields, then main public
     * methods (with a few factorings of multiple public methods into
     * internal ones), then sizing methods, trees, traversers, and
     * bulk operations.
     */

    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes.  The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.
     */
    private static final int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    /*
     * Encodings for Node hash fields. See above for explanation.
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /**
     * Serialized pseudo-fields, provided only for jdk7 compatibility.
     * @serialField segments Segment[]
     *   The segments, each of which is a specialized hash table.
     * @serialField segmentMask int
     *   Mask value for indexing into segments. The upper bits of a
     *   key's hash code are used to choose the segment.
     * @serialField segmentShift int
     *   Shift value for indexing within segments.
     */
    // Field names and types mirror the pre-JDK8 Segment-based layout,
    // as described in the jdk7-compatibility javadoc above.
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE),
    };

    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final K key;
        volatile V val;
        volatile Node<K,V> next;

        Node(int hash, K key, V val) {
            this.hash = hash;
            this.key = key;
            this.val = val;
        }

        Node(int hash, K key, V val, Node<K,V> next) {
            this(hash, key, val);
            this.next = next;
        }

        public final K getKey()     { return key; }
        public final V getValue()   { return val; }
        // Matches the Map.Entry contract: key.hashCode() ^ value.hashCode().
        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
        public final String toString() {
            return Helpers.mapEntryToString(key, val);
        }
        // Plain Nodes are never exported as mutable entries (see class
        // javadoc above), so mutation through the Entry view is rejected.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            // Note the single read of the volatile val field (captured
            // in u) so both sides of the value comparison use one
            // consistent snapshot under concurrent updates.
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        Node<K,V> find(int h, Object k) {
            // Linear scan of this bin's list, matching first on hash
            // and then on key identity or equals.
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }

    /* ---------------- Static utilities -------------- */

    /**
     * Spreads (XORs) higher bits of hash to lower and also forces top
     * bit to 0. Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading.
     * Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    /**
     * Returns a power of two table size for the given desired capacity.
     * See Hackers Delight, sec 3.2
     */
    private static final int tableSizeFor(int c) {
        // Smear the highest set bit of (c - 1) into every lower bit
        // position, then add one to reach the next power of two >= c.
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            // Accept only a direct "implements Comparable<C>" where the
            // sole type argument is exactly this class.
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /* ---------------- Table element access -------------- */

    /*
     * Atomic access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  All uses of
     * the tab arguments must be null checked by callers.  All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index.  Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so require only release ordering.
     */

    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        // Acquire read of tab[i] via its raw array offset.
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSetObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        // Release store suffices: see comment above (always under lock).
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }

    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views, lazily created in keySet()/values()/entrySet()
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;


    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
    }

    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Size so that initialCapacity elements fit without resizing:
        // roughly capacity = 1.5 * initialCapacity, rounded up to a
        // power of two (saturating at MAXIMUM_CAPACITY).
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        // sizeCtl holds the initial table size until first insertion.
        this.sizeCtl = cap;
    }

    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        // !(loadFactor > 0.0f) also rejects NaN, unlike (loadFactor <= 0.0f).
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }

    // Original (since JDK1.2) Map methods

    /**
     * {@inheritDoc}
     */
    public int size() {
        long n = sumCount();
        // Clamp the (possibly transiently negative) long count to int range.
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }

    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Fast path: first node of the bin matches.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash marks a special node (forwarding, tree,
                // or reservation); delegate to its find() override.
                return (p = e.find(h, key)) != null ? p.val : null;
            // Ordinary chained bin: scan the rest of the list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        return get(key) != null;
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        // Retry loop: each iteration either initializes the table, helps
        // a resize, or attempts an insert/update in the target bin.
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else if (onlyIfAbsent // check first node without acquiring lock
                     && fh == hash
                     && ((fk = f.key) == key || (fk != null && key.equals(fk)))
                     && (fv = f.val) != null)
                return fv;
            else {
                V oldVal = null;
                // Lock the bin's first node while modifying the bin.
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck: bin head unchanged
                        if (fh >= 0) {        // ordinary list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal; // replaced: count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount);
        return null;
    }

    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // empty table or empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                // validated: bin head was still current under the lock,
                // so a miss is definitive (no retry needed).
                boolean validated = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) { // ordinary list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // cv == null means unconditional.
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;
                                        else if (pred != null)
                                            pred.next = e.next; // unlink mid-list
                                        else
                                            setTabAt(tab, i, e.next); // unlink head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree became too small: revert to list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // removal: decrement count
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        // Clear one bin at a time under its lock; restart from index 0
        // if a resize is encountered mid-way.
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        // Count the nodes being discarded from this bin.
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        KeySetView<K,V> ks;
        if ((ks = keySet) != null) return ks;
        return keySet = new KeySetView<K,V>(this, null);
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
     * and {@link Spliterator#NONNULL}.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        ValuesView<K,V> vs;
        if ((vs = values) != null) return vs;
        return values = new ValuesView<K,V>(this);
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        EntrySetView<K,V> es;
        if ((es = entrySet) != null) return es;
        return entrySet = new EntrySetView<K,V>(this);
    }

    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }

    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = p.key;
                V v = p.val;
                // Guard against self-referential maps in the output.
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }

    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Check containment in both directions (sizes may drift
            // under concurrent modification, so compare entry-wise).
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the serialized fields, followed by the key (Object) and value
     * (Object) for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked")
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        // Write the jdk7-era pseudo-fields declared in
        // serialPersistentFields, then the key/value pairs.
        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
        streamFields.put("segments", segments);
        streamFields.put("segmentShift", segmentShift);
        streamFields.put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // Null pair terminates the key/value stream.
        s.writeObject(null);
        s.writeObject(null);
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        // Phase 1: read all pairs into a temporary stack of nodes.
        long size = 0L;
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked")
            K k = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break; // null pair terminates the stream
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            // Phase 2: size the table and insert the nodes, handling
            // duplicates and treeifying overpopulated bins.
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L;
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) { // already a TreeBin
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        // Scan for a duplicate key in this bin.
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Bin is full: convert the whole chain
                            // (including p) to a TreeBin instead.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // resize threshold = 0.75 * n
            baseCount = added;
        }
    }

    // ConcurrentMap methods

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V putIfAbsent(K key, V value) {
        return putVal(key, value, true);
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object key, Object value) {
        if (key == null)
            throw new NullPointerException();
        return value != null && replaceNode(key, null, value) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if any of the arguments are null
     */
    public boolean replace(K key, V oldValue, V newValue) {
        if (key == null || oldValue == null || newValue == null)
            throw new NullPointerException();
        return replaceNode(key, newValue, oldValue) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V replace(K key, V value) {
        if (key == null || value == null)
            throw new NullPointerException();
        return replaceNode(key, value, null);
    }

    // Overrides of JDK8+ Map extension method defaults

    /**
     * Returns the value to which the specified key is mapped, or the
     * given default value if this map contains no mapping for the
     * key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the value to return if this map contains
     * no mapping for the given key
     * @return the mapping for the key, if present; else the default value
     * @throws NullPointerException if the specified key is null
     */
    public V getOrDefault(Object key, V defaultValue) {
        V v;
        return (v = get(key)) == null ? defaultValue : v;
    }

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                action.accept(p.key, p.val);
            }
        }
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // Retry per key: the CAS-like replaceNode fails if the
                // value changed concurrently; reread and try again until
                // replaced or the mapping disappears.
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }

    /**
     * Helper method for EntrySetView.removeIf.
     */
    boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                // Snapshot entry; removal is conditional on the value
                // still matching (replaceNode with cv = v).
                Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
                if (function.test(e) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * Helper method for ValuesView.removeIf.
     */
    boolean removeValueIf(Predicate<? super V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                if (function.test(v) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}.  The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key.  Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
1690 * 1691 * @param key key with which the specified value is to be associated 1692 * @param mappingFunction the function to compute a value 1693 * @return the current (existing or computed) value associated with 1694 * the specified key, or null if the computed value is null 1695 * @throws NullPointerException if the specified key or mappingFunction 1696 * is null 1697 * @throws IllegalStateException if the computation detectably 1698 * attempts a recursive update to this map that would 1699 * otherwise never complete 1700 * @throws RuntimeException or Error if the mappingFunction does so, 1701 * in which case the mapping is left unestablished 1702 */ 1703 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { 1704 if (key == null || mappingFunction == null) 1705 throw new NullPointerException(); 1706 int h = spread(key.hashCode()); 1707 V val = null; 1708 int binCount = 0; 1709 for (Node<K,V>[] tab = table;;) { 1710 Node<K,V> f; int n, i, fh; K fk; V fv; 1711 if (tab == null || (n = tab.length) == 0) 1712 tab = initTable(); 1713 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 1714 Node<K,V> r = new ReservationNode<K,V>(); 1715 synchronized (r) { 1716 if (casTabAt(tab, i, null, r)) { 1717 binCount = 1; 1718 Node<K,V> node = null; 1719 try { 1720 if ((val = mappingFunction.apply(key)) != null) 1721 node = new Node<K,V>(h, key, val); 1722 } finally { 1723 setTabAt(tab, i, node); 1724 } 1725 } 1726 } 1727 if (binCount != 0) 1728 break; 1729 } 1730 else if ((fh = f.hash) == MOVED) 1731 tab = helpTransfer(tab, f); 1732 else if (fh == h // check first node without acquiring lock 1733 && ((fk = f.key) == key || (fk != null && key.equals(fk))) 1734 && (fv = f.val) != null) 1735 return fv; 1736 else { 1737 boolean added = false; 1738 synchronized (f) { 1739 if (tabAt(tab, i) == f) { 1740 if (fh >= 0) { 1741 binCount = 1; 1742 for (Node<K,V> e = f;; ++binCount) { 1743 K ek; 1744 if (e.hash == h && 1745 ((ek = e.key) == key || 1746 (ek != 
null && key.equals(ek)))) { 1747 val = e.val; 1748 break; 1749 } 1750 Node<K,V> pred = e; 1751 if ((e = e.next) == null) { 1752 if ((val = mappingFunction.apply(key)) != null) { 1753 if (pred.next != null) 1754 throw new IllegalStateException("Recursive update"); 1755 added = true; 1756 pred.next = new Node<K,V>(h, key, val); 1757 } 1758 break; 1759 } 1760 } 1761 } 1762 else if (f instanceof TreeBin) { 1763 binCount = 2; 1764 TreeBin<K,V> t = (TreeBin<K,V>)f; 1765 TreeNode<K,V> r, p; 1766 if ((r = t.root) != null && 1767 (p = r.findTreeNode(h, key, null)) != null) 1768 val = p.val; 1769 else if ((val = mappingFunction.apply(key)) != null) { 1770 added = true; 1771 t.putTreeVal(h, key, val); 1772 } 1773 } 1774 else if (f instanceof ReservationNode) 1775 throw new IllegalStateException("Recursive update"); 1776 } 1777 } 1778 if (binCount != 0) { 1779 if (binCount >= TREEIFY_THRESHOLD) 1780 treeifyBin(tab, i); 1781 if (!added) 1782 return val; 1783 break; 1784 } 1785 } 1786 } 1787 if (val != null) 1788 addCount(1L, binCount); 1789 return val; 1790 } 1791 1792 /** 1793 * If the value for the specified key is present, attempts to 1794 * compute a new mapping given the key and its current mapped 1795 * value. The entire method invocation is performed atomically. 1796 * Some attempted update operations on this map by other threads 1797 * may be blocked while computation is in progress, so the 1798 * computation should be short and simple, and must not attempt to 1799 * update any other mappings of this map. 
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;   // change in element count: -1 removed, 0 unchanged
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;   // no bin, hence no mapping: nothing to do
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // null result removes the node
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping). The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;   // change in element count: -1, 0, or +1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: reserve the slot while the function runs (see
                // computeIfAbsent).
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val);
                            }
                        } finally {
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        if (pred.next != null)
                                            throw new IllegalStateException("Recursive update");
                                        delta = 1;
                                        pred.next = new Node<K,V>(h, key, val);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically. Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ?
                                          extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;   // change in element count: -1, 0, or +1
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: the function is not invoked when absent; just
                // try to install the given value directly.
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    delta = 1;
                                    val = value;
                                    pred.next = new Node<K,V>(h, key, val);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }

    // Hashtable legacy methods

    /**
     * Tests if some key maps into the specified value in this table.
     *
     * <p>Note that this method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections Framework.
     *
     * @param value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }

    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table
     * @see #keySet()
     */
    public Enumeration<K> keys() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new KeyIterator<K,V>(t, f, 0, f, this);
    }

    /**
     * Returns an enumeration of the values in this table.
     *
     * @return an enumeration of the values in this table
     * @see #values()
     */
    public Enumeration<V> elements() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new ValueIterator<K,V>(t, f, 0, f, this);
    }

    // ConcurrentHashMap-only methods

    /**
     * Returns the number of mappings. This method should be used
     * instead of {@link #size} because a ConcurrentHashMap may
     * contain more mappings than can be represented as an int. The
     * value returned is an estimate; the actual count may differ if
     * there are concurrent insertions or removals.
     *
     * @return the number of mappings
     * @since 1.8
     */
    public long mappingCount() {
        long n = sumCount();
        return (n < 0L) ? 0L : n; // ignore transient negative values
    }

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param <K> the element type of the returned set
     * @return the new set
     * @since 1.8
     */
    public static <K> KeySetView<K,Boolean> newKeySet() {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
    }

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @param <K> the element type of the returned set
     * @return the new set
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     * @since 1.8
     */
    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
    }

    /**
     * Returns a {@link Set} view of the keys in this map, using the
     * given common mapped value for any additions (i.e., {@link
     * Collection#add} and {@link Collection#addAll(Collection)}).
     * This is of course only appropriate if it is acceptable to use
     * the same value for all additions from this view.
     *
     * @param mappedValue the mapped value to use for any additions
     * @return the set view
     * @throws NullPointerException if the mappedValue is null
     */
    public KeySetView<K,V> keySet(V mappedValue) {
        if (mappedValue == null)
            throw new NullPointerException();
        return new KeySetView<K,V>(this, mappedValue);
    }

    /* ---------------- Special Nodes -------------- */

    /**
     * A node inserted at head of bins during transfer operations.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null);   // hash MOVED marks this bin as moved
            this.nextTable = tab;
        }

        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }

    /**
     * A place-holder node used in computeIfAbsent and compute.
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            super(RESERVED, null, null);
        }

        // A reservation holds no mapping, so lookups always miss.
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }

    /* ---------------- Table Initialization and Resizing -------------- */

    /**
     * Returns the stamp bits for resizing a table of size n.
     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
     */
    static final int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }

    /**
     * Initializes table, using the size recorded in sizeCtl.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);   // next resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }

    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available. Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        if ((as = counterCells) != null ||
            !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            // Contended (or cells already in use): fall through to the
            // striped-counter path, deferring to fullAddCount on any failure.
            CounterCell a; long v; int m;
            boolean uncontended = true;
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int n, sc;
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                if (sc < 0) {
                    // A resize is in progress; join it unless it is finishing
                    // or has no work left.
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSetInt(this, SIZECTL, sc,
                                            (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
                s = sumCount();
            }
        }
    }

    /**
     * Helps transfer if a resize is in progress.
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length);
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
                    break;
                if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }

    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                n = (sc > c) ? sc : c;
                if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;
            else if (tab == table) {
                int rs = resizeStamp(n);
                if (U.compareAndSetInt(this, SIZECTL, sc,
                                       (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
            }
        }
    }

    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferIndex = n;
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;
                    advance = false;
                }
                else if (U.compareAndSetInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);   // 0.75 * new capacity
                    return;
                }
                if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;   // not the last worker; leave commit to it
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            // Split the list into "low" (index i) and "high"
                            // (index i + n) lists; the trailing run whose
                            // nodes all land in the same half is reused
                            // rather than copied.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify a half that shrank below threshold;
                            // reuse the whole bin if the other half is empty.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }

    /* ---------------- Counter support -------------- */

    /**
     * A padded cell for distributing counts. Adapted from LongAdder
     * and Striped64. See their internal docs for explanation.
     */
    @jdk.internal.vm.annotation.Contended static final class CounterCell {
        volatile long value;
        CounterCell(long x) { value = x; }
    }

    final long sumCount() {
        CounterCell[] as = counterCells; CounterCell a;
        long sum = baseCount;
        if (as != null) {
            for (int i = 0; i < as.length; ++i) {
                if ((a = as[i]) != null)
                    sum += a.value;
            }
        }
        return sum;
    }

    // See LongAdder version for explanation
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }

    /* ---------------- Conversion from/to TreeBins -------------- */

    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);   // prefer growing over treeifying
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    if (tabAt(tab, index) == b) {
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }

    /**
     * Returns a list of non-TreeNodes replacing those in given list.
     */
    static <K,V> Node<K,V> untreeify(Node<K,V> b) {
        Node<K,V> hd = null, tl = null;
        for (Node<K,V> q = b; q != null; q = q.next) {
            Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
            if (tl == null)
                hd = p;
            else
                tl.next = p;
            tl = p;
        }
        return hd;
    }

    /* ---------------- TreeNodes -------------- */

    /**
     * Nodes for use in TreeBins.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    // Equal hashes, non-comparable keys: search the right
                    // subtree recursively, then fall back to the left.
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }

    /* ---------------- TreeBins -------------- */

    /**
     * TreeNodes used at the heads of bins. TreeBins do not hold user
     * keys or values, but instead point to list of TreeNodes and
     * their root. They also maintain a parasitic read-write lock
     * forcing writers (who hold bin lock) to wait for readers (who do
     * not) to complete before tree restructuring operations.
     */
    static final class TreeBin<K,V> extends Node<K,V> {
        TreeNode<K,V> root;
        volatile TreeNode<K,V> first;
        volatile Thread waiter;
        volatile int lockState;
        // values for lockState
        static final int WRITER = 1; // set while holding write lock
        static final int WAITER = 2; // set when waiting for write lock
        static final int READER = 4; // increment value for setting read lock

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Creates bin with initial set of nodes headed by b.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null);
            this.first = b;
            TreeNode<K,V> r = null;
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (r == null) {
                    x.parent = null;
                    x.red = false;
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }

        /**
         * Acquires write lock for tree restructuring.
         */
        private final void lockRoot() {
            if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }

        /**
         * Releases write lock for tree restructuring.
         */
        private final void unlockRoot() {
            lockState = 0;
        }

        /**
         * Possibly blocks awaiting root lock.
         */
        private final void contendedLock() {
            boolean waiting = false;
            for (int s;;) {
                // Lock is free (possibly with our WAITER bit set): try to
                // take it.
                if (((s = lockState) & ~WAITER) == 0) {
                    if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiting)
                            waiter = null;
                        return;
                    }
                }
                else if ((s & WAITER) == 0) {
                    if (U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                }
                else if (waiting)
                    LockSupport.park(this);
            }
        }

        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; ) {
                    int s; K ek;
                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
                        if (e.hash == h &&
                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
                            return e;
                        e = e.next;
                    }
                    else if (U.compareAndSetInt(this, LOCKSTATE, s,
                                                s + READER)) {
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            // Last reader out wakes a waiting writer, if any.
                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                                (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }

        /**
         * Finds or adds a node.
2933 * @return null if added 2934 */ 2935 final TreeNode<K,V> putTreeVal(int h, K k, V v) { 2936 Class<?> kc = null; 2937 boolean searched = false; 2938 for (TreeNode<K,V> p = root;;) { 2939 int dir, ph; K pk; 2940 if (p == null) { 2941 first = root = new TreeNode<K,V>(h, k, v, null, null); 2942 break; 2943 } 2944 else if ((ph = p.hash) > h) 2945 dir = -1; 2946 else if (ph < h) 2947 dir = 1; 2948 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2949 return p; 2950 else if ((kc == null && 2951 (kc = comparableClassFor(k)) == null) || 2952 (dir = compareComparables(kc, k, pk)) == 0) { 2953 if (!searched) { 2954 TreeNode<K,V> q, ch; 2955 searched = true; 2956 if (((ch = p.left) != null && 2957 (q = ch.findTreeNode(h, k, kc)) != null) || 2958 ((ch = p.right) != null && 2959 (q = ch.findTreeNode(h, k, kc)) != null)) 2960 return q; 2961 } 2962 dir = tieBreakOrder(k, pk); 2963 } 2964 2965 TreeNode<K,V> xp = p; 2966 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2967 TreeNode<K,V> x, f = first; 2968 first = x = new TreeNode<K,V>(h, k, v, f, xp); 2969 if (f != null) 2970 f.prev = x; 2971 if (dir <= 0) 2972 xp.left = x; 2973 else 2974 xp.right = x; 2975 if (!xp.red) 2976 x.red = true; 2977 else { 2978 lockRoot(); 2979 try { 2980 root = balanceInsertion(root, x); 2981 } finally { 2982 unlockRoot(); 2983 } 2984 } 2985 break; 2986 } 2987 } 2988 assert checkInvariants(root); 2989 return null; 2990 } 2991 2992 /** 2993 * Removes the given node, that must be present before this 2994 * call. This is messier than typical red-black deletion code 2995 * because we cannot swap the contents of an interior node 2996 * with a leaf successor that is pinned by "next" pointers 2997 * that are accessible independently of lock. So instead we 2998 * swap the tree linkages. 
2999 * 3000 * @return true if now too small, so should be untreeified 3001 */ 3002 final boolean removeTreeNode(TreeNode<K,V> p) { 3003 TreeNode<K,V> next = (TreeNode<K,V>)p.next; 3004 TreeNode<K,V> pred = p.prev; // unlink traversal pointers 3005 TreeNode<K,V> r, rl; 3006 if (pred == null) 3007 first = next; 3008 else 3009 pred.next = next; 3010 if (next != null) 3011 next.prev = pred; 3012 if (first == null) { 3013 root = null; 3014 return true; 3015 } 3016 if ((r = root) == null || r.right == null || // too small 3017 (rl = r.left) == null || rl.left == null) 3018 return true; 3019 lockRoot(); 3020 try { 3021 TreeNode<K,V> replacement; 3022 TreeNode<K,V> pl = p.left; 3023 TreeNode<K,V> pr = p.right; 3024 if (pl != null && pr != null) { 3025 TreeNode<K,V> s = pr, sl; 3026 while ((sl = s.left) != null) // find successor 3027 s = sl; 3028 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 3029 TreeNode<K,V> sr = s.right; 3030 TreeNode<K,V> pp = p.parent; 3031 if (s == pr) { // p was s's direct parent 3032 p.parent = s; 3033 s.right = p; 3034 } 3035 else { 3036 TreeNode<K,V> sp = s.parent; 3037 if ((p.parent = sp) != null) { 3038 if (s == sp.left) 3039 sp.left = p; 3040 else 3041 sp.right = p; 3042 } 3043 if ((s.right = pr) != null) 3044 pr.parent = s; 3045 } 3046 p.left = null; 3047 if ((p.right = sr) != null) 3048 sr.parent = p; 3049 if ((s.left = pl) != null) 3050 pl.parent = s; 3051 if ((s.parent = pp) == null) 3052 r = s; 3053 else if (p == pp.left) 3054 pp.left = s; 3055 else 3056 pp.right = s; 3057 if (sr != null) 3058 replacement = sr; 3059 else 3060 replacement = p; 3061 } 3062 else if (pl != null) 3063 replacement = pl; 3064 else if (pr != null) 3065 replacement = pr; 3066 else 3067 replacement = p; 3068 if (replacement != p) { 3069 TreeNode<K,V> pp = replacement.parent = p.parent; 3070 if (pp == null) 3071 r = replacement; 3072 else if (p == pp.left) 3073 pp.left = replacement; 3074 else 3075 pp.right = replacement; 3076 p.left = p.right = 
p.parent = null; 3077 } 3078 3079 root = (p.red) ? r : balanceDeletion(r, replacement); 3080 3081 if (p == replacement) { // detach pointers 3082 TreeNode<K,V> pp; 3083 if ((pp = p.parent) != null) { 3084 if (p == pp.left) 3085 pp.left = null; 3086 else if (p == pp.right) 3087 pp.right = null; 3088 p.parent = null; 3089 } 3090 } 3091 } finally { 3092 unlockRoot(); 3093 } 3094 assert checkInvariants(root); 3095 return false; 3096 } 3097 3098 /* ------------------------------------------------------------ */ 3099 // Red-black tree methods, all adapted from CLR 3100 3101 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 3102 TreeNode<K,V> p) { 3103 TreeNode<K,V> r, pp, rl; 3104 if (p != null && (r = p.right) != null) { 3105 if ((rl = p.right = r.left) != null) 3106 rl.parent = p; 3107 if ((pp = r.parent = p.parent) == null) 3108 (root = r).red = false; 3109 else if (pp.left == p) 3110 pp.left = r; 3111 else 3112 pp.right = r; 3113 r.left = p; 3114 p.parent = r; 3115 } 3116 return root; 3117 } 3118 3119 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 3120 TreeNode<K,V> p) { 3121 TreeNode<K,V> l, pp, lr; 3122 if (p != null && (l = p.left) != null) { 3123 if ((lr = p.left = l.right) != null) 3124 lr.parent = p; 3125 if ((pp = l.parent = p.parent) == null) 3126 (root = l).red = false; 3127 else if (pp.right == p) 3128 pp.right = l; 3129 else 3130 pp.left = l; 3131 l.right = p; 3132 p.parent = l; 3133 } 3134 return root; 3135 } 3136 3137 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 3138 TreeNode<K,V> x) { 3139 x.red = true; 3140 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 3141 if ((xp = x.parent) == null) { 3142 x.red = false; 3143 return x; 3144 } 3145 else if (!xp.red || (xpp = xp.parent) == null) 3146 return root; 3147 if (xp == (xppl = xpp.left)) { 3148 if ((xppr = xpp.right) != null && xppr.red) { 3149 xppr.red = false; 3150 xp.red = false; 3151 xpp.red = true; 3152 x = xpp; 3153 } 3154 else { 3155 if (x == xp.right) { 3156 root = 
rotateLeft(root, x = xp); 3157 xpp = (xp = x.parent) == null ? null : xp.parent; 3158 } 3159 if (xp != null) { 3160 xp.red = false; 3161 if (xpp != null) { 3162 xpp.red = true; 3163 root = rotateRight(root, xpp); 3164 } 3165 } 3166 } 3167 } 3168 else { 3169 if (xppl != null && xppl.red) { 3170 xppl.red = false; 3171 xp.red = false; 3172 xpp.red = true; 3173 x = xpp; 3174 } 3175 else { 3176 if (x == xp.left) { 3177 root = rotateRight(root, x = xp); 3178 xpp = (xp = x.parent) == null ? null : xp.parent; 3179 } 3180 if (xp != null) { 3181 xp.red = false; 3182 if (xpp != null) { 3183 xpp.red = true; 3184 root = rotateLeft(root, xpp); 3185 } 3186 } 3187 } 3188 } 3189 } 3190 } 3191 3192 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 3193 TreeNode<K,V> x) { 3194 for (TreeNode<K,V> xp, xpl, xpr;;) { 3195 if (x == null || x == root) 3196 return root; 3197 else if ((xp = x.parent) == null) { 3198 x.red = false; 3199 return x; 3200 } 3201 else if (x.red) { 3202 x.red = false; 3203 return root; 3204 } 3205 else if ((xpl = xp.left) == x) { 3206 if ((xpr = xp.right) != null && xpr.red) { 3207 xpr.red = false; 3208 xp.red = true; 3209 root = rotateLeft(root, xp); 3210 xpr = (xp = x.parent) == null ? null : xp.right; 3211 } 3212 if (xpr == null) 3213 x = xp; 3214 else { 3215 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 3216 if ((sr == null || !sr.red) && 3217 (sl == null || !sl.red)) { 3218 xpr.red = true; 3219 x = xp; 3220 } 3221 else { 3222 if (sr == null || !sr.red) { 3223 if (sl != null) 3224 sl.red = false; 3225 xpr.red = true; 3226 root = rotateRight(root, xpr); 3227 xpr = (xp = x.parent) == null ? 3228 null : xp.right; 3229 } 3230 if (xpr != null) { 3231 xpr.red = (xp == null) ? 
false : xp.red; 3232 if ((sr = xpr.right) != null) 3233 sr.red = false; 3234 } 3235 if (xp != null) { 3236 xp.red = false; 3237 root = rotateLeft(root, xp); 3238 } 3239 x = root; 3240 } 3241 } 3242 } 3243 else { // symmetric 3244 if (xpl != null && xpl.red) { 3245 xpl.red = false; 3246 xp.red = true; 3247 root = rotateRight(root, xp); 3248 xpl = (xp = x.parent) == null ? null : xp.left; 3249 } 3250 if (xpl == null) 3251 x = xp; 3252 else { 3253 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 3254 if ((sl == null || !sl.red) && 3255 (sr == null || !sr.red)) { 3256 xpl.red = true; 3257 x = xp; 3258 } 3259 else { 3260 if (sl == null || !sl.red) { 3261 if (sr != null) 3262 sr.red = false; 3263 xpl.red = true; 3264 root = rotateLeft(root, xpl); 3265 xpl = (xp = x.parent) == null ? 3266 null : xp.left; 3267 } 3268 if (xpl != null) { 3269 xpl.red = (xp == null) ? false : xp.red; 3270 if ((sl = xpl.left) != null) 3271 sl.red = false; 3272 } 3273 if (xp != null) { 3274 xp.red = false; 3275 root = rotateRight(root, xp); 3276 } 3277 x = root; 3278 } 3279 } 3280 } 3281 } 3282 } 3283 3284 /** 3285 * Checks invariants recursively for the tree of Nodes rooted at t. 
3286 */ 3287 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 3288 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 3289 tb = t.prev, tn = (TreeNode<K,V>)t.next; 3290 if (tb != null && tb.next != t) 3291 return false; 3292 if (tn != null && tn.prev != t) 3293 return false; 3294 if (tp != null && t != tp.left && t != tp.right) 3295 return false; 3296 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 3297 return false; 3298 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 3299 return false; 3300 if (t.red && tl != null && tl.red && tr != null && tr.red) 3301 return false; 3302 if (tl != null && !checkInvariants(tl)) 3303 return false; 3304 if (tr != null && !checkInvariants(tr)) 3305 return false; 3306 return true; 3307 } 3308 3309 private static final Unsafe U = Unsafe.getUnsafe(); 3310 private static final long LOCKSTATE 3311 = U.objectFieldOffset(TreeBin.class, "lockState"); 3312 } 3313 3314 /* ----------------Table Traversal -------------- */ 3315 3316 /** 3317 * Records the table, its length, and current traversal index for a 3318 * traverser that must process a region of a forwarded table before 3319 * proceeding with current table. 3320 */ 3321 static final class TableStack<K,V> { 3322 int length; 3323 int index; 3324 Node<K,V>[] tab; 3325 TableStack<K,V> next; 3326 } 3327 3328 /** 3329 * Encapsulates traversal for methods such as containsValue; also 3330 * serves as a base class for other iterators and spliterators. 3331 * 3332 * Method advance visits once each still-valid node that was 3333 * reachable upon iterator construction. It might miss some that 3334 * were added to a bin after the bin was visited, which is OK wrt 3335 * consistency guarantees. Maintaining this property in the face 3336 * of possible ongoing resizes requires a fair amount of 3337 * bookkeeping state that is difficult to optimize away amidst 3338 * volatile accesses. Even so, traversal maintains reasonable 3339 * throughput. 
3340 * 3341 * Normally, iteration proceeds bin-by-bin traversing lists. 3342 * However, if the table has been resized, then all future steps 3343 * must traverse both the bin at the current index as well as at 3344 * (index + baseSize); and so on for further resizings. To 3345 * paranoically cope with potential sharing by users of iterators 3346 * across threads, iteration terminates if a bounds checks fails 3347 * for a table read. 3348 */ 3349 static class Traverser<K,V> { 3350 Node<K,V>[] tab; // current table; updated if resized 3351 Node<K,V> next; // the next entry to use 3352 TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes 3353 int index; // index of bin to use next 3354 int baseIndex; // current index of initial table 3355 int baseLimit; // index bound for initial table 3356 final int baseSize; // initial table size 3357 3358 Traverser(Node<K,V>[] tab, int size, int index, int limit) { 3359 this.tab = tab; 3360 this.baseSize = size; 3361 this.baseIndex = this.index = index; 3362 this.baseLimit = limit; 3363 this.next = null; 3364 } 3365 3366 /** 3367 * Advances if possible, returning next valid node, or null if none. 
3368 */ 3369 final Node<K,V> advance() { 3370 Node<K,V> e; 3371 if ((e = next) != null) 3372 e = e.next; 3373 for (;;) { 3374 Node<K,V>[] t; int i, n; // must use locals in checks 3375 if (e != null) 3376 return next = e; 3377 if (baseIndex >= baseLimit || (t = tab) == null || 3378 (n = t.length) <= (i = index) || i < 0) 3379 return next = null; 3380 if ((e = tabAt(t, i)) != null && e.hash < 0) { 3381 if (e instanceof ForwardingNode) { 3382 tab = ((ForwardingNode<K,V>)e).nextTable; 3383 e = null; 3384 pushState(t, i, n); 3385 continue; 3386 } 3387 else if (e instanceof TreeBin) 3388 e = ((TreeBin<K,V>)e).first; 3389 else 3390 e = null; 3391 } 3392 if (stack != null) 3393 recoverState(n); 3394 else if ((index = i + baseSize) >= n) 3395 index = ++baseIndex; // visit upper slots if present 3396 } 3397 } 3398 3399 /** 3400 * Saves traversal state upon encountering a forwarding node. 3401 */ 3402 private void pushState(Node<K,V>[] t, int i, int n) { 3403 TableStack<K,V> s = spare; // reuse if possible 3404 if (s != null) 3405 spare = s.next; 3406 else 3407 s = new TableStack<K,V>(); 3408 s.tab = t; 3409 s.length = n; 3410 s.index = i; 3411 s.next = stack; 3412 stack = s; 3413 } 3414 3415 /** 3416 * Possibly pops traversal state. 3417 * 3418 * @param n length of current table 3419 */ 3420 private void recoverState(int n) { 3421 TableStack<K,V> s; int len; 3422 while ((s = stack) != null && (index += (len = s.length)) >= n) { 3423 n = len; 3424 index = s.index; 3425 tab = s.tab; 3426 s.tab = null; 3427 TableStack<K,V> next = s.next; 3428 s.next = spare; // save for reuse 3429 stack = next; 3430 spare = s; 3431 } 3432 if (s == null && (index += baseSize) >= n) 3433 index = ++baseIndex; 3434 } 3435 } 3436 3437 /** 3438 * Base of key, value, and entry Iterators. Adds fields to 3439 * Traverser to support iterator.remove. 
3440 */ 3441 static class BaseIterator<K,V> extends Traverser<K,V> { 3442 final ConcurrentHashMap<K,V> map; 3443 Node<K,V> lastReturned; 3444 BaseIterator(Node<K,V>[] tab, int size, int index, int limit, 3445 ConcurrentHashMap<K,V> map) { 3446 super(tab, size, index, limit); 3447 this.map = map; 3448 advance(); 3449 } 3450 3451 public final boolean hasNext() { return next != null; } 3452 public final boolean hasMoreElements() { return next != null; } 3453 3454 public final void remove() { 3455 Node<K,V> p; 3456 if ((p = lastReturned) == null) 3457 throw new IllegalStateException(); 3458 lastReturned = null; 3459 map.replaceNode(p.key, null, null); 3460 } 3461 } 3462 3463 static final class KeyIterator<K,V> extends BaseIterator<K,V> 3464 implements Iterator<K>, Enumeration<K> { 3465 KeyIterator(Node<K,V>[] tab, int size, int index, int limit, 3466 ConcurrentHashMap<K,V> map) { 3467 super(tab, size, index, limit, map); 3468 } 3469 3470 public final K next() { 3471 Node<K,V> p; 3472 if ((p = next) == null) 3473 throw new NoSuchElementException(); 3474 K k = p.key; 3475 lastReturned = p; 3476 advance(); 3477 return k; 3478 } 3479 3480 public final K nextElement() { return next(); } 3481 } 3482 3483 static final class ValueIterator<K,V> extends BaseIterator<K,V> 3484 implements Iterator<V>, Enumeration<V> { 3485 ValueIterator(Node<K,V>[] tab, int size, int index, int limit, 3486 ConcurrentHashMap<K,V> map) { 3487 super(tab, size, index, limit, map); 3488 } 3489 3490 public final V next() { 3491 Node<K,V> p; 3492 if ((p = next) == null) 3493 throw new NoSuchElementException(); 3494 V v = p.val; 3495 lastReturned = p; 3496 advance(); 3497 return v; 3498 } 3499 3500 public final V nextElement() { return next(); } 3501 } 3502 3503 static final class EntryIterator<K,V> extends BaseIterator<K,V> 3504 implements Iterator<Map.Entry<K,V>> { 3505 EntryIterator(Node<K,V>[] tab, int size, int index, int limit, 3506 ConcurrentHashMap<K,V> map) { 3507 super(tab, size, index, limit, 
map); 3508 } 3509 3510 public final Map.Entry<K,V> next() { 3511 Node<K,V> p; 3512 if ((p = next) == null) 3513 throw new NoSuchElementException(); 3514 K k = p.key; 3515 V v = p.val; 3516 lastReturned = p; 3517 advance(); 3518 return new MapEntry<K,V>(k, v, map); 3519 } 3520 } 3521 3522 /** 3523 * Exported Entry for EntryIterator. 3524 */ 3525 static final class MapEntry<K,V> implements Map.Entry<K,V> { 3526 final K key; // non-null 3527 V val; // non-null 3528 final ConcurrentHashMap<K,V> map; 3529 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) { 3530 this.key = key; 3531 this.val = val; 3532 this.map = map; 3533 } 3534 public K getKey() { return key; } 3535 public V getValue() { return val; } 3536 public int hashCode() { return key.hashCode() ^ val.hashCode(); } 3537 public String toString() { 3538 return Helpers.mapEntryToString(key, val); 3539 } 3540 3541 public boolean equals(Object o) { 3542 Object k, v; Map.Entry<?,?> e; 3543 return ((o instanceof Map.Entry) && 3544 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 3545 (v = e.getValue()) != null && 3546 (k == key || k.equals(key)) && 3547 (v == val || v.equals(val))); 3548 } 3549 3550 /** 3551 * Sets our entry's value and writes through to the map. The 3552 * value to return is somewhat arbitrary here. Since we do not 3553 * necessarily track asynchronous changes, the most recent 3554 * "previous" value could be different from what we return (or 3555 * could even have been removed, in which case the put will 3556 * re-establish). We do not and cannot guarantee more. 
3557 */ 3558 public V setValue(V value) { 3559 if (value == null) throw new NullPointerException(); 3560 V v = val; 3561 val = value; 3562 map.put(key, value); 3563 return v; 3564 } 3565 } 3566 3567 static final class KeySpliterator<K,V> extends Traverser<K,V> 3568 implements Spliterator<K> { 3569 long est; // size estimate 3570 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, 3571 long est) { 3572 super(tab, size, index, limit); 3573 this.est = est; 3574 } 3575 3576 public KeySpliterator<K,V> trySplit() { 3577 int i, f, h; 3578 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3579 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h, 3580 f, est >>>= 1); 3581 } 3582 3583 public void forEachRemaining(Consumer<? super K> action) { 3584 if (action == null) throw new NullPointerException(); 3585 for (Node<K,V> p; (p = advance()) != null;) 3586 action.accept(p.key); 3587 } 3588 3589 public boolean tryAdvance(Consumer<? super K> action) { 3590 if (action == null) throw new NullPointerException(); 3591 Node<K,V> p; 3592 if ((p = advance()) == null) 3593 return false; 3594 action.accept(p.key); 3595 return true; 3596 } 3597 3598 public long estimateSize() { return est; } 3599 3600 public int characteristics() { 3601 return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3602 Spliterator.NONNULL; 3603 } 3604 } 3605 3606 static final class ValueSpliterator<K,V> extends Traverser<K,V> 3607 implements Spliterator<V> { 3608 long est; // size estimate 3609 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, 3610 long est) { 3611 super(tab, size, index, limit); 3612 this.est = est; 3613 } 3614 3615 public ValueSpliterator<K,V> trySplit() { 3616 int i, f, h; 3617 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3618 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h, 3619 f, est >>>= 1); 3620 } 3621 3622 public void forEachRemaining(Consumer<? 
super V> action) { 3623 if (action == null) throw new NullPointerException(); 3624 for (Node<K,V> p; (p = advance()) != null;) 3625 action.accept(p.val); 3626 } 3627 3628 public boolean tryAdvance(Consumer<? super V> action) { 3629 if (action == null) throw new NullPointerException(); 3630 Node<K,V> p; 3631 if ((p = advance()) == null) 3632 return false; 3633 action.accept(p.val); 3634 return true; 3635 } 3636 3637 public long estimateSize() { return est; } 3638 3639 public int characteristics() { 3640 return Spliterator.CONCURRENT | Spliterator.NONNULL; 3641 } 3642 } 3643 3644 static final class EntrySpliterator<K,V> extends Traverser<K,V> 3645 implements Spliterator<Map.Entry<K,V>> { 3646 final ConcurrentHashMap<K,V> map; // To export MapEntry 3647 long est; // size estimate 3648 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, 3649 long est, ConcurrentHashMap<K,V> map) { 3650 super(tab, size, index, limit); 3651 this.map = map; 3652 this.est = est; 3653 } 3654 3655 public EntrySpliterator<K,V> trySplit() { 3656 int i, f, h; 3657 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3658 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h, 3659 f, est >>>= 1, map); 3660 } 3661 3662 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) { 3663 if (action == null) throw new NullPointerException(); 3664 for (Node<K,V> p; (p = advance()) != null; ) 3665 action.accept(new MapEntry<K,V>(p.key, p.val, map)); 3666 } 3667 3668 public boolean tryAdvance(Consumer<? 
super Map.Entry<K,V>> action) { 3669 if (action == null) throw new NullPointerException(); 3670 Node<K,V> p; 3671 if ((p = advance()) == null) 3672 return false; 3673 action.accept(new MapEntry<K,V>(p.key, p.val, map)); 3674 return true; 3675 } 3676 3677 public long estimateSize() { return est; } 3678 3679 public int characteristics() { 3680 return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3681 Spliterator.NONNULL; 3682 } 3683 } 3684 3685 // Parallel bulk operations 3686 3687 /** 3688 * Computes initial batch value for bulk tasks. The returned value 3689 * is approximately exp2 of the number of times (minus one) to 3690 * split task by two before executing leaf action. This value is 3691 * faster to compute and more convenient to use as a guide to 3692 * splitting than is the depth, since it is used while dividing by 3693 * two anyway. 3694 */ 3695 final int batchFor(long b) { 3696 long n; 3697 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) 3698 return 0; 3699 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 3700 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; 3701 } 3702 3703 /** 3704 * Performs the given action for each (key, value). 3705 * 3706 * @param parallelismThreshold the (estimated) number of elements 3707 * needed for this operation to be executed in parallel 3708 * @param action the action 3709 * @since 1.8 3710 */ 3711 public void forEach(long parallelismThreshold, 3712 BiConsumer<? super K,? super V> action) { 3713 if (action == null) throw new NullPointerException(); 3714 new ForEachMappingTask<K,V> 3715 (null, batchFor(parallelismThreshold), 0, 0, table, 3716 action).invoke(); 3717 } 3718 3719 /** 3720 * Performs the given action for each non-null transformation 3721 * of each (key, value). 
3722 * 3723 * @param parallelismThreshold the (estimated) number of elements 3724 * needed for this operation to be executed in parallel 3725 * @param transformer a function returning the transformation 3726 * for an element, or null if there is no transformation (in 3727 * which case the action is not applied) 3728 * @param action the action 3729 * @param <U> the return type of the transformer 3730 * @since 1.8 3731 */ 3732 public <U> void forEach(long parallelismThreshold, 3733 BiFunction<? super K, ? super V, ? extends U> transformer, 3734 Consumer<? super U> action) { 3735 if (transformer == null || action == null) 3736 throw new NullPointerException(); 3737 new ForEachTransformedMappingTask<K,V,U> 3738 (null, batchFor(parallelismThreshold), 0, 0, table, 3739 transformer, action).invoke(); 3740 } 3741 3742 /** 3743 * Returns a non-null result from applying the given search 3744 * function on each (key, value), or null if none. Upon 3745 * success, further element processing is suppressed and the 3746 * results of any other parallel invocations of the search 3747 * function are ignored. 3748 * 3749 * @param parallelismThreshold the (estimated) number of elements 3750 * needed for this operation to be executed in parallel 3751 * @param searchFunction a function returning a non-null 3752 * result on success, else null 3753 * @param <U> the return type of the search function 3754 * @return a non-null result from applying the given search 3755 * function on each (key, value), or null if none 3756 * @since 1.8 3757 */ 3758 public <U> U search(long parallelismThreshold, 3759 BiFunction<? super K, ? super V, ? 
extends U> searchFunction) { 3760 if (searchFunction == null) throw new NullPointerException(); 3761 return new SearchMappingsTask<K,V,U> 3762 (null, batchFor(parallelismThreshold), 0, 0, table, 3763 searchFunction, new AtomicReference<U>()).invoke(); 3764 } 3765 3766 /** 3767 * Returns the result of accumulating the given transformation 3768 * of all (key, value) pairs using the given reducer to 3769 * combine values, or null if none. 3770 * 3771 * @param parallelismThreshold the (estimated) number of elements 3772 * needed for this operation to be executed in parallel 3773 * @param transformer a function returning the transformation 3774 * for an element, or null if there is no transformation (in 3775 * which case it is not combined) 3776 * @param reducer a commutative associative combining function 3777 * @param <U> the return type of the transformer 3778 * @return the result of accumulating the given transformation 3779 * of all (key, value) pairs 3780 * @since 1.8 3781 */ 3782 public <U> U reduce(long parallelismThreshold, 3783 BiFunction<? super K, ? super V, ? extends U> transformer, 3784 BiFunction<? super U, ? super U, ? extends U> reducer) { 3785 if (transformer == null || reducer == null) 3786 throw new NullPointerException(); 3787 return new MapReduceMappingsTask<K,V,U> 3788 (null, batchFor(parallelismThreshold), 0, 0, table, 3789 null, transformer, reducer).invoke(); 3790 } 3791 3792 /** 3793 * Returns the result of accumulating the given transformation 3794 * of all (key, value) pairs using the given reducer to 3795 * combine values, and the given basis as an identity value. 
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public double reduceToDouble(long parallelismThreshold,
                                 ToDoubleBiFunction<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        // All bulk methods below share this pattern: validate arguments, then
        // build a fork/join task over the current table and invoke() it
        // (ForkJoinTask.invoke runs the computation and joins its result).
        return new MapReduceMappingsToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public long reduceToLong(long parallelismThreshold,
                             ToLongBiFunction<? super K, ? super V> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public int reduceToInt(long parallelismThreshold,
                           ToIntBiFunction<? super K, ? super V> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachKey(long parallelismThreshold,
                           Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        new ForEachKeyTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachKey(long parallelismThreshold,
                               Function<? super K, ? extends U> transformer,
                               Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedKeyTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     * @since 1.8
     */
    public <U> U searchKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        // The shared AtomicReference carries the first successful (non-null)
        // result across the parallel subtasks.
        return new SearchKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     * @since 1.8
     */
    public K reduceKeys(long parallelismThreshold,
                        BiFunction<? super K, ? super K, ? extends K> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceKeysTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public <U> U reduceKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> transformer,
                            BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public double reduceKeysToDouble(long parallelismThreshold,
                                     ToDoubleFunction<? super K> transformer,
                                     double basis,
                                     DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public long reduceKeysToLong(long parallelismThreshold,
                                 ToLongFunction<? super K> transformer,
                                 long basis,
                                 LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        // Same delegation pattern as the other reduce* methods: validate,
        // then fork/join over the current table.
        return new MapReduceKeysToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public int reduceKeysToInt(long parallelismThreshold,
                               ToIntFunction<? super K> transformer,
                               int basis,
                               IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachValue(long parallelismThreshold,
                             Consumer<? super V> action) {
        if (action == null)
            throw new NullPointerException();
        new ForEachValueTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachValue(long parallelismThreshold,
                                 Function<? super V, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedValueTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each value, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each value, or null if none
     * @since 1.8
     */
    public <U> U searchValues(long parallelismThreshold,
                              Function<? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        // The shared AtomicReference carries the first successful (non-null)
        // result across the parallel subtasks.
        return new SearchValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all values using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all values
     * @since 1.8
     */
    public V reduceValues(long parallelismThreshold,
                          BiFunction<? super V, ? super V, ? extends V> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceValuesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public <U> U reduceValues(long parallelismThreshold,
                              Function<? super V, ? extends U> transformer,
                              BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public double reduceValuesToDouble(long parallelismThreshold,
                                       ToDoubleFunction<? super V> transformer,
                                       double basis,
                                       DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public long reduceValuesToLong(long parallelismThreshold,
                                   ToLongFunction<? super V> transformer,
                                   long basis,
                                   LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public int reduceValuesToInt(long parallelismThreshold,
                                 ToIntFunction<? super V> transformer,
                                 int basis,
                                 IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        // Same delegation pattern as the other reduce* methods: validate,
        // then fork/join over the current table.
        return new MapReduceValuesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachEntry(long parallelismThreshold,
                             Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
                                  action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachEntry(long parallelismThreshold,
                                 Function<Map.Entry<K,V>, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedEntryTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each entry, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each entry, or null if none
     * @since 1.8
     */
    public <U> U searchEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all entries using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all entries
     * @since 1.8
     */
    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceEntriesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public <U> U reduceEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> transformer,
                               BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public double reduceEntriesToDouble(long parallelismThreshold,
                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
                                        double basis,
                                        DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public long reduceEntriesToLong(long parallelismThreshold,
                                    ToLongFunction<Map.Entry<K,V>> transformer,
                                    long basis,
                                    LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public int reduceEntriesToInt(long parallelismThreshold,
                                  ToIntFunction<Map.Entry<K,V>> transformer,
                                  int basis,
                                  IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }


    /* ----------------Views -------------- */

    /**
     * Base class for views.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                // The iterator may yield more elements than the initial size
                // suggested (the map can grow during traversal); grow the
                // array by ~50% (+1), clamping at MAX_ARRAY_SIZE.
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if fewer elements were seen than the array can hold.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            // Reuse the caller's array when it is large enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                // Same grow-by-~50% logic as toArray() above.
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    // Null is never present in this map's views; the explicit
                    // check also avoids the NPE that contains(null) would
                    // throw for key/entry views.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Use (c instanceof Set) as a hint that lookup in c is as
            // efficient as this view
            Node<K,V>[] t;
            if ((t = map.table) == null) {
                return false;
            } else if (c instanceof Set<?> && c.size() > t.length) {
                // Iterate this view and probe c: cheaper when c is a large set.
                for (Iterator<?> it = iterator(); it.hasNext(); ) {
                    if (c.contains(it.next())) {
                        it.remove();
                        modified = true;
                    }
                }
            } else {
                for (Object e : c)
                    modified |= remove(e);
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4593 4594 /** 4595 * A view of a ConcurrentHashMap as a {@link Set} of keys, in 4596 * which additions may optionally be enabled by mapping to a 4597 * common value. This class cannot be directly instantiated. 4598 * See {@link #keySet() keySet()}, 4599 * {@link #keySet(Object) keySet(V)}, 4600 * {@link #newKeySet() newKeySet()}, 4601 * {@link #newKeySet(int) newKeySet(int)}. 4602 * 4603 * @since 1.8 4604 */ 4605 public static class KeySetView<K,V> extends CollectionView<K,V,K> 4606 implements Set<K>, java.io.Serializable { 4607 private static final long serialVersionUID = 7249069246763182397L; 4608 private final V value; 4609 KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public 4610 super(map); 4611 this.value = value; 4612 } 4613 4614 /** 4615 * Returns the default mapped value for additions, 4616 * or {@code null} if additions are not supported. 4617 * 4618 * @return the default mapped value for additions, or {@code null} 4619 * if not supported 4620 */ 4621 public V getMappedValue() { return value; } 4622 4623 /** 4624 * {@inheritDoc} 4625 * @throws NullPointerException if the specified key is null 4626 */ 4627 public boolean contains(Object o) { return map.containsKey(o); } 4628 4629 /** 4630 * Removes the key from this map view, by removing the key (and its 4631 * corresponding value) from the backing map. This method does 4632 * nothing if the key is not in the map. 4633 * 4634 * @param o the key to be removed from the backing map 4635 * @return {@code true} if the backing map contained the specified key 4636 * @throws NullPointerException if the specified key is null 4637 */ 4638 public boolean remove(Object o) { return map.remove(o) != null; } 4639 4640 /** 4641 * @return an iterator over the keys of the backing map 4642 */ 4643 public Iterator<K> iterator() { 4644 Node<K,V>[] t; 4645 ConcurrentHashMap<K,V> m = map; 4646 int f = (t = m.table) == null ? 
0 : t.length; 4647 return new KeyIterator<K,V>(t, f, 0, f, m); 4648 } 4649 4650 /** 4651 * Adds the specified key to this set view by mapping the key to 4652 * the default mapped value in the backing map, if defined. 4653 * 4654 * @param e key to be added 4655 * @return {@code true} if this set changed as a result of the call 4656 * @throws NullPointerException if the specified key is null 4657 * @throws UnsupportedOperationException if no default mapped value 4658 * for additions was provided 4659 */ 4660 public boolean add(K e) { 4661 V v; 4662 if ((v = value) == null) 4663 throw new UnsupportedOperationException(); 4664 return map.putVal(e, v, true) == null; 4665 } 4666 4667 /** 4668 * Adds all of the elements in the specified collection to this set, 4669 * as if by calling {@link #add} on each one. 4670 * 4671 * @param c the elements to be inserted into this set 4672 * @return {@code true} if this set changed as a result of the call 4673 * @throws NullPointerException if the collection or any of its 4674 * elements are {@code null} 4675 * @throws UnsupportedOperationException if no default mapped value 4676 * for additions was provided 4677 */ 4678 public boolean addAll(Collection<? extends K> c) { 4679 boolean added = false; 4680 V v; 4681 if ((v = value) == null) 4682 throw new UnsupportedOperationException(); 4683 for (K e : c) { 4684 if (map.putVal(e, v, true) == null) 4685 added = true; 4686 } 4687 return added; 4688 } 4689 4690 public int hashCode() { 4691 int h = 0; 4692 for (K e : this) 4693 h += e.hashCode(); 4694 return h; 4695 } 4696 4697 public boolean equals(Object o) { 4698 Set<?> c; 4699 return ((o instanceof Set) && 4700 ((c = (Set<?>)o) == this || 4701 (containsAll(c) && c.containsAll(this)))); 4702 } 4703 4704 public Spliterator<K> spliterator() { 4705 Node<K,V>[] t; 4706 ConcurrentHashMap<K,V> m = map; 4707 long n = m.sumCount(); 4708 int f = (t = m.table) == null ? 0 : t.length; 4709 return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 
0L : n); 4710 } 4711 4712 public void forEach(Consumer<? super K> action) { 4713 if (action == null) throw new NullPointerException(); 4714 Node<K,V>[] t; 4715 if ((t = map.table) != null) { 4716 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4717 for (Node<K,V> p; (p = it.advance()) != null; ) 4718 action.accept(p.key); 4719 } 4720 } 4721 } 4722 4723 /** 4724 * A view of a ConcurrentHashMap as a {@link Collection} of 4725 * values, in which additions are disabled. This class cannot be 4726 * directly instantiated. See {@link #values()}. 4727 */ 4728 static final class ValuesView<K,V> extends CollectionView<K,V,V> 4729 implements Collection<V>, java.io.Serializable { 4730 private static final long serialVersionUID = 2249069246763182397L; 4731 ValuesView(ConcurrentHashMap<K,V> map) { super(map); } 4732 public final boolean contains(Object o) { 4733 return map.containsValue(o); 4734 } 4735 4736 public final boolean remove(Object o) { 4737 if (o != null) { 4738 for (Iterator<V> it = iterator(); it.hasNext();) { 4739 if (o.equals(it.next())) { 4740 it.remove(); 4741 return true; 4742 } 4743 } 4744 } 4745 return false; 4746 } 4747 4748 public final Iterator<V> iterator() { 4749 ConcurrentHashMap<K,V> m = map; 4750 Node<K,V>[] t; 4751 int f = (t = m.table) == null ? 0 : t.length; 4752 return new ValueIterator<K,V>(t, f, 0, f, m); 4753 } 4754 4755 public final boolean add(V e) { 4756 throw new UnsupportedOperationException(); 4757 } 4758 public final boolean addAll(Collection<? extends V> c) { 4759 throw new UnsupportedOperationException(); 4760 } 4761 4762 @Override public boolean removeAll(Collection<?> c) { 4763 if (c == null) throw new NullPointerException(); 4764 boolean modified = false; 4765 for (Iterator<V> it = iterator(); it.hasNext();) { 4766 if (c.contains(it.next())) { 4767 it.remove(); 4768 modified = true; 4769 } 4770 } 4771 return modified; 4772 } 4773 4774 public boolean removeIf(Predicate<? 
super V> filter) {
            // Bulk filtered removal is implemented by the backing map.
            return map.removeValueIf(filter);
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // sumCount() may transiently be negative; clamped to 0L.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        // Applies the action to every value, traversing the current table.
        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
     * entries. This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        // An entry is contained iff its (non-null) key currently maps
        // to an equal (non-null) value.
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        // Removes the mapping only when both key and value match.
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ?
0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        // Unlike the key and value views, entries may always be added:
        // the entry supplies both key and value. With onlyIfAbsent ==
        // false, putVal overwrites an existing mapping and returns its
        // old value, in which case the set did not change (false).
        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
            // Bulk filtered removal is implemented by the backing map.
            return map.removeEntryIf(filter);
        }

        // Per the Set contract: the sum of the entries' hash codes.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        // Set equality: identity, or mutual containment of elements.
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // sumCount() may transiently be negative; clamped to 0L.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                // Each node is exported wrapped as a MapEntry tied to the
                // backing map rather than handing out the Node itself.
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }

    // -------------------------------------------------------

    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    @SuppressWarnings("serial")
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;              // same as Traverser
        Node<K,V> next;               // next node to visit
        TableStack<K,V> stack, spare; // saved/reusable state across forwardings
        int index;                    // index of next bin to examine
        int baseIndex;                // current index in the initial table
        int baseLimit;                // index bound for the initial table
        final int baseSize;           // initial table size
        int batch;                    // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                // Root task: traverse the entire table.
                this.baseSize = this.baseLimit = t.length;
            else {
                // Subtask: bounded range [i, f), same base size as parent.
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version: advances to the next valid node,
         * returning it, or null if none remain. When a forwarding node
         * is hit during a resize, traversal descends into the next
         * table, saving position via pushState/recoverState.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: follow into nextTable,
                        // remembering where we were.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex;
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node,
         * reusing a spare TableStack node when available.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops saved traversal state, restoring the position
         * in an outer table; popped nodes are kept on the spare list
         * for reuse.
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }

    /*
     * Task classes. Coded in a regular but ugly format/style to
     * simplify checks that each variant differs in the right way from
     * others. The null screenings exist because compilers cannot tell
     * that we've already null-checked task arguments, so we force
     * simplest hoisted bypass to help avoid convoluted traps.
     */

    /** Applies an action to each key in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                // While split budget (batch) lasts, halve the remaining
                // range: shrink this task to [i, h) and fork a sibling
                // for [h, f).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process whatever range remains with this task.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }

    /** Applies an action to each value in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<?
super V> action;
            if ((action = this.action) != null) {
                // Halve the range and fork a sibling for the upper half
                // while the split budget (batch) lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }

    /** Applies an action to each entry in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }

    /** Applies an action to each (key, value) pair in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ?
super V> action;
            if ((action = this.action) != null) {
                // Halve the range and fork a sibling for the upper half
                // while the split budget (batch) lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }

    /**
     * Applies an action to each non-null transformation of each key,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    /**
     * Applies an action to each non-null transformation of each value,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<?
super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Halve the range and fork a sibling for the upper half
                // while the split budget (batch) lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    /**
     * Applies an action to each non-null transformation of each entry,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<?
super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Halve the range and fork a sibling for the upper half
                // while the split budget (batch) lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    /**
     * Applies an action to each non-null transformation of each
     * (key, value) pair, in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    /**
     * Returns a non-null result from applying a search function to
     * each key, or null if none; terminates other tasks once found.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ?
extends U> searchFunction;
        // Shared across all subtasks; first non-null result wins.
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Stop splitting (and working) once any task found a result.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // CAS ensures only the first winner completes the root.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    /**
     * Returns a non-null result from applying a search function to
     * each value, or null if none; terminates other tasks once found.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ?
extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Stop splitting (and working) once any task found a result.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // CAS ensures only the first winner completes the root.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    /**
     * Returns a non-null result from applying a search function to
     * each entry, or null if none; terminates other tasks once found.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ?
extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Stop splitting (and working) once any task found a result.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }

    /**
     * Returns a non-null result from applying a search function to
     * each (key, value) pair, or null if none; terminates other tasks
     * once found.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ?
extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    // Stop splitting (and working) once any task found a result.
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // CAS ensures only the first winner completes the root.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    /** Reduces keys with a given reducer, in parallel. */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        // rights: list of forked right-hand subtasks whose results are
        // merged into this task on completion; linked via nextRight.
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ?
r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed right-hand subtasks up the
                // completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /** Reduces values with a given reducer, in parallel. */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ?
sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /** Reduces entries with a given reducer, in parallel. */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed subtasks up the completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /** Maps keys and reduces the non-null transformed results, in parallel. */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ?
extends U> reducer;
        U result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Halve the range and fork siblings while the split
                // budget (batch) lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed subtasks up the completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /** Maps values and reduces the non-null transformed results, in parallel. */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ?
extends U> reducer;
        U result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed subtasks up the completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /** Maps entries and reduces the non-null transformed results, in parallel. */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ?
extends U> reducer;
        U result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed subtasks up the completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /**
     * Maps (key, value) pairs and reduces the non-null transformed
     * results, in parallel.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ?
extends U> reducer;
        U result;
        // Forked right-hand subtasks, linked via nextRight, merged on completion.
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformations mean "no result": skipped.
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // Merge results of completed subtasks up the completer tree.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    /**
     * Maps keys to doubles and reduces from a given basis, in
     * parallel; every transformed value participates (no null screen).
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<?
super K> transformer; 5770 final DoubleBinaryOperator reducer; 5771 final double basis; 5772 double result; 5773 MapReduceKeysToDoubleTask<K,V> rights, nextRight; 5774 MapReduceKeysToDoubleTask 5775 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5776 MapReduceKeysToDoubleTask<K,V> nextRight, 5777 ToDoubleFunction<? super K> transformer, 5778 double basis, 5779 DoubleBinaryOperator reducer) { 5780 super(p, b, i, f, t); this.nextRight = nextRight; 5781 this.transformer = transformer; 5782 this.basis = basis; this.reducer = reducer; 5783 } 5784 public final Double getRawResult() { return result; } 5785 public final void compute() { 5786 final ToDoubleFunction<? super K> transformer; 5787 final DoubleBinaryOperator reducer; 5788 if ((transformer = this.transformer) != null && 5789 (reducer = this.reducer) != null) { 5790 double r = this.basis; 5791 for (int i = baseIndex, f, h; batch > 0 && 5792 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5793 addToPendingCount(1); 5794 (rights = new MapReduceKeysToDoubleTask<K,V> 5795 (this, batch >>>= 1, baseLimit = h, f, tab, 5796 rights, transformer, r, reducer)).fork(); 5797 } 5798 for (Node<K,V> p; (p = advance()) != null; ) 5799 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key)); 5800 result = r; 5801 CountedCompleter<?> c; 5802 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5803 @SuppressWarnings("unchecked") 5804 MapReduceKeysToDoubleTask<K,V> 5805 t = (MapReduceKeysToDoubleTask<K,V>)c, 5806 s = t.rights; 5807 while (s != null) { 5808 t.result = reducer.applyAsDouble(t.result, s.result); 5809 s = t.rights = s.nextRight; 5810 } 5811 } 5812 } 5813 } 5814 } 5815 5816 @SuppressWarnings("serial") 5817 static final class MapReduceValuesToDoubleTask<K,V> 5818 extends BulkTask<K,V,Double> { 5819 final ToDoubleFunction<? 
super V> transformer; 5820 final DoubleBinaryOperator reducer; 5821 final double basis; 5822 double result; 5823 MapReduceValuesToDoubleTask<K,V> rights, nextRight; 5824 MapReduceValuesToDoubleTask 5825 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5826 MapReduceValuesToDoubleTask<K,V> nextRight, 5827 ToDoubleFunction<? super V> transformer, 5828 double basis, 5829 DoubleBinaryOperator reducer) { 5830 super(p, b, i, f, t); this.nextRight = nextRight; 5831 this.transformer = transformer; 5832 this.basis = basis; this.reducer = reducer; 5833 } 5834 public final Double getRawResult() { return result; } 5835 public final void compute() { 5836 final ToDoubleFunction<? super V> transformer; 5837 final DoubleBinaryOperator reducer; 5838 if ((transformer = this.transformer) != null && 5839 (reducer = this.reducer) != null) { 5840 double r = this.basis; 5841 for (int i = baseIndex, f, h; batch > 0 && 5842 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5843 addToPendingCount(1); 5844 (rights = new MapReduceValuesToDoubleTask<K,V> 5845 (this, batch >>>= 1, baseLimit = h, f, tab, 5846 rights, transformer, r, reducer)).fork(); 5847 } 5848 for (Node<K,V> p; (p = advance()) != null; ) 5849 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); 5850 result = r; 5851 CountedCompleter<?> c; 5852 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5853 @SuppressWarnings("unchecked") 5854 MapReduceValuesToDoubleTask<K,V> 5855 t = (MapReduceValuesToDoubleTask<K,V>)c, 5856 s = t.rights; 5857 while (s != null) { 5858 t.result = reducer.applyAsDouble(t.result, s.result); 5859 s = t.rights = s.nextRight; 5860 } 5861 } 5862 } 5863 } 5864 } 5865 5866 @SuppressWarnings("serial") 5867 static final class MapReduceEntriesToDoubleTask<K,V> 5868 extends BulkTask<K,V,Double> { 5869 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5870 final DoubleBinaryOperator reducer; 5871 final double basis; 5872 double result; 5873 MapReduceEntriesToDoubleTask<K,V> rights, nextRight; 
5874 MapReduceEntriesToDoubleTask 5875 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5876 MapReduceEntriesToDoubleTask<K,V> nextRight, 5877 ToDoubleFunction<Map.Entry<K,V>> transformer, 5878 double basis, 5879 DoubleBinaryOperator reducer) { 5880 super(p, b, i, f, t); this.nextRight = nextRight; 5881 this.transformer = transformer; 5882 this.basis = basis; this.reducer = reducer; 5883 } 5884 public final Double getRawResult() { return result; } 5885 public final void compute() { 5886 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5887 final DoubleBinaryOperator reducer; 5888 if ((transformer = this.transformer) != null && 5889 (reducer = this.reducer) != null) { 5890 double r = this.basis; 5891 for (int i = baseIndex, f, h; batch > 0 && 5892 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5893 addToPendingCount(1); 5894 (rights = new MapReduceEntriesToDoubleTask<K,V> 5895 (this, batch >>>= 1, baseLimit = h, f, tab, 5896 rights, transformer, r, reducer)).fork(); 5897 } 5898 for (Node<K,V> p; (p = advance()) != null; ) 5899 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); 5900 result = r; 5901 CountedCompleter<?> c; 5902 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5903 @SuppressWarnings("unchecked") 5904 MapReduceEntriesToDoubleTask<K,V> 5905 t = (MapReduceEntriesToDoubleTask<K,V>)c, 5906 s = t.rights; 5907 while (s != null) { 5908 t.result = reducer.applyAsDouble(t.result, s.result); 5909 s = t.rights = s.nextRight; 5910 } 5911 } 5912 } 5913 } 5914 } 5915 5916 @SuppressWarnings("serial") 5917 static final class MapReduceMappingsToDoubleTask<K,V> 5918 extends BulkTask<K,V,Double> { 5919 final ToDoubleBiFunction<? super K, ? 
super V> transformer; 5920 final DoubleBinaryOperator reducer; 5921 final double basis; 5922 double result; 5923 MapReduceMappingsToDoubleTask<K,V> rights, nextRight; 5924 MapReduceMappingsToDoubleTask 5925 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5926 MapReduceMappingsToDoubleTask<K,V> nextRight, 5927 ToDoubleBiFunction<? super K, ? super V> transformer, 5928 double basis, 5929 DoubleBinaryOperator reducer) { 5930 super(p, b, i, f, t); this.nextRight = nextRight; 5931 this.transformer = transformer; 5932 this.basis = basis; this.reducer = reducer; 5933 } 5934 public final Double getRawResult() { return result; } 5935 public final void compute() { 5936 final ToDoubleBiFunction<? super K, ? super V> transformer; 5937 final DoubleBinaryOperator reducer; 5938 if ((transformer = this.transformer) != null && 5939 (reducer = this.reducer) != null) { 5940 double r = this.basis; 5941 for (int i = baseIndex, f, h; batch > 0 && 5942 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5943 addToPendingCount(1); 5944 (rights = new MapReduceMappingsToDoubleTask<K,V> 5945 (this, batch >>>= 1, baseLimit = h, f, tab, 5946 rights, transformer, r, reducer)).fork(); 5947 } 5948 for (Node<K,V> p; (p = advance()) != null; ) 5949 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val)); 5950 result = r; 5951 CountedCompleter<?> c; 5952 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5953 @SuppressWarnings("unchecked") 5954 MapReduceMappingsToDoubleTask<K,V> 5955 t = (MapReduceMappingsToDoubleTask<K,V>)c, 5956 s = t.rights; 5957 while (s != null) { 5958 t.result = reducer.applyAsDouble(t.result, s.result); 5959 s = t.rights = s.nextRight; 5960 } 5961 } 5962 } 5963 } 5964 } 5965 5966 @SuppressWarnings("serial") 5967 static final class MapReduceKeysToLongTask<K,V> 5968 extends BulkTask<K,V,Long> { 5969 final ToLongFunction<? 
super K> transformer; 5970 final LongBinaryOperator reducer; 5971 final long basis; 5972 long result; 5973 MapReduceKeysToLongTask<K,V> rights, nextRight; 5974 MapReduceKeysToLongTask 5975 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5976 MapReduceKeysToLongTask<K,V> nextRight, 5977 ToLongFunction<? super K> transformer, 5978 long basis, 5979 LongBinaryOperator reducer) { 5980 super(p, b, i, f, t); this.nextRight = nextRight; 5981 this.transformer = transformer; 5982 this.basis = basis; this.reducer = reducer; 5983 } 5984 public final Long getRawResult() { return result; } 5985 public final void compute() { 5986 final ToLongFunction<? super K> transformer; 5987 final LongBinaryOperator reducer; 5988 if ((transformer = this.transformer) != null && 5989 (reducer = this.reducer) != null) { 5990 long r = this.basis; 5991 for (int i = baseIndex, f, h; batch > 0 && 5992 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5993 addToPendingCount(1); 5994 (rights = new MapReduceKeysToLongTask<K,V> 5995 (this, batch >>>= 1, baseLimit = h, f, tab, 5996 rights, transformer, r, reducer)).fork(); 5997 } 5998 for (Node<K,V> p; (p = advance()) != null; ) 5999 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key)); 6000 result = r; 6001 CountedCompleter<?> c; 6002 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6003 @SuppressWarnings("unchecked") 6004 MapReduceKeysToLongTask<K,V> 6005 t = (MapReduceKeysToLongTask<K,V>)c, 6006 s = t.rights; 6007 while (s != null) { 6008 t.result = reducer.applyAsLong(t.result, s.result); 6009 s = t.rights = s.nextRight; 6010 } 6011 } 6012 } 6013 } 6014 } 6015 6016 @SuppressWarnings("serial") 6017 static final class MapReduceValuesToLongTask<K,V> 6018 extends BulkTask<K,V,Long> { 6019 final ToLongFunction<? 
super V> transformer; 6020 final LongBinaryOperator reducer; 6021 final long basis; 6022 long result; 6023 MapReduceValuesToLongTask<K,V> rights, nextRight; 6024 MapReduceValuesToLongTask 6025 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6026 MapReduceValuesToLongTask<K,V> nextRight, 6027 ToLongFunction<? super V> transformer, 6028 long basis, 6029 LongBinaryOperator reducer) { 6030 super(p, b, i, f, t); this.nextRight = nextRight; 6031 this.transformer = transformer; 6032 this.basis = basis; this.reducer = reducer; 6033 } 6034 public final Long getRawResult() { return result; } 6035 public final void compute() { 6036 final ToLongFunction<? super V> transformer; 6037 final LongBinaryOperator reducer; 6038 if ((transformer = this.transformer) != null && 6039 (reducer = this.reducer) != null) { 6040 long r = this.basis; 6041 for (int i = baseIndex, f, h; batch > 0 && 6042 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6043 addToPendingCount(1); 6044 (rights = new MapReduceValuesToLongTask<K,V> 6045 (this, batch >>>= 1, baseLimit = h, f, tab, 6046 rights, transformer, r, reducer)).fork(); 6047 } 6048 for (Node<K,V> p; (p = advance()) != null; ) 6049 r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); 6050 result = r; 6051 CountedCompleter<?> c; 6052 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6053 @SuppressWarnings("unchecked") 6054 MapReduceValuesToLongTask<K,V> 6055 t = (MapReduceValuesToLongTask<K,V>)c, 6056 s = t.rights; 6057 while (s != null) { 6058 t.result = reducer.applyAsLong(t.result, s.result); 6059 s = t.rights = s.nextRight; 6060 } 6061 } 6062 } 6063 } 6064 } 6065 6066 @SuppressWarnings("serial") 6067 static final class MapReduceEntriesToLongTask<K,V> 6068 extends BulkTask<K,V,Long> { 6069 final ToLongFunction<Map.Entry<K,V>> transformer; 6070 final LongBinaryOperator reducer; 6071 final long basis; 6072 long result; 6073 MapReduceEntriesToLongTask<K,V> rights, nextRight; 6074 MapReduceEntriesToLongTask 6075 (BulkTask<K,V,?> 
p, int b, int i, int f, Node<K,V>[] t, 6076 MapReduceEntriesToLongTask<K,V> nextRight, 6077 ToLongFunction<Map.Entry<K,V>> transformer, 6078 long basis, 6079 LongBinaryOperator reducer) { 6080 super(p, b, i, f, t); this.nextRight = nextRight; 6081 this.transformer = transformer; 6082 this.basis = basis; this.reducer = reducer; 6083 } 6084 public final Long getRawResult() { return result; } 6085 public final void compute() { 6086 final ToLongFunction<Map.Entry<K,V>> transformer; 6087 final LongBinaryOperator reducer; 6088 if ((transformer = this.transformer) != null && 6089 (reducer = this.reducer) != null) { 6090 long r = this.basis; 6091 for (int i = baseIndex, f, h; batch > 0 && 6092 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6093 addToPendingCount(1); 6094 (rights = new MapReduceEntriesToLongTask<K,V> 6095 (this, batch >>>= 1, baseLimit = h, f, tab, 6096 rights, transformer, r, reducer)).fork(); 6097 } 6098 for (Node<K,V> p; (p = advance()) != null; ) 6099 r = reducer.applyAsLong(r, transformer.applyAsLong(p)); 6100 result = r; 6101 CountedCompleter<?> c; 6102 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6103 @SuppressWarnings("unchecked") 6104 MapReduceEntriesToLongTask<K,V> 6105 t = (MapReduceEntriesToLongTask<K,V>)c, 6106 s = t.rights; 6107 while (s != null) { 6108 t.result = reducer.applyAsLong(t.result, s.result); 6109 s = t.rights = s.nextRight; 6110 } 6111 } 6112 } 6113 } 6114 } 6115 6116 @SuppressWarnings("serial") 6117 static final class MapReduceMappingsToLongTask<K,V> 6118 extends BulkTask<K,V,Long> { 6119 final ToLongBiFunction<? super K, ? super V> transformer; 6120 final LongBinaryOperator reducer; 6121 final long basis; 6122 long result; 6123 MapReduceMappingsToLongTask<K,V> rights, nextRight; 6124 MapReduceMappingsToLongTask 6125 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6126 MapReduceMappingsToLongTask<K,V> nextRight, 6127 ToLongBiFunction<? super K, ? 
super V> transformer, 6128 long basis, 6129 LongBinaryOperator reducer) { 6130 super(p, b, i, f, t); this.nextRight = nextRight; 6131 this.transformer = transformer; 6132 this.basis = basis; this.reducer = reducer; 6133 } 6134 public final Long getRawResult() { return result; } 6135 public final void compute() { 6136 final ToLongBiFunction<? super K, ? super V> transformer; 6137 final LongBinaryOperator reducer; 6138 if ((transformer = this.transformer) != null && 6139 (reducer = this.reducer) != null) { 6140 long r = this.basis; 6141 for (int i = baseIndex, f, h; batch > 0 && 6142 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6143 addToPendingCount(1); 6144 (rights = new MapReduceMappingsToLongTask<K,V> 6145 (this, batch >>>= 1, baseLimit = h, f, tab, 6146 rights, transformer, r, reducer)).fork(); 6147 } 6148 for (Node<K,V> p; (p = advance()) != null; ) 6149 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val)); 6150 result = r; 6151 CountedCompleter<?> c; 6152 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6153 @SuppressWarnings("unchecked") 6154 MapReduceMappingsToLongTask<K,V> 6155 t = (MapReduceMappingsToLongTask<K,V>)c, 6156 s = t.rights; 6157 while (s != null) { 6158 t.result = reducer.applyAsLong(t.result, s.result); 6159 s = t.rights = s.nextRight; 6160 } 6161 } 6162 } 6163 } 6164 } 6165 6166 @SuppressWarnings("serial") 6167 static final class MapReduceKeysToIntTask<K,V> 6168 extends BulkTask<K,V,Integer> { 6169 final ToIntFunction<? super K> transformer; 6170 final IntBinaryOperator reducer; 6171 final int basis; 6172 int result; 6173 MapReduceKeysToIntTask<K,V> rights, nextRight; 6174 MapReduceKeysToIntTask 6175 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6176 MapReduceKeysToIntTask<K,V> nextRight, 6177 ToIntFunction<? 
super K> transformer, 6178 int basis, 6179 IntBinaryOperator reducer) { 6180 super(p, b, i, f, t); this.nextRight = nextRight; 6181 this.transformer = transformer; 6182 this.basis = basis; this.reducer = reducer; 6183 } 6184 public final Integer getRawResult() { return result; } 6185 public final void compute() { 6186 final ToIntFunction<? super K> transformer; 6187 final IntBinaryOperator reducer; 6188 if ((transformer = this.transformer) != null && 6189 (reducer = this.reducer) != null) { 6190 int r = this.basis; 6191 for (int i = baseIndex, f, h; batch > 0 && 6192 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6193 addToPendingCount(1); 6194 (rights = new MapReduceKeysToIntTask<K,V> 6195 (this, batch >>>= 1, baseLimit = h, f, tab, 6196 rights, transformer, r, reducer)).fork(); 6197 } 6198 for (Node<K,V> p; (p = advance()) != null; ) 6199 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key)); 6200 result = r; 6201 CountedCompleter<?> c; 6202 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6203 @SuppressWarnings("unchecked") 6204 MapReduceKeysToIntTask<K,V> 6205 t = (MapReduceKeysToIntTask<K,V>)c, 6206 s = t.rights; 6207 while (s != null) { 6208 t.result = reducer.applyAsInt(t.result, s.result); 6209 s = t.rights = s.nextRight; 6210 } 6211 } 6212 } 6213 } 6214 } 6215 6216 @SuppressWarnings("serial") 6217 static final class MapReduceValuesToIntTask<K,V> 6218 extends BulkTask<K,V,Integer> { 6219 final ToIntFunction<? super V> transformer; 6220 final IntBinaryOperator reducer; 6221 final int basis; 6222 int result; 6223 MapReduceValuesToIntTask<K,V> rights, nextRight; 6224 MapReduceValuesToIntTask 6225 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6226 MapReduceValuesToIntTask<K,V> nextRight, 6227 ToIntFunction<? 
super V> transformer, 6228 int basis, 6229 IntBinaryOperator reducer) { 6230 super(p, b, i, f, t); this.nextRight = nextRight; 6231 this.transformer = transformer; 6232 this.basis = basis; this.reducer = reducer; 6233 } 6234 public final Integer getRawResult() { return result; } 6235 public final void compute() { 6236 final ToIntFunction<? super V> transformer; 6237 final IntBinaryOperator reducer; 6238 if ((transformer = this.transformer) != null && 6239 (reducer = this.reducer) != null) { 6240 int r = this.basis; 6241 for (int i = baseIndex, f, h; batch > 0 && 6242 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6243 addToPendingCount(1); 6244 (rights = new MapReduceValuesToIntTask<K,V> 6245 (this, batch >>>= 1, baseLimit = h, f, tab, 6246 rights, transformer, r, reducer)).fork(); 6247 } 6248 for (Node<K,V> p; (p = advance()) != null; ) 6249 r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); 6250 result = r; 6251 CountedCompleter<?> c; 6252 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6253 @SuppressWarnings("unchecked") 6254 MapReduceValuesToIntTask<K,V> 6255 t = (MapReduceValuesToIntTask<K,V>)c, 6256 s = t.rights; 6257 while (s != null) { 6258 t.result = reducer.applyAsInt(t.result, s.result); 6259 s = t.rights = s.nextRight; 6260 } 6261 } 6262 } 6263 } 6264 } 6265 6266 @SuppressWarnings("serial") 6267 static final class MapReduceEntriesToIntTask<K,V> 6268 extends BulkTask<K,V,Integer> { 6269 final ToIntFunction<Map.Entry<K,V>> transformer; 6270 final IntBinaryOperator reducer; 6271 final int basis; 6272 int result; 6273 MapReduceEntriesToIntTask<K,V> rights, nextRight; 6274 MapReduceEntriesToIntTask 6275 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6276 MapReduceEntriesToIntTask<K,V> nextRight, 6277 ToIntFunction<Map.Entry<K,V>> transformer, 6278 int basis, 6279 IntBinaryOperator reducer) { 6280 super(p, b, i, f, t); this.nextRight = nextRight; 6281 this.transformer = transformer; 6282 this.basis = basis; this.reducer = reducer; 6283 
} 6284 public final Integer getRawResult() { return result; } 6285 public final void compute() { 6286 final ToIntFunction<Map.Entry<K,V>> transformer; 6287 final IntBinaryOperator reducer; 6288 if ((transformer = this.transformer) != null && 6289 (reducer = this.reducer) != null) { 6290 int r = this.basis; 6291 for (int i = baseIndex, f, h; batch > 0 && 6292 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6293 addToPendingCount(1); 6294 (rights = new MapReduceEntriesToIntTask<K,V> 6295 (this, batch >>>= 1, baseLimit = h, f, tab, 6296 rights, transformer, r, reducer)).fork(); 6297 } 6298 for (Node<K,V> p; (p = advance()) != null; ) 6299 r = reducer.applyAsInt(r, transformer.applyAsInt(p)); 6300 result = r; 6301 CountedCompleter<?> c; 6302 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6303 @SuppressWarnings("unchecked") 6304 MapReduceEntriesToIntTask<K,V> 6305 t = (MapReduceEntriesToIntTask<K,V>)c, 6306 s = t.rights; 6307 while (s != null) { 6308 t.result = reducer.applyAsInt(t.result, s.result); 6309 s = t.rights = s.nextRight; 6310 } 6311 } 6312 } 6313 } 6314 } 6315 6316 @SuppressWarnings("serial") 6317 static final class MapReduceMappingsToIntTask<K,V> 6318 extends BulkTask<K,V,Integer> { 6319 final ToIntBiFunction<? super K, ? super V> transformer; 6320 final IntBinaryOperator reducer; 6321 final int basis; 6322 int result; 6323 MapReduceMappingsToIntTask<K,V> rights, nextRight; 6324 MapReduceMappingsToIntTask 6325 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6326 MapReduceMappingsToIntTask<K,V> nextRight, 6327 ToIntBiFunction<? super K, ? super V> transformer, 6328 int basis, 6329 IntBinaryOperator reducer) { 6330 super(p, b, i, f, t); this.nextRight = nextRight; 6331 this.transformer = transformer; 6332 this.basis = basis; this.reducer = reducer; 6333 } 6334 public final Integer getRawResult() { return result; } 6335 public final void compute() { 6336 final ToIntBiFunction<? super K, ? 
super V> transformer; 6337 final IntBinaryOperator reducer; 6338 if ((transformer = this.transformer) != null && 6339 (reducer = this.reducer) != null) { 6340 int r = this.basis; 6341 for (int i = baseIndex, f, h; batch > 0 && 6342 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6343 addToPendingCount(1); 6344 (rights = new MapReduceMappingsToIntTask<K,V> 6345 (this, batch >>>= 1, baseLimit = h, f, tab, 6346 rights, transformer, r, reducer)).fork(); 6347 } 6348 for (Node<K,V> p; (p = advance()) != null; ) 6349 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val)); 6350 result = r; 6351 CountedCompleter<?> c; 6352 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6353 @SuppressWarnings("unchecked") 6354 MapReduceMappingsToIntTask<K,V> 6355 t = (MapReduceMappingsToIntTask<K,V>)c, 6356 s = t.rights; 6357 while (s != null) { 6358 t.result = reducer.applyAsInt(t.result, s.result); 6359 s = t.rights = s.nextRight; 6360 } 6361 } 6362 } 6363 } 6364 } 6365 6366 // Unsafe mechanics 6367 private static final Unsafe U = Unsafe.getUnsafe(); 6368 private static final long SIZECTL; 6369 private static final long TRANSFERINDEX; 6370 private static final long BASECOUNT; 6371 private static final long CELLSBUSY; 6372 private static final long CELLVALUE; 6373 private static final int ABASE; 6374 private static final int ASHIFT; 6375 6376 static { 6377 SIZECTL = U.objectFieldOffset 6378 (ConcurrentHashMap.class, "sizeCtl"); 6379 TRANSFERINDEX = U.objectFieldOffset 6380 (ConcurrentHashMap.class, "transferIndex"); 6381 BASECOUNT = U.objectFieldOffset 6382 (ConcurrentHashMap.class, "baseCount"); 6383 CELLSBUSY = U.objectFieldOffset 6384 (ConcurrentHashMap.class, "cellsBusy"); 6385 6386 CELLVALUE = U.objectFieldOffset 6387 (CounterCell.class, "value"); 6388 6389 ABASE = U.arrayBaseOffset(Node[].class); 6390 int scale = U.arrayIndexScale(Node[].class); 6391 if ((scale & (scale - 1)) != 0) 6392 throw new Error("array index scale not a power of two"); 6393 ASHIFT = 31 - 
Integer.numberOfLeadingZeros(scale); 6394 6395 // Reduce the risk of rare disastrous classloading in first call to 6396 // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773 6397 Class<?> ensureLoaded = LockSupport.class; 6398 } 6399 }