1 /* 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. Oracle designates this 7 * particular file as subject to the "Classpath" exception as provided 8 * by Oracle in the LICENSE file that accompanied this code. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 */ 24 25 /* 26 * This file is available under and governed by the GNU General Public 27 * License version 2 only, as published by the Free Software Foundation. 
28 * However, the following notice accompanied the original version of this 29 * file: 30 * 31 * Written by Doug Lea with assistance from members of JCP JSR-166 32 * Expert Group and released to the public domain, as explained at 33 * http://creativecommons.org/publicdomain/zero/1.0/ 34 */ 35 36 package java.util.concurrent; 37 38 import java.io.ObjectStreamField; 39 import java.io.Serializable; 40 import java.lang.reflect.ParameterizedType; 41 import java.lang.reflect.Type; 42 import java.util.AbstractMap; 43 import java.util.Arrays; 44 import java.util.Collection; 45 import java.util.Enumeration; 46 import java.util.HashMap; 47 import java.util.Hashtable; 48 import java.util.Iterator; 49 import java.util.Map; 50 import java.util.NoSuchElementException; 51 import java.util.Set; 52 import java.util.Spliterator; 53 import java.util.concurrent.atomic.AtomicReference; 54 import java.util.concurrent.locks.LockSupport; 55 import java.util.concurrent.locks.ReentrantLock; 56 import java.util.function.BiConsumer; 57 import java.util.function.BiFunction; 58 import java.util.function.Consumer; 59 import java.util.function.DoubleBinaryOperator; 60 import java.util.function.Function; 61 import java.util.function.IntBinaryOperator; 62 import java.util.function.LongBinaryOperator; 63 import java.util.function.Predicate; 64 import java.util.function.ToDoubleBiFunction; 65 import java.util.function.ToDoubleFunction; 66 import java.util.function.ToIntBiFunction; 67 import java.util.function.ToIntFunction; 68 import java.util.function.ToLongBiFunction; 69 import java.util.function.ToLongFunction; 70 import java.util.stream.Stream; 71 72 /** 73 * A hash table supporting full concurrency of retrievals and 74 * high expected concurrency for updates. This class obeys the 75 * same functional specification as {@link java.util.Hashtable}, and 76 * includes versions of methods corresponding to each method of 77 * {@code Hashtable}. 
However, even though all operations are 78 * thread-safe, retrieval operations do <em>not</em> entail locking, 79 * and there is <em>not</em> any support for locking the entire table 80 * in a way that prevents all access. This class is fully 81 * interoperable with {@code Hashtable} in programs that rely on its 82 * thread safety but not on its synchronization details. 83 * 84 * <p>Retrieval operations (including {@code get}) generally do not 85 * block, so may overlap with update operations (including {@code put} 86 * and {@code remove}). Retrievals reflect the results of the most 87 * recently <em>completed</em> update operations holding upon their 88 * onset. (More formally, an update operation for a given key bears a 89 * <em>happens-before</em> relation with any (non-null) retrieval for 90 * that key reporting the updated value.) For aggregate operations 91 * such as {@code putAll} and {@code clear}, concurrent retrievals may 92 * reflect insertion or removal of only some entries. Similarly, 93 * Iterators, Spliterators and Enumerations return elements reflecting the 94 * state of the hash table at some point at or since the creation of the 95 * iterator/enumeration. They do <em>not</em> throw {@link 96 * java.util.ConcurrentModificationException ConcurrentModificationException}. 97 * However, iterators are designed to be used by only one thread at a time. 98 * Bear in mind that the results of aggregate status methods including 99 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically 100 * useful only when a map is not undergoing concurrent updates in other threads. 101 * Otherwise the results of these methods reflect transient states 102 * that may be adequate for monitoring or estimation purposes, but not 103 * for program control. 
104 * 105 * <p>The table is dynamically expanded when there are too many 106 * collisions (i.e., keys that have distinct hash codes but fall into 107 * the same slot modulo the table size), with the expected average 108 * effect of maintaining roughly two bins per mapping (corresponding 109 * to a 0.75 load factor threshold for resizing). There may be much 110 * variance around this average as mappings are added and removed, but 111 * overall, this maintains a commonly accepted time/space tradeoff for 112 * hash tables. However, resizing this or any other kind of hash 113 * table may be a relatively slow operation. When possible, it is a 114 * good idea to provide a size estimate as an optional {@code 115 * initialCapacity} constructor argument. An additional optional 116 * {@code loadFactor} constructor argument provides a further means of 117 * customizing initial table capacity by specifying the table density 118 * to be used in calculating the amount of space to allocate for the 119 * given number of elements. Also, for compatibility with previous 120 * versions of this class, constructors may optionally specify an 121 * expected {@code concurrencyLevel} as an additional hint for 122 * internal sizing. Note that using many keys with exactly the same 123 * {@code hashCode()} is a sure way to slow down performance of any 124 * hash table. To ameliorate impact, when keys are {@link Comparable}, 125 * this class may use comparison order among keys to help break ties. 126 * 127 * <p>A {@link Set} projection of a ConcurrentHashMap may be created 128 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed 129 * (using {@link #keySet(Object)} when only keys are of interest, and the 130 * mapped values are (perhaps transiently) not used or all take the 131 * same mapping value. 
132 * 133 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a 134 * form of histogram or multiset) by using {@link 135 * java.util.concurrent.atomic.LongAdder} values and initializing via 136 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count 137 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use 138 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();} 139 * 140 * <p>This class and its views and iterators implement all of the 141 * <em>optional</em> methods of the {@link Map} and {@link Iterator} 142 * interfaces. 143 * 144 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class 145 * does <em>not</em> allow {@code null} to be used as a key or value. 146 * 147 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk 148 * operations that, unlike most {@link Stream} methods, are designed 149 * to be safely, and often sensibly, applied even with maps that are 150 * being concurrently updated by other threads; for example, when 151 * computing a snapshot summary of the values in a shared registry. 152 * There are three kinds of operation, each with four forms, accepting 153 * functions with keys, values, entries, and (key, value) pairs as 154 * arguments and/or return values. Because the elements of a 155 * ConcurrentHashMap are not ordered in any particular way, and may be 156 * processed in different orders in different parallel executions, the 157 * correctness of supplied functions should not depend on any 158 * ordering, or on any other objects or values that may transiently 159 * change while computation is in progress; and except for forEach 160 * actions, should ideally be side-effect-free. Bulk operations on 161 * {@link java.util.Map.Entry} objects do not support method {@code 162 * setValue}. 163 * 164 * <ul> 165 * <li>forEach: Performs a given action on each element. 
166 * A variant form applies a given transformation on each element 167 * before performing the action. 168 * 169 * <li>search: Returns the first available non-null result of 170 * applying a given function on each element; skipping further 171 * search when a result is found. 172 * 173 * <li>reduce: Accumulates each element. The supplied reduction 174 * function cannot rely on ordering (more formally, it should be 175 * both associative and commutative). There are five variants: 176 * 177 * <ul> 178 * 179 * <li>Plain reductions. (There is not a form of this method for 180 * (key, value) function arguments since there is no corresponding 181 * return type.) 182 * 183 * <li>Mapped reductions that accumulate the results of a given 184 * function applied to each element. 185 * 186 * <li>Reductions to scalar doubles, longs, and ints, using a 187 * given basis value. 188 * 189 * </ul> 190 * </ul> 191 * 192 * <p>These bulk operations accept a {@code parallelismThreshold} 193 * argument. Methods proceed sequentially if the current map size is 194 * estimated to be less than the given threshold. Using a value of 195 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value 196 * of {@code 1} results in maximal parallelism by partitioning into 197 * enough subtasks to fully utilize the {@link 198 * ForkJoinPool#commonPool()} that is used for all parallel 199 * computations. Normally, you would initially choose one of these 200 * extreme values, and then measure performance of using in-between 201 * values that trade off overhead versus throughput. 202 * 203 * <p>The concurrency properties of bulk operations follow 204 * from those of ConcurrentHashMap: Any non-null result returned 205 * from {@code get(key)} and related access methods bears a 206 * happens-before relation with the associated insertion or 207 * update. 
The result of any bulk operation reflects the 208 * composition of these per-element relations (but is not 209 * necessarily atomic with respect to the map as a whole unless it 210 * is somehow known to be quiescent). Conversely, because keys 211 * and values in the map are never null, null serves as a reliable 212 * atomic indicator of the current lack of any result. To 213 * maintain this property, null serves as an implicit basis for 214 * all non-scalar reduction operations. For the double, long, and 215 * int versions, the basis should be one that, when combined with 216 * any other value, returns that other value (more formally, it 217 * should be the identity element for the reduction). Most common 218 * reductions have these properties; for example, computing a sum 219 * with basis 0 or a minimum with basis MAX_VALUE. 220 * 221 * <p>Search and transformation functions provided as arguments 222 * should similarly return null to indicate the lack of any result 223 * (in which case it is not used). In the case of mapped 224 * reductions, this also enables transformations to serve as 225 * filters, returning null (or, in the case of primitive 226 * specializations, the identity basis) if the element should not 227 * be combined. You can create compound transformations and 228 * filterings by composing them yourself under this "null means 229 * there is nothing there now" rule before using them in search or 230 * reduce operations. 231 * 232 * <p>Methods accepting and/or returning Entry arguments maintain 233 * key-value associations. They may be useful for example when 234 * finding the key for the greatest value. Note that "plain" Entry 235 * arguments can be supplied using {@code new 236 * AbstractMap.SimpleEntry(k,v)}. 237 * 238 * <p>Bulk operations may complete abruptly, throwing an 239 * exception encountered in the application of a supplied 240 * function. 
Bear in mind when handling such exceptions that other 241 * concurrently executing functions could also have thrown 242 * exceptions, or would have done so if the first exception had 243 * not occurred. 244 * 245 * <p>Speedups for parallel compared to sequential forms are common 246 * but not guaranteed. Parallel operations involving brief functions 247 * on small maps may execute more slowly than sequential forms if the 248 * underlying work to parallelize the computation is more expensive 249 * than the computation itself. Similarly, parallelization may not 250 * lead to much actual parallelism if all processors are busy 251 * performing unrelated tasks. 252 * 253 * <p>All arguments to all task methods must be non-null. 254 * 255 * <p>This class is a member of the 256 * <a href="{@docRoot}/../technotes/guides/collections/index.html"> 257 * Java Collections Framework</a>. 258 * 259 * @since 1.5 260 * @author Doug Lea 261 * @param <K> the type of keys maintained by this map 262 * @param <V> the type of mapped values 263 */ 264 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> 265 implements ConcurrentMap<K,V>, Serializable { 266 private static final long serialVersionUID = 7249069246763182397L; 267 268 /* 269 * Overview: 270 * 271 * The primary design goal of this hash table is to maintain 272 * concurrent readability (typically method get(), but also 273 * iterators and related methods) while minimizing update 274 * contention. Secondary goals are to keep space consumption about 275 * the same or better than java.util.HashMap, and to support high 276 * initial insertion rates on an empty table by many threads. 277 * 278 * This map usually acts as a binned (bucketed) hash table. Each 279 * key-value mapping is held in a Node. Most nodes are instances 280 * of the basic Node class with hash, key, value, and next 281 * fields. However, various subclasses exist: TreeNodes are 282 * arranged in balanced trees, not lists. 
TreeBins hold the roots 283 * of sets of TreeNodes. ForwardingNodes are placed at the heads 284 * of bins during resizing. ReservationNodes are used as 285 * placeholders while establishing values in computeIfAbsent and 286 * related methods. The types TreeBin, ForwardingNode, and 287 * ReservationNode do not hold normal user keys, values, or 288 * hashes, and are readily distinguishable during search etc 289 * because they have negative hash fields and null key and value 290 * fields. (These special nodes are either uncommon or transient, 291 * so the impact of carrying around some unused fields is 292 * insignificant.) 293 * 294 * The table is lazily initialized to a power-of-two size upon the 295 * first insertion. Each bin in the table normally contains a 296 * list of Nodes (most often, the list has only zero or one Node). 297 * Table accesses require volatile/atomic reads, writes, and 298 * CASes. Because there is no other way to arrange this without 299 * adding further indirections, we use intrinsics 300 * (sun.misc.Unsafe) operations. 301 * 302 * We use the top (sign) bit of Node hash fields for control 303 * purposes -- it is available anyway because of addressing 304 * constraints. Nodes with negative hash fields are specially 305 * handled or ignored in map methods. 306 * 307 * Insertion (via put or its variants) of the first node in an 308 * empty bin is performed by just CASing it to the bin. This is 309 * by far the most common case for put operations under most 310 * key/hash distributions. Other update operations (insert, 311 * delete, and replace) require locks. We do not want to waste 312 * the space required to associate a distinct lock object with 313 * each bin, so instead use the first node of a bin list itself as 314 * a lock. Locking support for these locks relies on builtin 315 * "synchronized" monitors. 
     *
     * Using the first node of a list as a lock does not by itself
     * suffice though: When a node is locked, any update must first
     * validate that it is still the first node after locking it, and
     * retry if not. Because new nodes are always appended to lists,
     * once a node is first in a bin, it remains first until deleted
     * or the bin becomes invalidated (upon resizing).
     *
     * The main disadvantage of per-bin locks is that other update
     * operations on other nodes in a bin list protected by the same
     * lock can stall, for example when user equals() or mapping
     * functions take a long time.  However, statistically, under
     * random hash codes, this is not a common problem.  Ideally, the
     * frequency of nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average, given the resizing threshold
     * of 0.75, although with a large variance because of resizing
     * granularity. Ignoring variance, the expected occurrences of
     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
     * first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * Lock contention probability for two threads accessing distinct
     * elements is roughly 1 / (8 * #elements) under random hashes.
     *
     * Actual hash code distributions encountered in practice
     * sometimes deviate significantly from uniform randomness.  This
     * includes the case when N > (1<<30), so some keys MUST collide.
     * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes or ones that differ only
     * in masked-out high bits.
So we use a secondary strategy that 357 * applies when the number of nodes in a bin exceeds a 358 * threshold. These TreeBins use a balanced tree to hold nodes (a 359 * specialized form of red-black trees), bounding search time to 360 * O(log N). Each search step in a TreeBin is at least twice as 361 * slow as in a regular list, but given that N cannot exceed 362 * (1<<64) (before running out of addresses) this bounds search 363 * steps, lock hold times, etc, to reasonable constants (roughly 364 * 100 nodes inspected per operation worst case) so long as keys 365 * are Comparable (which is very common -- String, Long, etc). 366 * TreeBin nodes (TreeNodes) also maintain the same "next" 367 * traversal pointers as regular nodes, so can be traversed in 368 * iterators in the same way. 369 * 370 * The table is resized when occupancy exceeds a percentage 371 * threshold (nominally, 0.75, but see below). Any thread 372 * noticing an overfull bin may assist in resizing after the 373 * initiating thread allocates and sets up the replacement array. 374 * However, rather than stalling, these other threads may proceed 375 * with insertions etc. The use of TreeBins shields us from the 376 * worst case effects of overfilling while resizes are in 377 * progress. Resizing proceeds by transferring bins, one by one, 378 * from the table to the next table. However, threads claim small 379 * blocks of indices to transfer (via field transferIndex) before 380 * doing so, reducing contention. A generation stamp in field 381 * sizeCtl ensures that resizings do not overlap. Because we are 382 * using power-of-two expansion, the elements from each bin must 383 * either stay at same index, or move with a power of two 384 * offset. We eliminate unnecessary node creation by catching 385 * cases where old nodes can be reused because their next fields 386 * won't change. On average, only about one-sixth of them need 387 * cloning when a table doubles. 
The nodes they replace will be 388 * garbage collectable as soon as they are no longer referenced by 389 * any reader thread that may be in the midst of concurrently 390 * traversing table. Upon transfer, the old table bin contains 391 * only a special forwarding node (with hash field "MOVED") that 392 * contains the next table as its key. On encountering a 393 * forwarding node, access and update operations restart, using 394 * the new table. 395 * 396 * Each bin transfer requires its bin lock, which can stall 397 * waiting for locks while resizing. However, because other 398 * threads can join in and help resize rather than contend for 399 * locks, average aggregate waits become shorter as resizing 400 * progresses. The transfer operation must also ensure that all 401 * accessible bins in both the old and new table are usable by any 402 * traversal. This is arranged in part by proceeding from the 403 * last bin (table.length - 1) up towards the first. Upon seeing 404 * a forwarding node, traversals (see class Traverser) arrange to 405 * move to the new table without revisiting nodes. To ensure that 406 * no intervening nodes are skipped even when moved out of order, 407 * a stack (see class TableStack) is created on first encounter of 408 * a forwarding node during a traversal, to maintain its place if 409 * later processing the current table. The need for these 410 * save/restore mechanics is relatively rare, but when one 411 * forwarding node is encountered, typically many more will be. 412 * So Traversers use a simple caching scheme to avoid creating so 413 * many new TableStack nodes. (Thanks to Peter Levart for 414 * suggesting use of a stack here.) 415 * 416 * The traversal scheme also applies to partial traversals of 417 * ranges of bins (via an alternate Traverser constructor) 418 * to support partitioned aggregate operations. 
Also, read-only 419 * operations give up if ever forwarded to a null table, which 420 * provides support for shutdown-style clearing, which is also not 421 * currently implemented. 422 * 423 * Lazy table initialization minimizes footprint until first use, 424 * and also avoids resizings when the first operation is from a 425 * putAll, constructor with map argument, or deserialization. 426 * These cases attempt to override the initial capacity settings, 427 * but harmlessly fail to take effect in cases of races. 428 * 429 * The element count is maintained using a specialization of 430 * LongAdder. We need to incorporate a specialization rather than 431 * just use a LongAdder in order to access implicit 432 * contention-sensing that leads to creation of multiple 433 * CounterCells. The counter mechanics avoid contention on 434 * updates but can encounter cache thrashing if read too 435 * frequently during concurrent access. To avoid reading so often, 436 * resizing under contention is attempted only upon adding to a 437 * bin already holding two or more nodes. Under uniform hash 438 * distributions, the probability of this occurring at threshold 439 * is around 13%, meaning that only about 1 in 8 puts check 440 * threshold (and after resizing, many fewer do so). 441 * 442 * TreeBins use a special form of comparison for search and 443 * related operations (which is the main reason we cannot use 444 * existing collections such as TreeMaps). TreeBins contain 445 * Comparable elements, but may contain others, as well as 446 * elements that are Comparable but not necessarily Comparable for 447 * the same T, so we cannot invoke compareTo among them. To handle 448 * this, the tree is ordered primarily by hash value, then by 449 * Comparable.compareTo order if applicable. On lookup at a node, 450 * if elements are not comparable or compare as 0 then both left 451 * and right children may need to be searched in the case of tied 452 * hash values. 
(This corresponds to the full list search that 453 * would be necessary if all elements were non-Comparable and had 454 * tied hashes.) On insertion, to keep a total ordering (or as 455 * close as is required here) across rebalancings, we compare 456 * classes and identityHashCodes as tie-breakers. The red-black 457 * balancing code is updated from pre-jdk-collections 458 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 459 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 460 * Algorithms" (CLR). 461 * 462 * TreeBins also require an additional locking mechanism. While 463 * list traversal is always possible by readers even during 464 * updates, tree traversal is not, mainly because of tree-rotations 465 * that may change the root node and/or its linkages. TreeBins 466 * include a simple read-write lock mechanism parasitic on the 467 * main bin-synchronization strategy: Structural adjustments 468 * associated with an insertion or removal are already bin-locked 469 * (and so cannot conflict with other writers) but must wait for 470 * ongoing readers to finish. Since there can be only one such 471 * waiter, we use a simple scheme using a single "waiter" field to 472 * block writers. However, readers need never block. If the root 473 * lock is held, they proceed along the slow traversal path (via 474 * next-pointers) until the lock becomes available or the list is 475 * exhausted, whichever comes first. These cases are not fast, but 476 * maximize aggregate expected throughput. 477 * 478 * Maintaining API and serialization compatibility with previous 479 * versions of this class introduces several oddities. Mainly: We 480 * leave untouched but unused constructor arguments referring to 481 * concurrencyLevel. We accept a loadFactor constructor argument, 482 * but apply it only to initial table capacity (which is the only 483 * time that we can guarantee to honor it.) 
We also declare an
     * unused "Segment" class that is instantiated in minimal form
     * only when serializing.
     *
     * Also, solely for compatibility with previous versions of this
     * class, it extends AbstractMap, even though all of its methods
     * are overridden, so it is just useless baggage.
     *
     * This file is organized to make things a little easier to follow
     * while reading than they might otherwise: First the main static
     * declarations and utilities, then fields, then main public
     * methods (with a few factorings of multiple public methods into
     * internal ones), then sizing methods, trees, traversers, and
     * bulk operations.
     */

    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.
     */
    private static final int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     * (With RESIZE_STAMP_BITS = 16 this is 65535.)
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    /*
     * Encodings for Node hash fields. See above for explanation.
     * All are negative, so they can never collide with the spread()
     * hash of a normal key (whose sign bit is always cleared).
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /**
     * For serialization compatibility.
     * These mirror the fields of pre-JDK8 (segmented) versions of this
     * class so that old serialized forms still stream correctly.
     */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE),
    };

    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;          // spread() hash; negative values are reserved for special nodes
        final K key;
        volatile V val;          // volatile so lock-free readers see completed updates
        volatile Node<K,V> next; // volatile link to the next node in the bin chain

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()     { return key; }
        public final V getValue()   { return val; }
        // Standard Map.Entry hash contract: key hash XOR value hash.
        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
        public final String toString() {
            return Helpers.mapEntryToString(key, val);
        }

        /**
         * Nodes are never exported as mutable entries (see MapEntry).
         * @throws UnsupportedOperationException always
         */
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            // The volatile field val is read exactly once (into u) so the
            // identity check and the equals() call see the same snapshot.
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         * Performs a linear scan of this bin's chain, matching first on
         * the cheap hash comparison, then on reference or equals()
         * equality. Returns null if k is null or not present.
         */
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }

    /* ---------------- Static utilities -------------- */

    /**
     * Spreads (XORs) higher bits of hash to lower and also forces top
     * bit to 0. Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading.
     * Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    /**
     * Returns a power of two table size for the given desired capacity.
     * See Hackers Delight, sec 3.2
     */
    private static final int tableSizeFor(int c) {
        // Smear the highest set bit of c-1 down into all lower bit
        // positions, then add one to round up to the next power of two.
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  All uses of
     * the tab arguments must be null checked by callers.  All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index.  Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so in principle require only release ordering, not
     * full volatile semantics, but are currently coded as volatile
     * writes to be conservative.
     */

    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }

    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views, created lazily on first use (see keySet(), values(), entrySet())
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;


    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
    }

    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Size the table at roughly 1.5x the requested capacity (rounded up
        // to a power of two) so initialCapacity insertions fit without resizing.
        // sizeCtl holds the initial table size until the table is created.
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }

    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        // NaN load factors fail the !(loadFactor > 0.0f) check as well.
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }

    // Original (since JDK1.2) Map methods

    /**
     * {@inheritDoc}
     */
    public int size() {
        // sumCount() can transiently be negative or exceed int range while
        // concurrent updates are in flight; clamp to [0, Integer.MAX_VALUE].
        long n = sumCount();
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }

    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        // Lock-free read: a single volatile read of the bin head, then
        // traversal over volatile next/val fields.
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Common case: first node in the bin matches.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash marks a special node (forwarding node during
                // resize, tree root, or reservation); use its virtualized find.
                return (p = e.find(h, key)) != null ? p.val : null;
            // Otherwise scan the remainder of the linked bin.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        return get(key) != null;
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent traversal over a snapshot of the bin array.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;  // number of nodes examined in the bin; 0 = no lock taken
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                // Table is being resized: help transfer bins, then retry.
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                synchronized (f) {           // lock the bin via its first node
                    if (tabAt(tab, i) == f) { // recheck head unchanged under lock
                        if (fh >= 0) {        // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Append: key not present in this bin.
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            // computeIfAbsent-style reservation found: the
                            // mapping function is updating this bin recursively.
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;       // replaced existing: count unchanged
                    break;
                }
                // else head changed before we locked; retry from the top
            }
        }
        addCount(1L, binCount);  // new mapping: adjust count and maybe resize
        return null;
    }

    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;                       // empty bin: key not present
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // resize in progress: help, retry
            else {
                V oldVal = null;
                boolean validated = false;   // true once bin examined under lock
                synchronized (f) {
                    if (tabAt(tab, i) == f) { // recheck head unchanged under lock
                        if (fh >= 0) {        // linked-list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    // Apply only if expected value cv matches
                                    // (cv == null means unconditional).
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;            // replace
                                        else if (pred != null)
                                            pred.next = e.next;       // unlink interior node
                                        else
                                            setTabAt(tab, i, e.next); // unlink bin head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree became small enough to revert
                                        // to a plain linked list.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // removal: decrement count
                        return oldVal;
                    }
                    break;
                }
                // else head changed before we locked; retry
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count the nodes being discarded so the size
                        // delta can be applied in one addCount call.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        // Racy single-check caching: concurrent callers may each create a
        // view, but the views are interchangeable, so losing the race is benign.
        KeySetView<K,V> ks;
        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
     * and {@link Spliterator#NONNULL}.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        // Same racy single-check caching idiom as keySet().
        ValuesView<K,V> vs;
        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        // Same racy single-check caching idiom as keySet().
        EntrySetView<K,V> es;
        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
    }

    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent traversal; sum is order-independent.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }

    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = p.key;
                V v = p.val;
                // Guard against self-referential maps in the output.
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }

    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Check containment in both directions: every mapping here is
            // in m, and every mapping in m is here.
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked")
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        // Write the dummy pre-JDK8 fields declared in serialPersistentFields.
        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
        streamFields.put("segments", segments);
        streamFields.put("segmentShift", segmentShift);
        streamFields.put("segmentMask", segmentMask);
        s.writeFields();

        // Emit alternating key/value objects, terminated by a null pair.
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        // Read key/value pairs into a temporary stack of nodes until the
        // null/null terminator written by writeObject is reached.
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked")
            K k = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L;  // distinct mappings actually placed (duplicates skipped)
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin is already a TreeBin: insert into the tree.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Linked bin: scan for a duplicate key; count nodes to
                        // decide whether this bin must be treeified.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Build a TreeBin from the bin's nodes plus p.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // next resize threshold: 0.75 * n
            baseCount = added;
        }
    }

    // ConcurrentMap methods

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V putIfAbsent(K key, V value) {
        return putVal(key, value, true);
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object key, Object value) {
        if (key == null)
            throw new NullPointerException();
        return value != null && replaceNode(key, null, value) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if any of the arguments are null
     */
    public boolean replace(K key, V oldValue, V newValue) {
        if (key == null || oldValue == null || newValue == null)
            throw new NullPointerException();
        return replaceNode(key, newValue, oldValue) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V replace(K key, V value) {
        if (key == null || value == null)
            throw new NullPointerException();
        return replaceNode(key, value, null);
    }

    // Overrides of JDK8+ Map extension method defaults

    /**
     * Returns the value to which the specified key is mapped, or the
     * given default value if this map contains no mapping for the
     * key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the value to return if this map contains
     * no mapping for the given key
     * @return the mapping for the key, if present; else the default value
     * @throws NullPointerException if the specified key is null
     */
    public V getOrDefault(Object key, V defaultValue) {
        V v;
        return (v = get(key)) == null ? defaultValue : v;
    }

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Weakly-consistent traversal over a snapshot of the bin array.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                action.accept(p.key, p.val);
            }
        }
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // Optimistic retry loop: replace only if the value is still
                    // the one the function saw; stop if the mapping vanished.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }

    /**
     * Helper method for EntrySetView.removeIf.
     */
    boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                // Test against an immutable snapshot of the entry; remove only
                // if the mapping is still to the same value that was tested.
                Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
                if (function.test(e) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * Helper method for ValuesView.removeIf.
     */
    boolean removeValueIf(Predicate<? super V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        boolean removed = false;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = p.key;
                V v = p.val;
                // Conditional removal: only if still mapped to the tested value.
                if (function.test(v) && replaceNode(k, null, v) != null)
                    removed = true;
            }
        }
        return removed;
    }

    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}.  The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key.  Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
 *
 * @param key key with which the specified value is to be associated
 * @param mappingFunction the function to compute a value
 * @return the current (existing or computed) value associated with
 *         the specified key, or null if the computed value is null
 * @throws NullPointerException if the specified key or mappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the mappingFunction does so,
 *         in which case the mapping is left unestablished
 */
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    if (key == null || mappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            // Empty bin: publish a locked ReservationNode placeholder so
            // other updaters block while the function runs, then replace
            // it with the real node (or null if the function returned null).
            Node<K,V> r = new ReservationNode<K,V>();
            synchronized (r) {
                if (casTabAt(tab, i, null, r)) {
                    binCount = 1;
                    Node<K,V> node = null;
                    try {
                        if ((val = mappingFunction.apply(key)) != null)
                            node = new Node<K,V>(h, key, val, null);
                    } finally {
                        // Always clear the reservation, even if the
                        // function threw.
                        setTabAt(tab, i, node);
                    }
                }
            }
            if (binCount != 0)
                break;
        }
        else if ((fh = f.hash) == MOVED)
            // Table is being resized; help, then retry on the new table.
            tab = helpTransfer(tab, f);
        else {
            boolean added = false;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        // Plain linked-list bin: search, append at tail
                        // if absent.
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = e.val;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                if ((val = mappingFunction.apply(key)) != null) {
                                    // A non-null pred.next here means the
                                    // function itself modified this bin.
                                    if (pred.next != null)
                                        throw new IllegalStateException("Recursive update");
                                    added = true;
                                    pred.next = new Node<K,V>(h, key, val, null);
                                }
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(h, key, null)) != null)
                            val = p.val;
                        else if ((val = mappingFunction.apply(key)) != null) {
                            added = true;
                            t.putTreeVal(h, key, val);
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (!added)
                    return val;
                break;
            }
        }
    }
    if (val != null)
        addCount(1L, binCount);
    return val;
}

/**
 * If the value for the specified key is present, attempts to
 * compute a new mapping given the key and its current mapped
 * value. The entire method invocation is performed atomically.
 * Some attempted update operations on this map by other threads
 * may be blocked while computation is in progress, so the
 * computation should be short and simple, and must not attempt to
 * update any other mappings of this map.
 *
 * @param key key with which a value may be associated
 * @param remappingFunction the function to compute a value
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or remappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (key == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;      // size change: -1 on removal, 0 otherwise
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
            // No bin: key is absent, nothing to compute.
            break;
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(key, e.val);
                                if (val != null)
                                    e.val = val;
                                else {
                                    // Function returned null: unlink node.
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(h, key, null)) != null) {
                            val = remappingFunction.apply(key, p.val);
                            if (val != null)
                                p.val = val;
                            else {
                                delta = -1;
                                // Shrink back to a list if the tree got
                                // too small.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0)
                break;
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}

/**
 * Attempts to compute a mapping for the specified key and its
 * current mapped value (or {@code null} if there is no current
 * mapping). The entire method invocation is performed atomically.
 * Some attempted update operations on this map by other threads
 * may be blocked while computation is in progress, so the
 * computation should be short and simple, and must not attempt to
 * update any other mappings of this Map.
 *
 * @param key key with which the specified value is to be associated
 * @param remappingFunction the function to compute a value
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or remappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V compute(K key,
                 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (key == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;      // size change: +1 insert, -1 remove, 0 replace/no-op
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            // Empty bin: reserve the slot with a placeholder while the
            // function runs (see computeIfAbsent for the same pattern).
            Node<K,V> r = new ReservationNode<K,V>();
            synchronized (r) {
                if (casTabAt(tab, i, null, r)) {
                    binCount = 1;
                    Node<K,V> node = null;
                    try {
                        if ((val = remappingFunction.apply(key, null)) != null) {
                            delta = 1;
                            node = new Node<K,V>(h, key, val, null);
                        }
                    } finally {
                        // Always clear the reservation, even on throw.
                        setTabAt(tab, i, node);
                    }
                }
            }
            if (binCount != 0)
                break;
        }
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(key, e.val);
                                if (val != null)
                                    e.val = val;
                                else {
                                    // null result removes the mapping.
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                // Key absent: compute with null oldValue,
                                // append at tail if non-null result.
                                val = remappingFunction.apply(key, null);
                                if (val != null) {
                                    if (pred.next != null)
                                        throw new IllegalStateException("Recursive update");
                                    delta = 1;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                }
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 1;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null)
                            p = r.findTreeNode(h, key, null);
                        else
                            p = null;
                        V pv = (p == null) ? null : p.val;
                        val = remappingFunction.apply(key, pv);
                        if (val != null) {
                            if (p != null)
                                p.val = val;
                            else {
                                delta = 1;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (p != null) {
                            delta = -1;
                            if (t.removeTreeNode(p))
                                setTabAt(tab, i, untreeify(t.first));
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                break;
            }
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}

/**
 * If the specified key is not already associated with a
 * (non-null) value, associates it with the given value.
 * Otherwise, replaces the value with the results of the given
 * remapping function, or removes if {@code null}. The entire
 * method invocation is performed atomically.
Some attempted
 * update operations on this map by other threads may be blocked
 * while computation is in progress, so the computation should be
 * short and simple, and must not attempt to update any other
 * mappings of this Map.
 *
 * @param key key with which the specified value is to be associated
 * @param value the value to use if absent
 * @param remappingFunction the function to recompute a value if present
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or the
 *         remappingFunction is null
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    if (key == null || value == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;      // size change: +1 insert, -1 remove, 0 replace
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            // Empty bin and the function is not needed (no old value):
            // a plain CAS insert suffices, no reservation required.
            if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                delta = 1;
                val = value;
                break;
            }
        }
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(e.val, value);
                                if (val != null)
                                    e.val = val;
                                else {
                                    // null result removes the mapping.
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                // Key absent: install the given value.
                                delta = 1;
                                val = value;
                                pred.next =
                                    new Node<K,V>(h, key, val, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r = t.root;
                        TreeNode<K,V> p = (r == null) ? null :
                            r.findTreeNode(h, key, null);
                        val = (p == null) ? value :
                            remappingFunction.apply(p.val, value);
                        if (val != null) {
                            if (p != null)
                                p.val = val;
                            else {
                                delta = 1;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (p != null) {
                            delta = -1;
                            if (t.removeTreeNode(p))
                                setTabAt(tab, i, untreeify(t.first));
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                break;
            }
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}

// Hashtable legacy methods

/**
 * Tests if some key maps into the specified value in this table.
 *
 * <p>Note that this method is identical in functionality to
 * {@link #containsValue(Object)}, and exists solely to ensure
 * full compatibility with class {@link java.util.Hashtable},
 * which supported this method prior to introduction of the
 * Java Collections Framework.
 *
 * @param value a value to search for
 * @return {@code true} if and only if some key maps to the
 *         {@code value} argument in this table as
 *         determined by the {@code equals} method;
 *         {@code false} otherwise
 * @throws NullPointerException if the specified value is null
 */
public boolean contains(Object value) {
    return containsValue(value);
}

/**
 * Returns an enumeration of the keys in this table.
2128 * 2129 * @return an enumeration of the keys in this table 2130 * @see #keySet() 2131 */ 2132 public Enumeration<K> keys() { 2133 Node<K,V>[] t; 2134 int f = (t = table) == null ? 0 : t.length; 2135 return new KeyIterator<K,V>(t, f, 0, f, this); 2136 } 2137 2138 /** 2139 * Returns an enumeration of the values in this table. 2140 * 2141 * @return an enumeration of the values in this table 2142 * @see #values() 2143 */ 2144 public Enumeration<V> elements() { 2145 Node<K,V>[] t; 2146 int f = (t = table) == null ? 0 : t.length; 2147 return new ValueIterator<K,V>(t, f, 0, f, this); 2148 } 2149 2150 // ConcurrentHashMap-only methods 2151 2152 /** 2153 * Returns the number of mappings. This method should be used 2154 * instead of {@link #size} because a ConcurrentHashMap may 2155 * contain more mappings than can be represented as an int. The 2156 * value returned is an estimate; the actual count may differ if 2157 * there are concurrent insertions or removals. 2158 * 2159 * @return the number of mappings 2160 * @since 1.8 2161 */ 2162 public long mappingCount() { 2163 long n = sumCount(); 2164 return (n < 0L) ? 0L : n; // ignore transient negative values 2165 } 2166 2167 /** 2168 * Creates a new {@link Set} backed by a ConcurrentHashMap 2169 * from the given type to {@code Boolean.TRUE}. 2170 * 2171 * @param <K> the element type of the returned set 2172 * @return the new set 2173 * @since 1.8 2174 */ 2175 public static <K> KeySetView<K,Boolean> newKeySet() { 2176 return new KeySetView<K,Boolean> 2177 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE); 2178 } 2179 2180 /** 2181 * Creates a new {@link Set} backed by a ConcurrentHashMap 2182 * from the given type to {@code Boolean.TRUE}. 2183 * 2184 * @param initialCapacity The implementation performs internal 2185 * sizing to accommodate this many elements. 
2186 * @param <K> the element type of the returned set 2187 * @return the new set 2188 * @throws IllegalArgumentException if the initial capacity of 2189 * elements is negative 2190 * @since 1.8 2191 */ 2192 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { 2193 return new KeySetView<K,Boolean> 2194 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE); 2195 } 2196 2197 /** 2198 * Returns a {@link Set} view of the keys in this map, using the 2199 * given common mapped value for any additions (i.e., {@link 2200 * Collection#add} and {@link Collection#addAll(Collection)}). 2201 * This is of course only appropriate if it is acceptable to use 2202 * the same value for all additions from this view. 2203 * 2204 * @param mappedValue the mapped value to use for any additions 2205 * @return the set view 2206 * @throws NullPointerException if the mappedValue is null 2207 */ 2208 public KeySetView<K,V> keySet(V mappedValue) { 2209 if (mappedValue == null) 2210 throw new NullPointerException(); 2211 return new KeySetView<K,V>(this, mappedValue); 2212 } 2213 2214 /* ---------------- Special Nodes -------------- */ 2215 2216 /** 2217 * A node inserted at head of bins during transfer operations. 
 */
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;    // the table entries are being moved to
    ForwardingNode(Node<K,V>[] tab) {
        // hash == MOVED marks this bin as "being transferred".
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }

    // Looks the key up in the next table instead of the (stale) bin.
    Node<K,V> find(int h, Object k) {
        // loop to avoid arbitrarily deep recursion on forwarding nodes
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e; int n;
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null)
                return null;
            for (;;) {
                int eh; K ek;
                if ((eh = e.hash) == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e;
                if (eh < 0) {
                    // Negative hash: special node. Chase forwarding nodes
                    // iteratively; delegate to other specials (e.g. TreeBin).
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        continue outer;
                    }
                    else
                        return e.find(h, k);
                }
                if ((e = e.next) == null)
                    return null;
            }
        }
    }
}

/**
 * A place-holder node used in computeIfAbsent and compute.
 */
static final class ReservationNode<K,V> extends Node<K,V> {
    ReservationNode() {
        super(RESERVED, null, null, null);
    }

    // A reservation never holds data, so lookups always miss.
    Node<K,V> find(int h, Object k) {
        return null;
    }
}

/* ---------------- Table Initialization and Resizing -------------- */

/**
 * Returns the stamp bits for resizing a table of size n.
 * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
 */
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}

/**
 * Initializes table, using the size recorded in sizeCtl.
 */
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        if ((sc = sizeCtl) < 0)
            Thread.yield(); // lost initialization race; just spin
        // sizeCtl == -1 serves as the initialization lock.
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                if ((tab = table) == null || tab.length == 0) {
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    // Next resize threshold: 0.75 * n.
                    sc = n - (n >>> 2);
                }
            } finally {
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}

/**
 * Adds to count, and if table is too small and not already
 * resizing, initiates transfer. If already resizing, helps
 * perform transfer if work is available. Rechecks occupancy
 * after a transfer to see if another resize is already needed
 * because resizings are lagging additions.
 *
 * @param x the count to add
 * @param check if <0, don't check resize, if <= 1 only check if uncontended
 */
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // Fast path: CAS the base counter; on contention (or once cells
    // exist), spread updates over the striped counter cells.
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                // A resize is in progress; join it if still valid.
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            // Start a new resize: stamp in high bits, worker count in low.
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
            s = sumCount();
        }
    }
}

/**
 * Helps transfer if a resize is in progress.
 */
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab; int sc;
    if (tab != null && (f instanceof ForwardingNode) &&
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
        int rs = resizeStamp(tab.length);
        while (nextTab == nextTable && table == tab &&
               (sc = sizeCtl) < 0) {
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            // Register as one more transfer worker and help.
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}

/**
 * Tries to presize table to accommodate the given number of elements.
 *
 * @param size number of elements (doesn't need to be perfectly accurate)
 */
private final void tryPresize(int size) {
    int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
        tableSizeFor(size + (size >>> 1) + 1);
    int sc;
    while ((sc = sizeCtl) >= 0) {
        Node<K,V>[] tab = table; int n;
        if (tab == null || (n = tab.length) == 0) {
            // Not yet initialized: create directly at the target size.
            n = (sc > c) ? sc : c;
            if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == tab) {
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = nt;
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
            }
        }
        else if (c <= sc || n >= MAXIMUM_CAPACITY)
            break;
        else if (tab == table) {
            int rs = resizeStamp(n);
            if (U.compareAndSwapInt(this, SIZECTL, sc,
                                    (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
        }
    }
}

/**
 * Moves and/or copies the nodes in each bin to new table. See
 * above for explanation.
 */
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) {            // initiating
        try {
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) {      // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n;
    }
    int nextn = nextTab.length;
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        // Claim the next stride of bins (descending indices) via CAS
        // on transferIndex.
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing)
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            if (finishing) {
                // Commit: publish the new table and set the new threshold
                // (1.5 * n == 0.75 * 2n).
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            // Deregister this worker; the last one out re-sweeps.
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return;
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    // Split the bin into "low" (index i) and "high"
                    // (index i + n) chains for the doubled table.
                    Node<K,V> ln, hn;
                    if (fh >= 0) {
                        // Reuse the longest trailing run whose nodes all
                        // land in the same half; clone only the prefix.
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                    else if (f instanceof TreeBin) {
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        // Untreeify small halves; reuse the old bin when
                        // one half got everything.
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}

/* ---------------- Counter support -------------- */

/**
 * A padded cell for distributing counts. Adapted from LongAdder
 * and Striped64. See their internal docs for explanation.
 */
@sun.misc.Contended static final class CounterCell {
    volatile long value;
    CounterCell(long x) { value = x; }
}

// Sums base plus all counter cells; only an estimate under concurrency.
final long sumCount() {
    CounterCell[] as = counterCells; CounterCell a;
    long sum = baseCount;
    if (as != null) {
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value;
        }
    }
    return sum;
}

// See LongAdder version for explanation
private final void fullAddCount(long x, boolean wasUncontended) {
    int h;
    if ((h = ThreadLocalRandom.getProbe()) == 0) {
        ThreadLocalRandom.localInit();      // force initialization
        h = ThreadLocalRandom.getProbe();
        wasUncontended = true;
    }
    boolean collide = false;                // True if last slot nonempty
    for (;;) {
        CounterCell[] as; CounterCell a; int n; long v;
        if ((as = counterCells) != null && (n = as.length) > 0) {
            if ((a = as[(n - 1) & h]) == null) {
                if (cellsBusy == 0) {            // Try to attach new Cell
                    CounterCell r = new CounterCell(x); // Optimistic create
                    // cellsBusy acts as a spinlock for the cells array.
                    if (cellsBusy == 0 &&
                        U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                        boolean created = false;
                        try {               // Recheck under lock
                            CounterCell[] rs; int m, j;
                            if ((rs = counterCells) != null &&
                                (m = rs.length) > 0 &&
                                rs[j = (m - 1) & h] == null) {
                                rs[j] = r;
                                created = true;
                            }
                        } finally {
                            cellsBusy = 0;
                        }
                        if (created)
                            break;
                        continue;           // Slot is now non-empty
                    }
                }
                collide = false;
            }
            else if (!wasUncontended)       // CAS already known to fail
                wasUncontended = true;      // Continue after rehash
            else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                break;
            else if (counterCells != as || n >= NCPU)
                collide = false;            // At max size or stale
            else if (!collide)
                collide = true;
            else if (cellsBusy == 0 &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                try {
                    if (counterCells == as) {// Expand table unless stale
                        CounterCell[] rs = new CounterCell[n << 1];
                        for (int i = 0; i < n; ++i)
                            rs[i] = as[i];
                        counterCells = rs;
                    }
                } finally {
                    cellsBusy = 0;
                }
                collide = false;
                continue;                   // Retry with expanded table
            }
            // Rehash this thread's probe to target a different cell.
            h = ThreadLocalRandom.advanceProbe(h);
        }
        else if (cellsBusy == 0 && counterCells == as &&
                 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
            boolean init = false;
            try {                           // Initialize table
                if (counterCells == as) {
                    CounterCell[] rs = new CounterCell[2];
                    rs[h & 1] = new CounterCell(x);
                    counterCells = rs;
                    init = true;
                }
            } finally {
                cellsBusy = 0;
            }
            if (init)
                break;
        }
        else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
            break;                          // Fall back on using base
    }
}

/* ---------------- Conversion from/to TreeBins -------------- */

/**
 * Replaces all linked nodes in bin at given index unless table is
 * too small, in which case resizes instead.
 */
private final void treeifyBin(Node<K,V>[] tab, int index) {
    Node<K,V> b; int n;
    if (tab != null) {
        // Prefer growing the table over treeifying while it is small.
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            tryPresize(n << 1);
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
            synchronized (b) {
                if (tabAt(tab, index) == b) {
                    // Build a doubly-linked TreeNode list, then wrap it
                    // in a TreeBin (whose constructor builds the tree).
                    TreeNode<K,V> hd = null, tl = null;
                    for (Node<K,V> e = b; e != null; e = e.next) {
                        TreeNode<K,V> p =
                            new TreeNode<K,V>(e.hash, e.key, e.val,
                                              null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    setTabAt(tab, index, new TreeBin<K,V>(hd));
                }
            }
        }
    }
}

/**
 * Returns a list on non-TreeNodes replacing those in given list.
 */
static <K,V> Node<K,V> untreeify(Node<K,V> b) {
    Node<K,V> hd = null, tl = null;
    for (Node<K,V> q = b; q != null; q = q.next) {
        Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
        if (tl == null)
            hd = p;
        else
            tl.next = p;
        tl = p;
    }
    return hd;
}

/* ---------------- TreeNodes -------------- */

/**
 * Nodes for use in TreeBins.
 */
static final class TreeNode<K,V> extends Node<K,V> {
    TreeNode<K,V> parent;  // red-black tree links
    TreeNode<K,V> left;
    TreeNode<K,V> right;
    TreeNode<K,V> prev;    // needed to unlink next upon deletion
    boolean red;

    TreeNode(int hash, K key, V val, Node<K,V> next,
             TreeNode<K,V> parent) {
        super(hash, key, val, next);
        this.parent = parent;
    }

    Node<K,V> find(int h, Object k) {
        return findTreeNode(h, k, null);
    }

    /**
     * Returns the TreeNode (or null if not found) for the given key
     * starting at given root.
     */
    final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
        if (k != null) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk; TreeNode<K,V> q;
                TreeNode<K,V> pl = p.left, pr = p.right;
                // Primary ordering is by hash; ties fall through to
                // equals, then Comparable, then a subtree search.
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                // Not comparable: search the right subtree recursively,
                // then continue down the left.
                else if ((q = pr.findTreeNode(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
        }
        return null;
    }
}

/* ---------------- TreeBins -------------- */

/**
 * TreeNodes used at the heads of bins. TreeBins do not hold user
 * keys or values, but instead point to list of TreeNodes and
 * their root. They also maintain a parasitic read-write lock
 * forcing writers (who hold bin lock) to wait for readers (who do
 * not) to complete before tree restructuring operations.
 */
static final class TreeBin<K,V> extends Node<K,V> {
    TreeNode<K,V> root;
    volatile TreeNode<K,V> first;
    volatile Thread waiter;     // at most one thread parks awaiting the lock
    volatile int lockState;
    // values for lockState
    static final int WRITER = 1; // set while holding write lock
    static final int WAITER = 2; // set when waiting for write lock
    static final int READER = 4; // increment value for setting read lock

    /**
     * Tie-breaking utility for ordering insertions when equal
     * hashCodes and non-comparable. We don't require a total
     * order, just a consistent insertion rule to maintain
     * equivalence across rebalancings. Tie-breaking further than
     * necessary simplifies testing a bit.
2781 */ 2782 static int tieBreakOrder(Object a, Object b) { 2783 int d; 2784 if (a == null || b == null || 2785 (d = a.getClass().getName(). 2786 compareTo(b.getClass().getName())) == 0) 2787 d = (System.identityHashCode(a) <= System.identityHashCode(b) ? 2788 -1 : 1); 2789 return d; 2790 } 2791 2792 /** 2793 * Creates bin with initial set of nodes headed by b. 2794 */ 2795 TreeBin(TreeNode<K,V> b) { 2796 super(TREEBIN, null, null, null); 2797 this.first = b; 2798 TreeNode<K,V> r = null; 2799 for (TreeNode<K,V> x = b, next; x != null; x = next) { 2800 next = (TreeNode<K,V>)x.next; 2801 x.left = x.right = null; 2802 if (r == null) { 2803 x.parent = null; 2804 x.red = false; 2805 r = x; 2806 } 2807 else { 2808 K k = x.key; 2809 int h = x.hash; 2810 Class<?> kc = null; 2811 for (TreeNode<K,V> p = r;;) { 2812 int dir, ph; 2813 K pk = p.key; 2814 if ((ph = p.hash) > h) 2815 dir = -1; 2816 else if (ph < h) 2817 dir = 1; 2818 else if ((kc == null && 2819 (kc = comparableClassFor(k)) == null) || 2820 (dir = compareComparables(kc, k, pk)) == 0) 2821 dir = tieBreakOrder(k, pk); 2822 TreeNode<K,V> xp = p; 2823 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2824 x.parent = xp; 2825 if (dir <= 0) 2826 xp.left = x; 2827 else 2828 xp.right = x; 2829 r = balanceInsertion(r, x); 2830 break; 2831 } 2832 } 2833 } 2834 } 2835 this.root = r; 2836 assert checkInvariants(root); 2837 } 2838 2839 /** 2840 * Acquires write lock for tree restructuring. 2841 */ 2842 private final void lockRoot() { 2843 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) 2844 contendedLock(); // offload to separate method 2845 } 2846 2847 /** 2848 * Releases write lock for tree restructuring. 2849 */ 2850 private final void unlockRoot() { 2851 lockState = 0; 2852 } 2853 2854 /** 2855 * Possibly blocks awaiting root lock. 
2856 */ 2857 private final void contendedLock() { 2858 boolean waiting = false; 2859 for (int s;;) { 2860 if (((s = lockState) & ~WAITER) == 0) { 2861 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { 2862 if (waiting) 2863 waiter = null; 2864 return; 2865 } 2866 } 2867 else if ((s & WAITER) == 0) { 2868 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { 2869 waiting = true; 2870 waiter = Thread.currentThread(); 2871 } 2872 } 2873 else if (waiting) 2874 LockSupport.park(this); 2875 } 2876 } 2877 2878 /** 2879 * Returns matching node or null if none. Tries to search 2880 * using tree comparisons from root, but continues linear 2881 * search when lock not available. 2882 */ 2883 final Node<K,V> find(int h, Object k) { 2884 if (k != null) { 2885 for (Node<K,V> e = first; e != null; ) { 2886 int s; K ek; 2887 if (((s = lockState) & (WAITER|WRITER)) != 0) { 2888 if (e.hash == h && 2889 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2890 return e; 2891 e = e.next; 2892 } 2893 else if (U.compareAndSwapInt(this, LOCKSTATE, s, 2894 s + READER)) { 2895 TreeNode<K,V> r, p; 2896 try { 2897 p = ((r = root) == null ? null : 2898 r.findTreeNode(h, k, null)); 2899 } finally { 2900 Thread w; 2901 if (U.getAndAddInt(this, LOCKSTATE, -READER) == 2902 (READER|WAITER) && (w = waiter) != null) 2903 LockSupport.unpark(w); 2904 } 2905 return p; 2906 } 2907 } 2908 } 2909 return null; 2910 } 2911 2912 /** 2913 * Finds or adds a node. 
2914 * @return null if added 2915 */ 2916 final TreeNode<K,V> putTreeVal(int h, K k, V v) { 2917 Class<?> kc = null; 2918 boolean searched = false; 2919 for (TreeNode<K,V> p = root;;) { 2920 int dir, ph; K pk; 2921 if (p == null) { 2922 first = root = new TreeNode<K,V>(h, k, v, null, null); 2923 break; 2924 } 2925 else if ((ph = p.hash) > h) 2926 dir = -1; 2927 else if (ph < h) 2928 dir = 1; 2929 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2930 return p; 2931 else if ((kc == null && 2932 (kc = comparableClassFor(k)) == null) || 2933 (dir = compareComparables(kc, k, pk)) == 0) { 2934 if (!searched) { 2935 TreeNode<K,V> q, ch; 2936 searched = true; 2937 if (((ch = p.left) != null && 2938 (q = ch.findTreeNode(h, k, kc)) != null) || 2939 ((ch = p.right) != null && 2940 (q = ch.findTreeNode(h, k, kc)) != null)) 2941 return q; 2942 } 2943 dir = tieBreakOrder(k, pk); 2944 } 2945 2946 TreeNode<K,V> xp = p; 2947 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2948 TreeNode<K,V> x, f = first; 2949 first = x = new TreeNode<K,V>(h, k, v, f, xp); 2950 if (f != null) 2951 f.prev = x; 2952 if (dir <= 0) 2953 xp.left = x; 2954 else 2955 xp.right = x; 2956 if (!xp.red) 2957 x.red = true; 2958 else { 2959 lockRoot(); 2960 try { 2961 root = balanceInsertion(root, x); 2962 } finally { 2963 unlockRoot(); 2964 } 2965 } 2966 break; 2967 } 2968 } 2969 assert checkInvariants(root); 2970 return null; 2971 } 2972 2973 /** 2974 * Removes the given node, that must be present before this 2975 * call. This is messier than typical red-black deletion code 2976 * because we cannot swap the contents of an interior node 2977 * with a leaf successor that is pinned by "next" pointers 2978 * that are accessible independently of lock. So instead we 2979 * swap the tree linkages. 
2980 * 2981 * @return true if now too small, so should be untreeified 2982 */ 2983 final boolean removeTreeNode(TreeNode<K,V> p) { 2984 TreeNode<K,V> next = (TreeNode<K,V>)p.next; 2985 TreeNode<K,V> pred = p.prev; // unlink traversal pointers 2986 TreeNode<K,V> r, rl; 2987 if (pred == null) 2988 first = next; 2989 else 2990 pred.next = next; 2991 if (next != null) 2992 next.prev = pred; 2993 if (first == null) { 2994 root = null; 2995 return true; 2996 } 2997 if ((r = root) == null || r.right == null || // too small 2998 (rl = r.left) == null || rl.left == null) 2999 return true; 3000 lockRoot(); 3001 try { 3002 TreeNode<K,V> replacement; 3003 TreeNode<K,V> pl = p.left; 3004 TreeNode<K,V> pr = p.right; 3005 if (pl != null && pr != null) { 3006 TreeNode<K,V> s = pr, sl; 3007 while ((sl = s.left) != null) // find successor 3008 s = sl; 3009 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 3010 TreeNode<K,V> sr = s.right; 3011 TreeNode<K,V> pp = p.parent; 3012 if (s == pr) { // p was s's direct parent 3013 p.parent = s; 3014 s.right = p; 3015 } 3016 else { 3017 TreeNode<K,V> sp = s.parent; 3018 if ((p.parent = sp) != null) { 3019 if (s == sp.left) 3020 sp.left = p; 3021 else 3022 sp.right = p; 3023 } 3024 if ((s.right = pr) != null) 3025 pr.parent = s; 3026 } 3027 p.left = null; 3028 if ((p.right = sr) != null) 3029 sr.parent = p; 3030 if ((s.left = pl) != null) 3031 pl.parent = s; 3032 if ((s.parent = pp) == null) 3033 r = s; 3034 else if (p == pp.left) 3035 pp.left = s; 3036 else 3037 pp.right = s; 3038 if (sr != null) 3039 replacement = sr; 3040 else 3041 replacement = p; 3042 } 3043 else if (pl != null) 3044 replacement = pl; 3045 else if (pr != null) 3046 replacement = pr; 3047 else 3048 replacement = p; 3049 if (replacement != p) { 3050 TreeNode<K,V> pp = replacement.parent = p.parent; 3051 if (pp == null) 3052 r = replacement; 3053 else if (p == pp.left) 3054 pp.left = replacement; 3055 else 3056 pp.right = replacement; 3057 p.left = p.right = 
p.parent = null; 3058 } 3059 3060 root = (p.red) ? r : balanceDeletion(r, replacement); 3061 3062 if (p == replacement) { // detach pointers 3063 TreeNode<K,V> pp; 3064 if ((pp = p.parent) != null) { 3065 if (p == pp.left) 3066 pp.left = null; 3067 else if (p == pp.right) 3068 pp.right = null; 3069 p.parent = null; 3070 } 3071 } 3072 } finally { 3073 unlockRoot(); 3074 } 3075 assert checkInvariants(root); 3076 return false; 3077 } 3078 3079 /* ------------------------------------------------------------ */ 3080 // Red-black tree methods, all adapted from CLR 3081 3082 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 3083 TreeNode<K,V> p) { 3084 TreeNode<K,V> r, pp, rl; 3085 if (p != null && (r = p.right) != null) { 3086 if ((rl = p.right = r.left) != null) 3087 rl.parent = p; 3088 if ((pp = r.parent = p.parent) == null) 3089 (root = r).red = false; 3090 else if (pp.left == p) 3091 pp.left = r; 3092 else 3093 pp.right = r; 3094 r.left = p; 3095 p.parent = r; 3096 } 3097 return root; 3098 } 3099 3100 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 3101 TreeNode<K,V> p) { 3102 TreeNode<K,V> l, pp, lr; 3103 if (p != null && (l = p.left) != null) { 3104 if ((lr = p.left = l.right) != null) 3105 lr.parent = p; 3106 if ((pp = l.parent = p.parent) == null) 3107 (root = l).red = false; 3108 else if (pp.right == p) 3109 pp.right = l; 3110 else 3111 pp.left = l; 3112 l.right = p; 3113 p.parent = l; 3114 } 3115 return root; 3116 } 3117 3118 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 3119 TreeNode<K,V> x) { 3120 x.red = true; 3121 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 3122 if ((xp = x.parent) == null) { 3123 x.red = false; 3124 return x; 3125 } 3126 else if (!xp.red || (xpp = xp.parent) == null) 3127 return root; 3128 if (xp == (xppl = xpp.left)) { 3129 if ((xppr = xpp.right) != null && xppr.red) { 3130 xppr.red = false; 3131 xp.red = false; 3132 xpp.red = true; 3133 x = xpp; 3134 } 3135 else { 3136 if (x == xp.right) { 3137 root = 
rotateLeft(root, x = xp); 3138 xpp = (xp = x.parent) == null ? null : xp.parent; 3139 } 3140 if (xp != null) { 3141 xp.red = false; 3142 if (xpp != null) { 3143 xpp.red = true; 3144 root = rotateRight(root, xpp); 3145 } 3146 } 3147 } 3148 } 3149 else { 3150 if (xppl != null && xppl.red) { 3151 xppl.red = false; 3152 xp.red = false; 3153 xpp.red = true; 3154 x = xpp; 3155 } 3156 else { 3157 if (x == xp.left) { 3158 root = rotateRight(root, x = xp); 3159 xpp = (xp = x.parent) == null ? null : xp.parent; 3160 } 3161 if (xp != null) { 3162 xp.red = false; 3163 if (xpp != null) { 3164 xpp.red = true; 3165 root = rotateLeft(root, xpp); 3166 } 3167 } 3168 } 3169 } 3170 } 3171 } 3172 3173 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 3174 TreeNode<K,V> x) { 3175 for (TreeNode<K,V> xp, xpl, xpr;;) { 3176 if (x == null || x == root) 3177 return root; 3178 else if ((xp = x.parent) == null) { 3179 x.red = false; 3180 return x; 3181 } 3182 else if (x.red) { 3183 x.red = false; 3184 return root; 3185 } 3186 else if ((xpl = xp.left) == x) { 3187 if ((xpr = xp.right) != null && xpr.red) { 3188 xpr.red = false; 3189 xp.red = true; 3190 root = rotateLeft(root, xp); 3191 xpr = (xp = x.parent) == null ? null : xp.right; 3192 } 3193 if (xpr == null) 3194 x = xp; 3195 else { 3196 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 3197 if ((sr == null || !sr.red) && 3198 (sl == null || !sl.red)) { 3199 xpr.red = true; 3200 x = xp; 3201 } 3202 else { 3203 if (sr == null || !sr.red) { 3204 if (sl != null) 3205 sl.red = false; 3206 xpr.red = true; 3207 root = rotateRight(root, xpr); 3208 xpr = (xp = x.parent) == null ? 3209 null : xp.right; 3210 } 3211 if (xpr != null) { 3212 xpr.red = (xp == null) ? 
false : xp.red; 3213 if ((sr = xpr.right) != null) 3214 sr.red = false; 3215 } 3216 if (xp != null) { 3217 xp.red = false; 3218 root = rotateLeft(root, xp); 3219 } 3220 x = root; 3221 } 3222 } 3223 } 3224 else { // symmetric 3225 if (xpl != null && xpl.red) { 3226 xpl.red = false; 3227 xp.red = true; 3228 root = rotateRight(root, xp); 3229 xpl = (xp = x.parent) == null ? null : xp.left; 3230 } 3231 if (xpl == null) 3232 x = xp; 3233 else { 3234 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 3235 if ((sl == null || !sl.red) && 3236 (sr == null || !sr.red)) { 3237 xpl.red = true; 3238 x = xp; 3239 } 3240 else { 3241 if (sl == null || !sl.red) { 3242 if (sr != null) 3243 sr.red = false; 3244 xpl.red = true; 3245 root = rotateLeft(root, xpl); 3246 xpl = (xp = x.parent) == null ? 3247 null : xp.left; 3248 } 3249 if (xpl != null) { 3250 xpl.red = (xp == null) ? false : xp.red; 3251 if ((sl = xpl.left) != null) 3252 sl.red = false; 3253 } 3254 if (xp != null) { 3255 xp.red = false; 3256 root = rotateRight(root, xp); 3257 } 3258 x = root; 3259 } 3260 } 3261 } 3262 } 3263 } 3264 3265 /** 3266 * Checks invariants recursively for the tree of Nodes rooted at t. 
3267 */ 3268 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 3269 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 3270 tb = t.prev, tn = (TreeNode<K,V>)t.next; 3271 if (tb != null && tb.next != t) 3272 return false; 3273 if (tn != null && tn.prev != t) 3274 return false; 3275 if (tp != null && t != tp.left && t != tp.right) 3276 return false; 3277 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 3278 return false; 3279 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 3280 return false; 3281 if (t.red && tl != null && tl.red && tr != null && tr.red) 3282 return false; 3283 if (tl != null && !checkInvariants(tl)) 3284 return false; 3285 if (tr != null && !checkInvariants(tr)) 3286 return false; 3287 return true; 3288 } 3289 3290 private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe(); 3291 private static final long LOCKSTATE; 3292 static { 3293 try { 3294 LOCKSTATE = U.objectFieldOffset 3295 (TreeBin.class.getDeclaredField("lockState")); 3296 } catch (ReflectiveOperationException e) { 3297 throw new Error(e); 3298 } 3299 } 3300 } 3301 3302 /* ----------------Table Traversal -------------- */ 3303 3304 /** 3305 * Records the table, its length, and current traversal index for a 3306 * traverser that must process a region of a forwarded table before 3307 * proceeding with current table. 3308 */ 3309 static final class TableStack<K,V> { 3310 int length; 3311 int index; 3312 Node<K,V>[] tab; 3313 TableStack<K,V> next; 3314 } 3315 3316 /** 3317 * Encapsulates traversal for methods such as containsValue; also 3318 * serves as a base class for other iterators and spliterators. 3319 * 3320 * Method advance visits once each still-valid node that was 3321 * reachable upon iterator construction. It might miss some that 3322 * were added to a bin after the bin was visited, which is OK wrt 3323 * consistency guarantees. 
     * Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses.  Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                // Negative hash marks a special node: descend into a
                // forwarded table, enter a TreeBin's list, or skip it.
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // Pop frames whose region is exhausted, restoring the saved
            // table/index; spent frames are recycled onto the spare list.
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }

    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove.
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMap<K,V> map;
        Node<K,V> lastReturned;
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance();  // prime "next" so hasNext is a simple null check
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            // null value/cv means unconditional removal of the mapping.
            map.replaceNode(p.key, null, null);
        }
    }

    static final class KeyIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<K>, Enumeration<K> {
        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final K next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            lastReturned = p;
            advance();
            return k;
        }

        public final K nextElement() { return next(); }
    }

    static final class ValueIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<V>, Enumeration<V> {
        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final V next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            V v = p.val;
            lastReturned = p;
            advance();
            return v;
        }

        public final V nextElement() { return next(); }
    }

    static final class EntryIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<Map.Entry<K,V>> {
        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final Map.Entry<K,V> next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            V v = p.val;
            lastReturned = p;
            advance();
            // Export a snapshot entry rather than the internal Node.
            return new MapEntry<K,V>(k, v, map);
        }
    }

    /**
     * Exported Entry for EntryIterator.
     */
    static final class MapEntry<K,V> implements Map.Entry<K,V> {
        final K key; // non-null
        V val;       // non-null
        final ConcurrentHashMap<K,V> map;
        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
            this.key = key;
            this.val = val;
            this.map = map;
        }
        public K getKey()        { return key; }
        public V getValue()      { return val; }
        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
        public String toString() {
            return Helpers.mapEntryToString(key, val);
        }

        public boolean equals(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == val || v.equals(val)));
        }

        /**
         * Sets our entry's value and writes through to the map. The
         * value to return is somewhat arbitrary here. Since we do not
         * necessarily track asynchronous changes, the most recent
         * "previous" value could be different from what we return (or
         * could even have been removed, in which case the put will
         * re-establish). We do not and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = val;
            val = value;
            map.put(key, value);
            return v;
        }
    }

    static final class KeySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<K> {
        long est; // size estimate
        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                       long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public KeySpliterator<K,V> trySplit() {
            int i, f, h;
            // Split the remaining [baseIndex, baseLimit) range at its
            // midpoint; this spliterator keeps the lower half (note the
            // embedded baseLimit = h), the new one takes the upper half.
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                        f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.key);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    static final class ValueSpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<V> {
        long est; // size estimate
        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public ValueSpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.val);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.CONCURRENT | Spliterator.NONNULL;
        }
    }

    static final class EntrySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        final ConcurrentHashMap<K,V> map; // To export MapEntry
        long est; // size estimate
        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est, ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            this.est = est;
        }

        public EntrySpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1, map);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }

        public boolean tryAdvance(Consumer<?
                                  super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(new MapEntry<K,V>(p.key, p.val, map));
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    // Parallel bulk operations

    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     */
    final int batchFor(long b) {
        long n;
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }

    /**
     * Performs the given action for each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEach(long parallelismThreshold,
                        BiConsumer<? super K,? super V> action) {
        if (action == null) throw new NullPointerException();
        new ForEachMappingTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEach(long parallelismThreshold,
                            BiFunction<? super K, ? super V, ? extends U> transformer,
                            Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedMappingTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each (key, value), or null if none. Upon
     * success, further element processing is suppressed and the
     * results of any other parallel invocations of the search
     * function are ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each (key, value), or null if none
     * @since 1.8
     */
    public <U> U search(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public <U> U reduce(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> transformer,
                        BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public double reduceToDouble(long parallelismThreshold,
                                 ToDoubleBiFunction<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public long reduceToLong(long parallelismThreshold,
                             ToLongBiFunction<? super K, ? super V> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public int reduceToInt(long parallelismThreshold,
                           ToIntBiFunction<? super K, ? super V> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachKey(long parallelismThreshold,
                           Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        new ForEachKeyTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachKey(long parallelismThreshold,
                               Function<? super K, ? extends U> transformer,
                               Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedKeyTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     * @since 1.8
     */
    public <U> U searchKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     * @since 1.8
     */
    public K reduceKeys(long parallelismThreshold,
                        BiFunction<? super K, ? super K, ? extends K> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceKeysTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public <U> U reduceKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> transformer,
                            BiFunction<? super U, ? super U, ?
                                extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public double reduceKeysToDouble(long parallelismThreshold,
                                     ToDoubleFunction<? super K> transformer,
                                     double basis,
                                     DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public long reduceKeysToLong(long parallelismThreshold,
                                 ToLongFunction<? super K> transformer,
                                 long basis,
                                 LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public int reduceKeysToInt(long parallelismThreshold,
                               ToIntFunction<? super K> transformer,
                               int basis,
                               IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachValue(long parallelismThreshold,
                             Consumer<? super V> action) {
        if (action == null)
            throw new NullPointerException();
        new ForEachValueTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachValue(long parallelismThreshold,
                                 Function<? super V, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedValueTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each value, or null if none.  Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each value, or null if none
     * @since 1.8
     */
    public <U> U searchValues(long parallelismThreshold,
                              Function<? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        // First non-null search result is published via the shared AtomicReference.
        return new SearchValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all values using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all values
     * @since 1.8
     */
    public V reduceValues(long parallelismThreshold,
                          BiFunction<? super V, ? super V, ? extends V> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceValuesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public <U> U reduceValues(long parallelismThreshold,
                              Function<? super V, ? extends U> transformer,
                              BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public double reduceValuesToDouble(long parallelismThreshold,
                                       ToDoubleFunction<? super V> transformer,
                                       double basis,
                                       DoubleBinaryOperator reducer) {
        // Same task-construction pattern as the key/mapping variants above.
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public long reduceValuesToLong(long parallelismThreshold,
                                   ToLongFunction<? super V> transformer,
                                   long basis,
                                   LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public int reduceValuesToInt(long parallelismThreshold,
                                 ToIntFunction<? super V> transformer,
                                 int basis,
                                 IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachEntry(long parallelismThreshold,
                             Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
                                  action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachEntry(long parallelismThreshold,
                                 Function<Map.Entry<K,V>, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedEntryTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each entry, or null if none.  Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each entry, or null if none
     * @since 1.8
     */
    public <U> U searchEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ?
                               extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all entries using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all entries
     * @since 1.8
     */
    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceEntriesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public <U> U reduceEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> transformer,
                               BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public double reduceEntriesToDouble(long parallelismThreshold,
                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
                                        double basis,
                                        DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public long reduceEntriesToLong(long parallelismThreshold,
                                    ToLongFunction<Map.Entry<K,V>> transformer,
                                    long basis,
                                    LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public int reduceEntriesToInt(long parallelismThreshold,
                                  ToIntFunction<Map.Entry<K,V>> transformer,
                                  int basis,
                                  IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }


    /* ----------------Views -------------- */

    /**
     * Base class for views.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Backing map; all view operations delegate to it.
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            // mappingCount is only an estimate; the loop below expands the
            // array on demand if more elements appear during traversal.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;  // grow by ~50%, capped at MAX_ARRAY_SIZE
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if fewer elements were seen than estimated.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            // Reuse the caller's array when large enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against a self-referential element.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Default value used by add/addAll; null means additions are disabled.
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // null from putVal indicates no prior mapping, i.e. the set changed.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<?
super K> action) { 4691 if (action == null) throw new NullPointerException(); 4692 Node<K,V>[] t; 4693 if ((t = map.table) != null) { 4694 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4695 for (Node<K,V> p; (p = it.advance()) != null; ) 4696 action.accept(p.key); 4697 } 4698 } 4699 } 4700 4701 /** 4702 * A view of a ConcurrentHashMap as a {@link Collection} of 4703 * values, in which additions are disabled. This class cannot be 4704 * directly instantiated. See {@link #values()}. 4705 */ 4706 static final class ValuesView<K,V> extends CollectionView<K,V,V> 4707 implements Collection<V>, java.io.Serializable { 4708 private static final long serialVersionUID = 2249069246763182397L; 4709 ValuesView(ConcurrentHashMap<K,V> map) { super(map); } 4710 public final boolean contains(Object o) { 4711 return map.containsValue(o); 4712 } 4713 4714 public final boolean remove(Object o) { 4715 if (o != null) { 4716 for (Iterator<V> it = iterator(); it.hasNext();) { 4717 if (o.equals(it.next())) { 4718 it.remove(); 4719 return true; 4720 } 4721 } 4722 } 4723 return false; 4724 } 4725 4726 public final Iterator<V> iterator() { 4727 ConcurrentHashMap<K,V> m = map; 4728 Node<K,V>[] t; 4729 int f = (t = m.table) == null ? 0 : t.length; 4730 return new ValueIterator<K,V>(t, f, 0, f, m); 4731 } 4732 4733 public final boolean add(V e) { 4734 throw new UnsupportedOperationException(); 4735 } 4736 public final boolean addAll(Collection<? extends V> c) { 4737 throw new UnsupportedOperationException(); 4738 } 4739 4740 public boolean removeIf(Predicate<? super V> filter) { 4741 return map.removeValueIf(filter); 4742 } 4743 4744 public Spliterator<V> spliterator() { 4745 Node<K,V>[] t; 4746 ConcurrentHashMap<K,V> m = map; 4747 long n = m.sumCount(); 4748 int f = (t = m.table) == null ? 0 : t.length; 4749 return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); 4750 } 4751 4752 public void forEach(Consumer<? 
super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(p.val);
        }
    }
}

/**
 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
 * entries.  This class cannot be directly instantiated. See
 * {@link #entrySet()}.
 */
static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
    implements Set<Map.Entry<K,V>>, java.io.Serializable {
    private static final long serialVersionUID = 2249069246763182397L;
    EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

    // Contained only if o is a non-null-keyed entry whose key currently
    // maps to an equal value.
    public boolean contains(Object o) {
        Object k, v, r; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (r = map.get(k)) != null &&
                (v = e.getValue()) != null &&
                (v == r || v.equals(r)));
    }

    // Removes the mapping only if the key is still mapped to the
    // entry's value (delegates to the two-argument remove).
    public boolean remove(Object o) {
        Object k, v; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (v = e.getValue()) != null &&
                map.remove(k, v));
    }

    /**
     * @return an iterator over the entries of the backing map
     */
    public Iterator<Map.Entry<K,V>> iterator() {
        ConcurrentHashMap<K,V> m = map;
        Node<K,V>[] t;
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntryIterator<K,V>(t, f, 0, f, m);
    }

    // A null prior mapping is taken to mean this set changed.
    public boolean add(Entry<K,V> e) {
        return map.putVal(e.getKey(), e.getValue(), false) == null;
    }

    public boolean addAll(Collection<? extends Entry<K,V>> c) {
        boolean added = false;
        for (Entry<K,V> e : c) {
            if (add(e))
                added = true;
        }
        return added;
    }

    public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
        return map.removeEntryIf(filter);
    }

    // Sums node hash codes over a fresh traversal of the table.
    public final int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                h += p.hashCode();
            }
        }
        return h;
    }

    // Set equality: same identity, or mutual containment of elements.
    public final boolean equals(Object o) {
        Set<?> c;
        return ((o instanceof Set) &&
                ((c = (Set<?>)o) == this ||
                 (containsAll(c) && c.containsAll(this))));
    }

    public Spliterator<Map.Entry<K,V>> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        // sumCount() may transiently be negative; clamp the size estimate.
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
    }

    public void forEach(Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                // Wrap the node so that setValue can write through to the map.
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }
    }

}

// -------------------------------------------------------

/**
 * Base class for bulk tasks. Repeats some fields and code from
 * class Traverser, because we need to subclass CountedCompleter.
 */
@SuppressWarnings("serial")
abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
    Node<K,V>[] tab;        // same as Traverser
    Node<K,V> next;
    TableStack<K,V> stack, spare;
    int index;
    int baseIndex;
    int baseLimit;
    final int baseSize;
    int batch;              // split control

    BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
        super(par);
        this.batch = b;
        this.index = this.baseIndex = i;
        if ((this.tab = t) == null)
            this.baseSize = this.baseLimit = 0;
        else if (par == null)
            this.baseSize = this.baseLimit = t.length;   // root covers whole table
        else {
            this.baseLimit = f;
            this.baseSize = par.baseSize;
        }
    }

    /**
     * Same as Traverser version.
     */
    final Node<K,V> advance() {
        Node<K,V> e;
        if ((e = next) != null)
            e = e.next;
        for (;;) {
            Node<K,V>[] t; int i, n;
            if (e != null)
                return next = e;
            // Exhausted the assigned range (or no table): traversal done.
            if (baseIndex >= baseLimit || (t = tab) == null ||
                (n = t.length) <= (i = index) || i < 0)
                return next = null;
            if ((e = tabAt(t, i)) != null && e.hash < 0) {
                // Negative hash marks a special node: a ForwardingNode
                // (bin moved during resize) or a TreeBin; others yield
                // no elements here.
                if (e instanceof ForwardingNode) {
                    // Follow the bin into the new table, remembering where
                    // we were so recoverState can resume afterwards.
                    tab = ((ForwardingNode<K,V>)e).nextTable;
                    e = null;
                    pushState(t, i, n);
                    continue;
                }
                else if (e instanceof TreeBin)
                    e = ((TreeBin<K,V>)e).first;
                else
                    e = null;
            }
            if (stack != null)
                recoverState(n);
            else if ((index = i + baseSize) >= n)
                index = ++baseIndex;
        }
    }

    // Saves traversal state upon encountering a forwarding node;
    // reuses a previously popped TableStack node when available.
    private void pushState(Node<K,V>[] t, int i, int n) {
        TableStack<K,V> s = spare;
        if (s != null)
            spare = s.next;
        else
            s = new TableStack<K,V>();
        s.tab = t;
        s.length = n;
        s.index = i;
        s.next = stack;
        stack = s;
    }

    // Possibly pops saved traversal state after finishing a forwarded bin.
    private void recoverState(int n) {
        TableStack<K,V> s; int len;
        while ((s = stack) != null && (index += (len = s.length)) >= n) {
            n = len;
            index = s.index;
            tab = s.tab;
            s.tab = null;
            TableStack<K,V> next = s.next;
            s.next = spare; // save for reuse
            stack = next;
            spare = s;
        }
        if (s == null && (index += baseSize) >= n)
            index = ++baseIndex;
    }
}

/*
 * Task classes. Coded in a regular but ugly format/style to
 * simplify checks that each variant differs in the right way from
 * others. The null screenings exist because compilers cannot tell
 * that we've already null-checked task arguments, so we force
 * simplest hoisted bypass to help avoid convoluted traps.
 */

/** Bulk task applying a given action to each key. */
@SuppressWarnings("serial")
static final class ForEachKeyTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super K> action;
    ForEachKeyTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super K> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super K> action;
        if ((action = this.action) != null) {
            // While the batch budget allows, fork the upper half [h, f)
            // of the remaining range and keep the lower half here.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachKeyTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
            propagateCompletion();
        }
    }
}

/** Bulk task applying a given action to each value. */
@SuppressWarnings("serial")
static final class ForEachValueTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super V> action;
    ForEachValueTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super V> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<?
super V> action;
        if ((action = this.action) != null) {
            // Split off the upper half of the range while batch allows.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachValueTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
            propagateCompletion();
        }
    }
}

/** Bulk task applying a given action to each entry. */
@SuppressWarnings("serial")
static final class ForEachEntryTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super Entry<K,V>> action;
    ForEachEntryTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super Entry<K,V>> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super Entry<K,V>> action;
        if ((action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachEntryTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(p);   // Nodes are themselves Map.Entry views
            propagateCompletion();
        }
    }
}

/** Bulk task applying a given action to each (key, value) pair. */
@SuppressWarnings("serial")
static final class ForEachMappingTask<K,V>
    extends BulkTask<K,V,Void> {
    final BiConsumer<? super K, ? super V> action;
    ForEachMappingTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiConsumer<? super K,? super V> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final BiConsumer<? super K, ? super V> action;
        if ((action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachMappingTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(p.key, p.val);
            propagateCompletion();
        }
    }
}

/**
 * Bulk task applying a transformation to each key and feeding
 * non-null results to a given action.
 */
@SuppressWarnings("serial")
static final class ForEachTransformedKeyTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<? super K, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedKeyTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedKeyTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // null transformer results are skipped, not passed on.
                if ((u = transformer.apply(p.key)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}

/**
 * Bulk task applying a transformation to each value and feeding
 * non-null results to a given action.
 */
@SuppressWarnings("serial")
static final class ForEachTransformedValueTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<? super V, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedValueTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super V, ? extends U> transformer, Consumer<?
super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            // Split off the upper half of the range while batch allows.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedValueTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // null transformer results are skipped, not passed on.
                if ((u = transformer.apply(p.val)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}

/**
 * Bulk task applying a transformation to each entry and feeding
 * non-null results to a given action.
 */
@SuppressWarnings("serial")
static final class ForEachTransformedEntryTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<Map.Entry<K,V>, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedEntryTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedEntryTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}

/**
 * Bulk task applying a transformation to each (key, value) pair and
 * feeding non-null results to a given action.
 */
@SuppressWarnings("serial")
static final class ForEachTransformedMappingTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final BiFunction<? super K, ? super V, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedMappingTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiFunction<? super K, ? super V, ? extends U> transformer,
         Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedMappingTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.key, p.val)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}

/**
 * Bulk task searching keys for a non-null result of the search
 * function; the first result found wins.
 */
@SuppressWarnings("serial")
static final class SearchKeysTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super K, ?
extends U> searchFunction;
    final AtomicReference<U> result;    // shared across all subtasks
    SearchKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super K, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Another task already found a result: stop splitting.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchKeysTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan until a result is published (here or elsewhere) or
            // this task's range is exhausted.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.key)) != null) {
                    // Only the first successful CAS completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}

/**
 * Bulk task searching values for a non-null result of the search
 * function; the first result found wins.
 */
@SuppressWarnings("serial")
static final class SearchValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;    // shared across all subtasks
    SearchValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchValuesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.val)) != null) {
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}

/**
 * Bulk task searching entries for a non-null result of the search
 * function; the first result found wins.
 */
@SuppressWarnings("serial")
static final class SearchEntriesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<Entry<K,V>, ? extends U> searchFunction;
    final AtomicReference<U> result;    // shared across all subtasks
    SearchEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<Entry<K,V>, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<Entry<K,V>, ?
extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Another task already found a result: stop splitting.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchEntriesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p)) != null) {
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    return;
                }
            }
        }
    }
}

/**
 * Bulk task searching (key, value) pairs for a non-null result of
 * the search function; the first result found wins.
 */
@SuppressWarnings("serial")
static final class SearchMappingsTask<K,V,U>
    extends BulkTask<K,V,U> {
    final BiFunction<? super K, ? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;    // shared across all subtasks
    SearchMappingsTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiFunction<? super K, ? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchMappingsTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.key, p.val)) != null) {
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}

/** Bulk task reducing all keys with a given reducer. */
@SuppressWarnings("serial")
static final class ReduceKeysTask<K,V>
    extends BulkTask<K,V,K> {
    final BiFunction<? super K, ? super K, ? extends K> reducer;
    K result;
    ReduceKeysTask<K,V> rights, nextRight;  // chain of forked right subtasks
    ReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceKeysTask<K,V> nextRight,
         BiFunction<? super K, ? super K, ? extends K> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final K getRawResult() { return result; }
    public final void compute() {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        if ((reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceKeysTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            K r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                K u = p.key;
                r = (r == null) ? u : u == null ?
r : reducer.apply(r, u);
            }
            result = r;
            CountedCompleter<?> c;
            // On completion, fold results from forked right subtasks
            // into each completed task along the completer chain.
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceKeysTask<K,V>
                    t = (ReduceKeysTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    K tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task reducing all values with a given reducer. */
@SuppressWarnings("serial")
static final class ReduceValuesTask<K,V>
    extends BulkTask<K,V,V> {
    final BiFunction<? super V, ? super V, ? extends V> reducer;
    V result;
    ReduceValuesTask<K,V> rights, nextRight;  // chain of forked right subtasks
    ReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceValuesTask<K,V> nextRight,
         BiFunction<? super V, ? super V, ? extends V> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final V getRawResult() { return result; }
    public final void compute() {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        if ((reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceValuesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            V r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                V v = p.val;
                r = (r == null) ? v : reducer.apply(r, v);
            }
            result = r;
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceValuesTask<K,V>
                    t = (ReduceValuesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    V tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task reducing all entries with a given reducer. */
@SuppressWarnings("serial")
static final class ReduceEntriesTask<K,V>
    extends BulkTask<K,V,Map.Entry<K,V>> {
    final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
    Map.Entry<K,V> result;
    ReduceEntriesTask<K,V> rights, nextRight;  // chain of forked right subtasks
    ReduceEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceEntriesTask<K,V> nextRight,
         BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final Map.Entry<K,V> getRawResult() { return result; }
    public final void compute() {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        if ((reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceEntriesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            Map.Entry<K,V> r = null;
            for (Node<K,V> p; (p = advance()) != null; )
                r = (r == null) ? p : reducer.apply(r, p);
            result = r;
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceEntriesTask<K,V>
                    t = (ReduceEntriesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    Map.Entry<K,V> tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task mapping each key then reducing the non-null results. */
@SuppressWarnings("serial")
static final class MapReduceKeysTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super K, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ?
extends U> reducer;
    U result;
    MapReduceKeysTask<K,V,U> rights, nextRight;  // chain of forked right subtasks
    MapReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysTask<K,V,U> nextRight,
         Function<? super K, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // null transformer results are skipped in the reduction.
                if ((u = transformer.apply(p.key)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            CountedCompleter<?> c;
            // Fold results from forked right subtasks on completion.
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceKeysTask<K,V,U>
                    t = (MapReduceKeysTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task mapping each value then reducing the non-null results. */
@SuppressWarnings("serial")
static final class MapReduceValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super V, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceValuesTask<K,V,U> rights, nextRight;  // chain of forked right subtasks
    MapReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesTask<K,V,U> nextRight,
         Function<? super V, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.val)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceValuesTask<K,V,U>
                    t = (MapReduceValuesTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task mapping each entry then reducing the non-null results. */
@SuppressWarnings("serial")
static final class MapReduceEntriesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<Map.Entry<K,V>, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ?
extends U> reducer;
    U result;
    MapReduceEntriesTask<K,V,U> rights, nextRight;  // chain of forked right subtasks
    MapReduceEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceEntriesTask<K,V,U> nextRight,
         Function<Map.Entry<K,V>, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceEntriesTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // null transformer results are skipped in the reduction.
                if ((u = transformer.apply(p)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            CountedCompleter<?> c;
            // Fold results from forked right subtasks on completion.
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceEntriesTask<K,V,U>
                    t = (MapReduceEntriesTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task mapping each (key, value) pair then reducing the non-null results. */
@SuppressWarnings("serial")
static final class MapReduceMappingsTask<K,V,U>
    extends BulkTask<K,V,U> {
    final BiFunction<? super K, ? super V, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ? extends U> reducer;
    U result;
    MapReduceMappingsTask<K,V,U> rights, nextRight;  // chain of forked right subtasks
    MapReduceMappingsTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceMappingsTask<K,V,U> nextRight,
         BiFunction<? super K, ? super V, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceMappingsTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.key, p.val)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceMappingsTask<K,V,U>
                    t = (MapReduceMappingsTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}

/** Bulk task mapping each key to a double and reducing with an identity basis. */
@SuppressWarnings("serial")
static final class MapReduceKeysToDoubleTask<K,V>
    extends BulkTask<K,V,Double> {
    final ToDoubleFunction<?
super K> transformer; 5736 final DoubleBinaryOperator reducer; 5737 final double basis; 5738 double result; 5739 MapReduceKeysToDoubleTask<K,V> rights, nextRight; 5740 MapReduceKeysToDoubleTask 5741 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5742 MapReduceKeysToDoubleTask<K,V> nextRight, 5743 ToDoubleFunction<? super K> transformer, 5744 double basis, 5745 DoubleBinaryOperator reducer) { 5746 super(p, b, i, f, t); this.nextRight = nextRight; 5747 this.transformer = transformer; 5748 this.basis = basis; this.reducer = reducer; 5749 } 5750 public final Double getRawResult() { return result; } 5751 public final void compute() { 5752 final ToDoubleFunction<? super K> transformer; 5753 final DoubleBinaryOperator reducer; 5754 if ((transformer = this.transformer) != null && 5755 (reducer = this.reducer) != null) { 5756 double r = this.basis; 5757 for (int i = baseIndex, f, h; batch > 0 && 5758 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5759 addToPendingCount(1); 5760 (rights = new MapReduceKeysToDoubleTask<K,V> 5761 (this, batch >>>= 1, baseLimit = h, f, tab, 5762 rights, transformer, r, reducer)).fork(); 5763 } 5764 for (Node<K,V> p; (p = advance()) != null; ) 5765 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key)); 5766 result = r; 5767 CountedCompleter<?> c; 5768 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5769 @SuppressWarnings("unchecked") 5770 MapReduceKeysToDoubleTask<K,V> 5771 t = (MapReduceKeysToDoubleTask<K,V>)c, 5772 s = t.rights; 5773 while (s != null) { 5774 t.result = reducer.applyAsDouble(t.result, s.result); 5775 s = t.rights = s.nextRight; 5776 } 5777 } 5778 } 5779 } 5780 } 5781 5782 @SuppressWarnings("serial") 5783 static final class MapReduceValuesToDoubleTask<K,V> 5784 extends BulkTask<K,V,Double> { 5785 final ToDoubleFunction<? 
super V> transformer; 5786 final DoubleBinaryOperator reducer; 5787 final double basis; 5788 double result; 5789 MapReduceValuesToDoubleTask<K,V> rights, nextRight; 5790 MapReduceValuesToDoubleTask 5791 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5792 MapReduceValuesToDoubleTask<K,V> nextRight, 5793 ToDoubleFunction<? super V> transformer, 5794 double basis, 5795 DoubleBinaryOperator reducer) { 5796 super(p, b, i, f, t); this.nextRight = nextRight; 5797 this.transformer = transformer; 5798 this.basis = basis; this.reducer = reducer; 5799 } 5800 public final Double getRawResult() { return result; } 5801 public final void compute() { 5802 final ToDoubleFunction<? super V> transformer; 5803 final DoubleBinaryOperator reducer; 5804 if ((transformer = this.transformer) != null && 5805 (reducer = this.reducer) != null) { 5806 double r = this.basis; 5807 for (int i = baseIndex, f, h; batch > 0 && 5808 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5809 addToPendingCount(1); 5810 (rights = new MapReduceValuesToDoubleTask<K,V> 5811 (this, batch >>>= 1, baseLimit = h, f, tab, 5812 rights, transformer, r, reducer)).fork(); 5813 } 5814 for (Node<K,V> p; (p = advance()) != null; ) 5815 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); 5816 result = r; 5817 CountedCompleter<?> c; 5818 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5819 @SuppressWarnings("unchecked") 5820 MapReduceValuesToDoubleTask<K,V> 5821 t = (MapReduceValuesToDoubleTask<K,V>)c, 5822 s = t.rights; 5823 while (s != null) { 5824 t.result = reducer.applyAsDouble(t.result, s.result); 5825 s = t.rights = s.nextRight; 5826 } 5827 } 5828 } 5829 } 5830 } 5831 5832 @SuppressWarnings("serial") 5833 static final class MapReduceEntriesToDoubleTask<K,V> 5834 extends BulkTask<K,V,Double> { 5835 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5836 final DoubleBinaryOperator reducer; 5837 final double basis; 5838 double result; 5839 MapReduceEntriesToDoubleTask<K,V> rights, nextRight; 
5840 MapReduceEntriesToDoubleTask 5841 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5842 MapReduceEntriesToDoubleTask<K,V> nextRight, 5843 ToDoubleFunction<Map.Entry<K,V>> transformer, 5844 double basis, 5845 DoubleBinaryOperator reducer) { 5846 super(p, b, i, f, t); this.nextRight = nextRight; 5847 this.transformer = transformer; 5848 this.basis = basis; this.reducer = reducer; 5849 } 5850 public final Double getRawResult() { return result; } 5851 public final void compute() { 5852 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5853 final DoubleBinaryOperator reducer; 5854 if ((transformer = this.transformer) != null && 5855 (reducer = this.reducer) != null) { 5856 double r = this.basis; 5857 for (int i = baseIndex, f, h; batch > 0 && 5858 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5859 addToPendingCount(1); 5860 (rights = new MapReduceEntriesToDoubleTask<K,V> 5861 (this, batch >>>= 1, baseLimit = h, f, tab, 5862 rights, transformer, r, reducer)).fork(); 5863 } 5864 for (Node<K,V> p; (p = advance()) != null; ) 5865 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); 5866 result = r; 5867 CountedCompleter<?> c; 5868 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5869 @SuppressWarnings("unchecked") 5870 MapReduceEntriesToDoubleTask<K,V> 5871 t = (MapReduceEntriesToDoubleTask<K,V>)c, 5872 s = t.rights; 5873 while (s != null) { 5874 t.result = reducer.applyAsDouble(t.result, s.result); 5875 s = t.rights = s.nextRight; 5876 } 5877 } 5878 } 5879 } 5880 } 5881 5882 @SuppressWarnings("serial") 5883 static final class MapReduceMappingsToDoubleTask<K,V> 5884 extends BulkTask<K,V,Double> { 5885 final ToDoubleBiFunction<? super K, ? 
super V> transformer; 5886 final DoubleBinaryOperator reducer; 5887 final double basis; 5888 double result; 5889 MapReduceMappingsToDoubleTask<K,V> rights, nextRight; 5890 MapReduceMappingsToDoubleTask 5891 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5892 MapReduceMappingsToDoubleTask<K,V> nextRight, 5893 ToDoubleBiFunction<? super K, ? super V> transformer, 5894 double basis, 5895 DoubleBinaryOperator reducer) { 5896 super(p, b, i, f, t); this.nextRight = nextRight; 5897 this.transformer = transformer; 5898 this.basis = basis; this.reducer = reducer; 5899 } 5900 public final Double getRawResult() { return result; } 5901 public final void compute() { 5902 final ToDoubleBiFunction<? super K, ? super V> transformer; 5903 final DoubleBinaryOperator reducer; 5904 if ((transformer = this.transformer) != null && 5905 (reducer = this.reducer) != null) { 5906 double r = this.basis; 5907 for (int i = baseIndex, f, h; batch > 0 && 5908 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5909 addToPendingCount(1); 5910 (rights = new MapReduceMappingsToDoubleTask<K,V> 5911 (this, batch >>>= 1, baseLimit = h, f, tab, 5912 rights, transformer, r, reducer)).fork(); 5913 } 5914 for (Node<K,V> p; (p = advance()) != null; ) 5915 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val)); 5916 result = r; 5917 CountedCompleter<?> c; 5918 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5919 @SuppressWarnings("unchecked") 5920 MapReduceMappingsToDoubleTask<K,V> 5921 t = (MapReduceMappingsToDoubleTask<K,V>)c, 5922 s = t.rights; 5923 while (s != null) { 5924 t.result = reducer.applyAsDouble(t.result, s.result); 5925 s = t.rights = s.nextRight; 5926 } 5927 } 5928 } 5929 } 5930 } 5931 5932 @SuppressWarnings("serial") 5933 static final class MapReduceKeysToLongTask<K,V> 5934 extends BulkTask<K,V,Long> { 5935 final ToLongFunction<? 
super K> transformer; 5936 final LongBinaryOperator reducer; 5937 final long basis; 5938 long result; 5939 MapReduceKeysToLongTask<K,V> rights, nextRight; 5940 MapReduceKeysToLongTask 5941 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5942 MapReduceKeysToLongTask<K,V> nextRight, 5943 ToLongFunction<? super K> transformer, 5944 long basis, 5945 LongBinaryOperator reducer) { 5946 super(p, b, i, f, t); this.nextRight = nextRight; 5947 this.transformer = transformer; 5948 this.basis = basis; this.reducer = reducer; 5949 } 5950 public final Long getRawResult() { return result; } 5951 public final void compute() { 5952 final ToLongFunction<? super K> transformer; 5953 final LongBinaryOperator reducer; 5954 if ((transformer = this.transformer) != null && 5955 (reducer = this.reducer) != null) { 5956 long r = this.basis; 5957 for (int i = baseIndex, f, h; batch > 0 && 5958 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5959 addToPendingCount(1); 5960 (rights = new MapReduceKeysToLongTask<K,V> 5961 (this, batch >>>= 1, baseLimit = h, f, tab, 5962 rights, transformer, r, reducer)).fork(); 5963 } 5964 for (Node<K,V> p; (p = advance()) != null; ) 5965 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key)); 5966 result = r; 5967 CountedCompleter<?> c; 5968 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5969 @SuppressWarnings("unchecked") 5970 MapReduceKeysToLongTask<K,V> 5971 t = (MapReduceKeysToLongTask<K,V>)c, 5972 s = t.rights; 5973 while (s != null) { 5974 t.result = reducer.applyAsLong(t.result, s.result); 5975 s = t.rights = s.nextRight; 5976 } 5977 } 5978 } 5979 } 5980 } 5981 5982 @SuppressWarnings("serial") 5983 static final class MapReduceValuesToLongTask<K,V> 5984 extends BulkTask<K,V,Long> { 5985 final ToLongFunction<? 
super V> transformer; 5986 final LongBinaryOperator reducer; 5987 final long basis; 5988 long result; 5989 MapReduceValuesToLongTask<K,V> rights, nextRight; 5990 MapReduceValuesToLongTask 5991 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5992 MapReduceValuesToLongTask<K,V> nextRight, 5993 ToLongFunction<? super V> transformer, 5994 long basis, 5995 LongBinaryOperator reducer) { 5996 super(p, b, i, f, t); this.nextRight = nextRight; 5997 this.transformer = transformer; 5998 this.basis = basis; this.reducer = reducer; 5999 } 6000 public final Long getRawResult() { return result; } 6001 public final void compute() { 6002 final ToLongFunction<? super V> transformer; 6003 final LongBinaryOperator reducer; 6004 if ((transformer = this.transformer) != null && 6005 (reducer = this.reducer) != null) { 6006 long r = this.basis; 6007 for (int i = baseIndex, f, h; batch > 0 && 6008 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6009 addToPendingCount(1); 6010 (rights = new MapReduceValuesToLongTask<K,V> 6011 (this, batch >>>= 1, baseLimit = h, f, tab, 6012 rights, transformer, r, reducer)).fork(); 6013 } 6014 for (Node<K,V> p; (p = advance()) != null; ) 6015 r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); 6016 result = r; 6017 CountedCompleter<?> c; 6018 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6019 @SuppressWarnings("unchecked") 6020 MapReduceValuesToLongTask<K,V> 6021 t = (MapReduceValuesToLongTask<K,V>)c, 6022 s = t.rights; 6023 while (s != null) { 6024 t.result = reducer.applyAsLong(t.result, s.result); 6025 s = t.rights = s.nextRight; 6026 } 6027 } 6028 } 6029 } 6030 } 6031 6032 @SuppressWarnings("serial") 6033 static final class MapReduceEntriesToLongTask<K,V> 6034 extends BulkTask<K,V,Long> { 6035 final ToLongFunction<Map.Entry<K,V>> transformer; 6036 final LongBinaryOperator reducer; 6037 final long basis; 6038 long result; 6039 MapReduceEntriesToLongTask<K,V> rights, nextRight; 6040 MapReduceEntriesToLongTask 6041 (BulkTask<K,V,?> 
p, int b, int i, int f, Node<K,V>[] t, 6042 MapReduceEntriesToLongTask<K,V> nextRight, 6043 ToLongFunction<Map.Entry<K,V>> transformer, 6044 long basis, 6045 LongBinaryOperator reducer) { 6046 super(p, b, i, f, t); this.nextRight = nextRight; 6047 this.transformer = transformer; 6048 this.basis = basis; this.reducer = reducer; 6049 } 6050 public final Long getRawResult() { return result; } 6051 public final void compute() { 6052 final ToLongFunction<Map.Entry<K,V>> transformer; 6053 final LongBinaryOperator reducer; 6054 if ((transformer = this.transformer) != null && 6055 (reducer = this.reducer) != null) { 6056 long r = this.basis; 6057 for (int i = baseIndex, f, h; batch > 0 && 6058 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6059 addToPendingCount(1); 6060 (rights = new MapReduceEntriesToLongTask<K,V> 6061 (this, batch >>>= 1, baseLimit = h, f, tab, 6062 rights, transformer, r, reducer)).fork(); 6063 } 6064 for (Node<K,V> p; (p = advance()) != null; ) 6065 r = reducer.applyAsLong(r, transformer.applyAsLong(p)); 6066 result = r; 6067 CountedCompleter<?> c; 6068 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6069 @SuppressWarnings("unchecked") 6070 MapReduceEntriesToLongTask<K,V> 6071 t = (MapReduceEntriesToLongTask<K,V>)c, 6072 s = t.rights; 6073 while (s != null) { 6074 t.result = reducer.applyAsLong(t.result, s.result); 6075 s = t.rights = s.nextRight; 6076 } 6077 } 6078 } 6079 } 6080 } 6081 6082 @SuppressWarnings("serial") 6083 static final class MapReduceMappingsToLongTask<K,V> 6084 extends BulkTask<K,V,Long> { 6085 final ToLongBiFunction<? super K, ? super V> transformer; 6086 final LongBinaryOperator reducer; 6087 final long basis; 6088 long result; 6089 MapReduceMappingsToLongTask<K,V> rights, nextRight; 6090 MapReduceMappingsToLongTask 6091 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6092 MapReduceMappingsToLongTask<K,V> nextRight, 6093 ToLongBiFunction<? super K, ? 
super V> transformer, 6094 long basis, 6095 LongBinaryOperator reducer) { 6096 super(p, b, i, f, t); this.nextRight = nextRight; 6097 this.transformer = transformer; 6098 this.basis = basis; this.reducer = reducer; 6099 } 6100 public final Long getRawResult() { return result; } 6101 public final void compute() { 6102 final ToLongBiFunction<? super K, ? super V> transformer; 6103 final LongBinaryOperator reducer; 6104 if ((transformer = this.transformer) != null && 6105 (reducer = this.reducer) != null) { 6106 long r = this.basis; 6107 for (int i = baseIndex, f, h; batch > 0 && 6108 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6109 addToPendingCount(1); 6110 (rights = new MapReduceMappingsToLongTask<K,V> 6111 (this, batch >>>= 1, baseLimit = h, f, tab, 6112 rights, transformer, r, reducer)).fork(); 6113 } 6114 for (Node<K,V> p; (p = advance()) != null; ) 6115 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val)); 6116 result = r; 6117 CountedCompleter<?> c; 6118 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6119 @SuppressWarnings("unchecked") 6120 MapReduceMappingsToLongTask<K,V> 6121 t = (MapReduceMappingsToLongTask<K,V>)c, 6122 s = t.rights; 6123 while (s != null) { 6124 t.result = reducer.applyAsLong(t.result, s.result); 6125 s = t.rights = s.nextRight; 6126 } 6127 } 6128 } 6129 } 6130 } 6131 6132 @SuppressWarnings("serial") 6133 static final class MapReduceKeysToIntTask<K,V> 6134 extends BulkTask<K,V,Integer> { 6135 final ToIntFunction<? super K> transformer; 6136 final IntBinaryOperator reducer; 6137 final int basis; 6138 int result; 6139 MapReduceKeysToIntTask<K,V> rights, nextRight; 6140 MapReduceKeysToIntTask 6141 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6142 MapReduceKeysToIntTask<K,V> nextRight, 6143 ToIntFunction<? 
super K> transformer, 6144 int basis, 6145 IntBinaryOperator reducer) { 6146 super(p, b, i, f, t); this.nextRight = nextRight; 6147 this.transformer = transformer; 6148 this.basis = basis; this.reducer = reducer; 6149 } 6150 public final Integer getRawResult() { return result; } 6151 public final void compute() { 6152 final ToIntFunction<? super K> transformer; 6153 final IntBinaryOperator reducer; 6154 if ((transformer = this.transformer) != null && 6155 (reducer = this.reducer) != null) { 6156 int r = this.basis; 6157 for (int i = baseIndex, f, h; batch > 0 && 6158 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6159 addToPendingCount(1); 6160 (rights = new MapReduceKeysToIntTask<K,V> 6161 (this, batch >>>= 1, baseLimit = h, f, tab, 6162 rights, transformer, r, reducer)).fork(); 6163 } 6164 for (Node<K,V> p; (p = advance()) != null; ) 6165 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key)); 6166 result = r; 6167 CountedCompleter<?> c; 6168 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6169 @SuppressWarnings("unchecked") 6170 MapReduceKeysToIntTask<K,V> 6171 t = (MapReduceKeysToIntTask<K,V>)c, 6172 s = t.rights; 6173 while (s != null) { 6174 t.result = reducer.applyAsInt(t.result, s.result); 6175 s = t.rights = s.nextRight; 6176 } 6177 } 6178 } 6179 } 6180 } 6181 6182 @SuppressWarnings("serial") 6183 static final class MapReduceValuesToIntTask<K,V> 6184 extends BulkTask<K,V,Integer> { 6185 final ToIntFunction<? super V> transformer; 6186 final IntBinaryOperator reducer; 6187 final int basis; 6188 int result; 6189 MapReduceValuesToIntTask<K,V> rights, nextRight; 6190 MapReduceValuesToIntTask 6191 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6192 MapReduceValuesToIntTask<K,V> nextRight, 6193 ToIntFunction<? 
super V> transformer, 6194 int basis, 6195 IntBinaryOperator reducer) { 6196 super(p, b, i, f, t); this.nextRight = nextRight; 6197 this.transformer = transformer; 6198 this.basis = basis; this.reducer = reducer; 6199 } 6200 public final Integer getRawResult() { return result; } 6201 public final void compute() { 6202 final ToIntFunction<? super V> transformer; 6203 final IntBinaryOperator reducer; 6204 if ((transformer = this.transformer) != null && 6205 (reducer = this.reducer) != null) { 6206 int r = this.basis; 6207 for (int i = baseIndex, f, h; batch > 0 && 6208 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6209 addToPendingCount(1); 6210 (rights = new MapReduceValuesToIntTask<K,V> 6211 (this, batch >>>= 1, baseLimit = h, f, tab, 6212 rights, transformer, r, reducer)).fork(); 6213 } 6214 for (Node<K,V> p; (p = advance()) != null; ) 6215 r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); 6216 result = r; 6217 CountedCompleter<?> c; 6218 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6219 @SuppressWarnings("unchecked") 6220 MapReduceValuesToIntTask<K,V> 6221 t = (MapReduceValuesToIntTask<K,V>)c, 6222 s = t.rights; 6223 while (s != null) { 6224 t.result = reducer.applyAsInt(t.result, s.result); 6225 s = t.rights = s.nextRight; 6226 } 6227 } 6228 } 6229 } 6230 } 6231 6232 @SuppressWarnings("serial") 6233 static final class MapReduceEntriesToIntTask<K,V> 6234 extends BulkTask<K,V,Integer> { 6235 final ToIntFunction<Map.Entry<K,V>> transformer; 6236 final IntBinaryOperator reducer; 6237 final int basis; 6238 int result; 6239 MapReduceEntriesToIntTask<K,V> rights, nextRight; 6240 MapReduceEntriesToIntTask 6241 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6242 MapReduceEntriesToIntTask<K,V> nextRight, 6243 ToIntFunction<Map.Entry<K,V>> transformer, 6244 int basis, 6245 IntBinaryOperator reducer) { 6246 super(p, b, i, f, t); this.nextRight = nextRight; 6247 this.transformer = transformer; 6248 this.basis = basis; this.reducer = reducer; 6249 
} 6250 public final Integer getRawResult() { return result; } 6251 public final void compute() { 6252 final ToIntFunction<Map.Entry<K,V>> transformer; 6253 final IntBinaryOperator reducer; 6254 if ((transformer = this.transformer) != null && 6255 (reducer = this.reducer) != null) { 6256 int r = this.basis; 6257 for (int i = baseIndex, f, h; batch > 0 && 6258 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6259 addToPendingCount(1); 6260 (rights = new MapReduceEntriesToIntTask<K,V> 6261 (this, batch >>>= 1, baseLimit = h, f, tab, 6262 rights, transformer, r, reducer)).fork(); 6263 } 6264 for (Node<K,V> p; (p = advance()) != null; ) 6265 r = reducer.applyAsInt(r, transformer.applyAsInt(p)); 6266 result = r; 6267 CountedCompleter<?> c; 6268 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6269 @SuppressWarnings("unchecked") 6270 MapReduceEntriesToIntTask<K,V> 6271 t = (MapReduceEntriesToIntTask<K,V>)c, 6272 s = t.rights; 6273 while (s != null) { 6274 t.result = reducer.applyAsInt(t.result, s.result); 6275 s = t.rights = s.nextRight; 6276 } 6277 } 6278 } 6279 } 6280 } 6281 6282 @SuppressWarnings("serial") 6283 static final class MapReduceMappingsToIntTask<K,V> 6284 extends BulkTask<K,V,Integer> { 6285 final ToIntBiFunction<? super K, ? super V> transformer; 6286 final IntBinaryOperator reducer; 6287 final int basis; 6288 int result; 6289 MapReduceMappingsToIntTask<K,V> rights, nextRight; 6290 MapReduceMappingsToIntTask 6291 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6292 MapReduceMappingsToIntTask<K,V> nextRight, 6293 ToIntBiFunction<? super K, ? super V> transformer, 6294 int basis, 6295 IntBinaryOperator reducer) { 6296 super(p, b, i, f, t); this.nextRight = nextRight; 6297 this.transformer = transformer; 6298 this.basis = basis; this.reducer = reducer; 6299 } 6300 public final Integer getRawResult() { return result; } 6301 public final void compute() { 6302 final ToIntBiFunction<? super K, ? 
super V> transformer; 6303 final IntBinaryOperator reducer; 6304 if ((transformer = this.transformer) != null && 6305 (reducer = this.reducer) != null) { 6306 int r = this.basis; 6307 for (int i = baseIndex, f, h; batch > 0 && 6308 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6309 addToPendingCount(1); 6310 (rights = new MapReduceMappingsToIntTask<K,V> 6311 (this, batch >>>= 1, baseLimit = h, f, tab, 6312 rights, transformer, r, reducer)).fork(); 6313 } 6314 for (Node<K,V> p; (p = advance()) != null; ) 6315 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val)); 6316 result = r; 6317 CountedCompleter<?> c; 6318 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6319 @SuppressWarnings("unchecked") 6320 MapReduceMappingsToIntTask<K,V> 6321 t = (MapReduceMappingsToIntTask<K,V>)c, 6322 s = t.rights; 6323 while (s != null) { 6324 t.result = reducer.applyAsInt(t.result, s.result); 6325 s = t.rights = s.nextRight; 6326 } 6327 } 6328 } 6329 } 6330 } 6331 6332 // Unsafe mechanics 6333 private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe(); 6334 private static final long SIZECTL; 6335 private static final long TRANSFERINDEX; 6336 private static final long BASECOUNT; 6337 private static final long CELLSBUSY; 6338 private static final long CELLVALUE; 6339 private static final int ABASE; 6340 private static final int ASHIFT; 6341 6342 static { 6343 try { 6344 SIZECTL = U.objectFieldOffset 6345 (ConcurrentHashMap.class.getDeclaredField("sizeCtl")); 6346 TRANSFERINDEX = U.objectFieldOffset 6347 (ConcurrentHashMap.class.getDeclaredField("transferIndex")); 6348 BASECOUNT = U.objectFieldOffset 6349 (ConcurrentHashMap.class.getDeclaredField("baseCount")); 6350 CELLSBUSY = U.objectFieldOffset 6351 (ConcurrentHashMap.class.getDeclaredField("cellsBusy")); 6352 6353 CELLVALUE = U.objectFieldOffset 6354 (CounterCell.class.getDeclaredField("value")); 6355 6356 ABASE = U.arrayBaseOffset(Node[].class); 6357 int scale = U.arrayIndexScale(Node[].class); 6358 if 
((scale & (scale - 1)) != 0) 6359 throw new Error("array index scale not a power of two"); 6360 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); 6361 } catch (ReflectiveOperationException e) { 6362 throw new Error(e); 6363 } 6364 6365 // Reduce the risk of rare disastrous classloading in first call to 6366 // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773 6367 Class<?> ensureLoaded = LockSupport.class; 6368 } 6369 }