1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea with assistance from members of JCP JSR-166
  32  * Expert Group and released to the public domain, as explained at
  33  * http://creativecommons.org/publicdomain/zero/1.0/
  34  */
  35 
  36 package java.util.concurrent;
  37 
  38 import java.io.ObjectStreamField;
  39 import java.io.Serializable;
  40 import java.lang.reflect.ParameterizedType;
  41 import java.lang.reflect.Type;
  42 import java.util.AbstractMap;
  43 import java.util.Arrays;
  44 import java.util.Collection;
  45 import java.util.Enumeration;
  46 import java.util.HashMap;
  47 import java.util.Hashtable;
  48 import java.util.Iterator;
  49 import java.util.Map;
  50 import java.util.NoSuchElementException;
  51 import java.util.Set;
  52 import java.util.Spliterator;
  53 import java.util.concurrent.atomic.AtomicReference;
  54 import java.util.concurrent.locks.LockSupport;
  55 import java.util.concurrent.locks.ReentrantLock;
  56 import java.util.function.BiConsumer;
  57 import java.util.function.BiFunction;
  58 import java.util.function.Consumer;
  59 import java.util.function.DoubleBinaryOperator;
  60 import java.util.function.Function;
  61 import java.util.function.IntBinaryOperator;
  62 import java.util.function.LongBinaryOperator;
  63 import java.util.function.Predicate;
  64 import java.util.function.ToDoubleBiFunction;
  65 import java.util.function.ToDoubleFunction;
  66 import java.util.function.ToIntBiFunction;
  67 import java.util.function.ToIntFunction;
  68 import java.util.function.ToLongBiFunction;
  69 import java.util.function.ToLongFunction;
  70 import java.util.stream.Stream;
  71 import jdk.internal.misc.Unsafe;
  72 
  73 /**
  74  * A hash table supporting full concurrency of retrievals and
  75  * high expected concurrency for updates. This class obeys the
  76  * same functional specification as {@link java.util.Hashtable}, and
  77  * includes versions of methods corresponding to each method of
  78  * {@code Hashtable}. However, even though all operations are
  79  * thread-safe, retrieval operations do <em>not</em> entail locking,
  80  * and there is <em>not</em> any support for locking the entire table
  81  * in a way that prevents all access.  This class is fully
  82  * interoperable with {@code Hashtable} in programs that rely on its
  83  * thread safety but not on its synchronization details.
  84  *
  85  * <p>Retrieval operations (including {@code get}) generally do not
  86  * block, so may overlap with update operations (including {@code put}
  87  * and {@code remove}). Retrievals reflect the results of the most
  88  * recently <em>completed</em> update operations holding upon their
  89  * onset. (More formally, an update operation for a given key bears a
  90  * <em>happens-before</em> relation with any (non-null) retrieval for
  91  * that key reporting the updated value.)  For aggregate operations
  92  * such as {@code putAll} and {@code clear}, concurrent retrievals may
  93  * reflect insertion or removal of only some entries.  Similarly,
  94  * Iterators, Spliterators and Enumerations return elements reflecting the
  95  * state of the hash table at some point at or since the creation of the
  96  * iterator/enumeration.  They do <em>not</em> throw {@link
  97  * java.util.ConcurrentModificationException ConcurrentModificationException}.
  98  * However, iterators are designed to be used by only one thread at a time.
  99  * Bear in mind that the results of aggregate status methods including
 100  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
 101  * useful only when a map is not undergoing concurrent updates in other threads.
 102  * Otherwise the results of these methods reflect transient states
 103  * that may be adequate for monitoring or estimation purposes, but not
 104  * for program control.
 105  *
 106  * <p>The table is dynamically expanded when there are too many
 107  * collisions (i.e., keys that have distinct hash codes but fall into
 108  * the same slot modulo the table size), with the expected average
 109  * effect of maintaining roughly two bins per mapping (corresponding
 110  * to a 0.75 load factor threshold for resizing). There may be much
 111  * variance around this average as mappings are added and removed, but
 112  * overall, this maintains a commonly accepted time/space tradeoff for
 113  * hash tables.  However, resizing this or any other kind of hash
 114  * table may be a relatively slow operation. When possible, it is a
 115  * good idea to provide a size estimate as an optional {@code
 116  * initialCapacity} constructor argument. An additional optional
 117  * {@code loadFactor} constructor argument provides a further means of
 118  * customizing initial table capacity by specifying the table density
 119  * to be used in calculating the amount of space to allocate for the
 120  * given number of elements.  Also, for compatibility with previous
 121  * versions of this class, constructors may optionally specify an
 122  * expected {@code concurrencyLevel} as an additional hint for
 123  * internal sizing.  Note that using many keys with exactly the same
 124  * {@code hashCode()} is a sure way to slow down performance of any
 125  * hash table. To ameliorate impact, when keys are {@link Comparable},
 126  * this class may use comparison order among keys to help break ties.
 127  *
 128  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
 129  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 * (using {@link #keySet(Object)}) when only keys are of interest, and the
 131  * mapped values are (perhaps transiently) not used or all take the
 132  * same mapping value.
 133  *
 134  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
 135  * form of histogram or multiset) by using {@link
 136  * java.util.concurrent.atomic.LongAdder} values and initializing via
 137  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 138  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 139  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
 140  *
 141  * <p>This class and its views and iterators implement all of the
 142  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 143  * interfaces.
 144  *
 145  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
 146  * does <em>not</em> allow {@code null} to be used as a key or value.
 147  *
 148  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
 149  * operations that, unlike most {@link Stream} methods, are designed
 150  * to be safely, and often sensibly, applied even with maps that are
 151  * being concurrently updated by other threads; for example, when
 152  * computing a snapshot summary of the values in a shared registry.
 153  * There are three kinds of operation, each with four forms, accepting
 154  * functions with keys, values, entries, and (key, value) pairs as
 155  * arguments and/or return values. Because the elements of a
 156  * ConcurrentHashMap are not ordered in any particular way, and may be
 157  * processed in different orders in different parallel executions, the
 158  * correctness of supplied functions should not depend on any
 159  * ordering, or on any other objects or values that may transiently
 160  * change while computation is in progress; and except for forEach
 161  * actions, should ideally be side-effect-free. Bulk operations on
 162  * {@link java.util.Map.Entry} objects do not support method {@code
 163  * setValue}.
 164  *
 165  * <ul>
 166  * <li>forEach: Performs a given action on each element.
 167  * A variant form applies a given transformation on each element
 168  * before performing the action.
 169  *
 170  * <li>search: Returns the first available non-null result of
 171  * applying a given function on each element; skipping further
 172  * search when a result is found.
 173  *
 174  * <li>reduce: Accumulates each element.  The supplied reduction
 175  * function cannot rely on ordering (more formally, it should be
 176  * both associative and commutative).  There are five variants:
 177  *
 178  * <ul>
 179  *
 180  * <li>Plain reductions. (There is not a form of this method for
 181  * (key, value) function arguments since there is no corresponding
 182  * return type.)
 183  *
 184  * <li>Mapped reductions that accumulate the results of a given
 185  * function applied to each element.
 186  *
 187  * <li>Reductions to scalar doubles, longs, and ints, using a
 188  * given basis value.
 189  *
 190  * </ul>
 191  * </ul>
 192  *
 193  * <p>These bulk operations accept a {@code parallelismThreshold}
 194  * argument. Methods proceed sequentially if the current map size is
 195  * estimated to be less than the given threshold. Using a value of
 196  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
 197  * of {@code 1} results in maximal parallelism by partitioning into
 198  * enough subtasks to fully utilize the {@link
 199  * ForkJoinPool#commonPool()} that is used for all parallel
 200  * computations. Normally, you would initially choose one of these
 201  * extreme values, and then measure performance of using in-between
 202  * values that trade off overhead versus throughput.
 203  *
 204  * <p>The concurrency properties of bulk operations follow
 205  * from those of ConcurrentHashMap: Any non-null result returned
 206  * from {@code get(key)} and related access methods bears a
 207  * happens-before relation with the associated insertion or
 208  * update.  The result of any bulk operation reflects the
 209  * composition of these per-element relations (but is not
 210  * necessarily atomic with respect to the map as a whole unless it
 211  * is somehow known to be quiescent).  Conversely, because keys
 212  * and values in the map are never null, null serves as a reliable
 213  * atomic indicator of the current lack of any result.  To
 214  * maintain this property, null serves as an implicit basis for
 215  * all non-scalar reduction operations. For the double, long, and
 216  * int versions, the basis should be one that, when combined with
 217  * any other value, returns that other value (more formally, it
 218  * should be the identity element for the reduction). Most common
 219  * reductions have these properties; for example, computing a sum
 220  * with basis 0 or a minimum with basis MAX_VALUE.
 221  *
 222  * <p>Search and transformation functions provided as arguments
 223  * should similarly return null to indicate the lack of any result
 224  * (in which case it is not used). In the case of mapped
 225  * reductions, this also enables transformations to serve as
 226  * filters, returning null (or, in the case of primitive
 227  * specializations, the identity basis) if the element should not
 228  * be combined. You can create compound transformations and
 229  * filterings by composing them yourself under this "null means
 230  * there is nothing there now" rule before using them in search or
 231  * reduce operations.
 232  *
 233  * <p>Methods accepting and/or returning Entry arguments maintain
 234  * key-value associations. They may be useful for example when
 235  * finding the key for the greatest value. Note that "plain" Entry
 236  * arguments can be supplied using {@code new
 237  * AbstractMap.SimpleEntry(k,v)}.
 238  *
 239  * <p>Bulk operations may complete abruptly, throwing an
 240  * exception encountered in the application of a supplied
 241  * function. Bear in mind when handling such exceptions that other
 242  * concurrently executing functions could also have thrown
 243  * exceptions, or would have done so if the first exception had
 244  * not occurred.
 245  *
 246  * <p>Speedups for parallel compared to sequential forms are common
 247  * but not guaranteed.  Parallel operations involving brief functions
 248  * on small maps may execute more slowly than sequential forms if the
 249  * underlying work to parallelize the computation is more expensive
 250  * than the computation itself.  Similarly, parallelization may not
 251  * lead to much actual parallelism if all processors are busy
 252  * performing unrelated tasks.
 253  *
 254  * <p>All arguments to all task methods must be non-null.
 255  *
 256  * <p>This class is a member of the
 257  * <a href="{@docRoot}/java/util/package-summary.html#CollectionsFramework">
 258  * Java Collections Framework</a>.
 259  *
 260  * @since 1.5
 261  * @author Doug Lea
 262  * @param <K> the type of keys maintained by this map
 263  * @param <V> the type of mapped values
 264  */
 265 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
 266     implements ConcurrentMap<K,V>, Serializable {
    /** Serialization version, fixed so serialized forms interoperate across releases. */
    private static final long serialVersionUID = 7249069246763182397L;
 268 
 269     /*
 270      * Overview:
 271      *
 272      * The primary design goal of this hash table is to maintain
 273      * concurrent readability (typically method get(), but also
 274      * iterators and related methods) while minimizing update
 275      * contention. Secondary goals are to keep space consumption about
 276      * the same or better than java.util.HashMap, and to support high
 277      * initial insertion rates on an empty table by many threads.
 278      *
 279      * This map usually acts as a binned (bucketed) hash table.  Each
 280      * key-value mapping is held in a Node.  Most nodes are instances
 281      * of the basic Node class with hash, key, value, and next
 282      * fields. However, various subclasses exist: TreeNodes are
 283      * arranged in balanced trees, not lists.  TreeBins hold the roots
 284      * of sets of TreeNodes. ForwardingNodes are placed at the heads
 285      * of bins during resizing. ReservationNodes are used as
 286      * placeholders while establishing values in computeIfAbsent and
 287      * related methods.  The types TreeBin, ForwardingNode, and
 288      * ReservationNode do not hold normal user keys, values, or
 289      * hashes, and are readily distinguishable during search etc
 290      * because they have negative hash fields and null key and value
 291      * fields. (These special nodes are either uncommon or transient,
 292      * so the impact of carrying around some unused fields is
 293      * insignificant.)
 294      *
 295      * The table is lazily initialized to a power-of-two size upon the
 296      * first insertion.  Each bin in the table normally contains a
 297      * list of Nodes (most often, the list has only zero or one Node).
 298      * Table accesses require volatile/atomic reads, writes, and
 299      * CASes.  Because there is no other way to arrange this without
 300      * adding further indirections, we use intrinsics
 301      * (jdk.internal.misc.Unsafe) operations.
 302      *
 303      * We use the top (sign) bit of Node hash fields for control
 304      * purposes -- it is available anyway because of addressing
 305      * constraints.  Nodes with negative hash fields are specially
 306      * handled or ignored in map methods.
 307      *
 308      * Insertion (via put or its variants) of the first node in an
 309      * empty bin is performed by just CASing it to the bin.  This is
 310      * by far the most common case for put operations under most
 311      * key/hash distributions.  Other update operations (insert,
 312      * delete, and replace) require locks.  We do not want to waste
 313      * the space required to associate a distinct lock object with
 314      * each bin, so instead use the first node of a bin list itself as
 315      * a lock. Locking support for these locks relies on builtin
 316      * "synchronized" monitors.
 317      *
 318      * Using the first node of a list as a lock does not by itself
 319      * suffice though: When a node is locked, any update must first
 320      * validate that it is still the first node after locking it, and
 321      * retry if not. Because new nodes are always appended to lists,
 322      * once a node is first in a bin, it remains first until deleted
 323      * or the bin becomes invalidated (upon resizing).
 324      *
 325      * The main disadvantage of per-bin locks is that other update
 326      * operations on other nodes in a bin list protected by the same
 327      * lock can stall, for example when user equals() or mapping
 328      * functions take a long time.  However, statistically, under
 329      * random hash codes, this is not a common problem.  Ideally, the
 330      * frequency of nodes in bins follows a Poisson distribution
 331      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
 332      * parameter of about 0.5 on average, given the resizing threshold
 333      * of 0.75, although with a large variance because of resizing
 334      * granularity. Ignoring variance, the expected occurrences of
 335      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
 336      * first values are:
 337      *
 338      * 0:    0.60653066
 339      * 1:    0.30326533
 340      * 2:    0.07581633
 341      * 3:    0.01263606
 342      * 4:    0.00157952
 343      * 5:    0.00015795
 344      * 6:    0.00001316
 345      * 7:    0.00000094
 346      * 8:    0.00000006
 347      * more: less than 1 in ten million
 348      *
 349      * Lock contention probability for two threads accessing distinct
 350      * elements is roughly 1 / (8 * #elements) under random hashes.
 351      *
 352      * Actual hash code distributions encountered in practice
 353      * sometimes deviate significantly from uniform randomness.  This
 354      * includes the case when N > (1<<30), so some keys MUST collide.
 355      * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes or ones that differ only
 357      * in masked-out high bits. So we use a secondary strategy that
 358      * applies when the number of nodes in a bin exceeds a
 359      * threshold. These TreeBins use a balanced tree to hold nodes (a
 360      * specialized form of red-black trees), bounding search time to
 361      * O(log N).  Each search step in a TreeBin is at least twice as
 362      * slow as in a regular list, but given that N cannot exceed
 363      * (1<<64) (before running out of addresses) this bounds search
 364      * steps, lock hold times, etc, to reasonable constants (roughly
 365      * 100 nodes inspected per operation worst case) so long as keys
 366      * are Comparable (which is very common -- String, Long, etc).
 367      * TreeBin nodes (TreeNodes) also maintain the same "next"
 368      * traversal pointers as regular nodes, so can be traversed in
 369      * iterators in the same way.
 370      *
 371      * The table is resized when occupancy exceeds a percentage
 372      * threshold (nominally, 0.75, but see below).  Any thread
 373      * noticing an overfull bin may assist in resizing after the
 374      * initiating thread allocates and sets up the replacement array.
 375      * However, rather than stalling, these other threads may proceed
 376      * with insertions etc.  The use of TreeBins shields us from the
 377      * worst case effects of overfilling while resizes are in
 378      * progress.  Resizing proceeds by transferring bins, one by one,
 379      * from the table to the next table. However, threads claim small
 380      * blocks of indices to transfer (via field transferIndex) before
 381      * doing so, reducing contention.  A generation stamp in field
 382      * sizeCtl ensures that resizings do not overlap. Because we are
 383      * using power-of-two expansion, the elements from each bin must
 384      * either stay at same index, or move with a power of two
 385      * offset. We eliminate unnecessary node creation by catching
 386      * cases where old nodes can be reused because their next fields
 387      * won't change.  On average, only about one-sixth of them need
 388      * cloning when a table doubles. The nodes they replace will be
 389      * garbage collectable as soon as they are no longer referenced by
 390      * any reader thread that may be in the midst of concurrently
 391      * traversing table.  Upon transfer, the old table bin contains
 392      * only a special forwarding node (with hash field "MOVED") that
 393      * contains the next table as its key. On encountering a
 394      * forwarding node, access and update operations restart, using
 395      * the new table.
 396      *
 397      * Each bin transfer requires its bin lock, which can stall
 398      * waiting for locks while resizing. However, because other
 399      * threads can join in and help resize rather than contend for
 400      * locks, average aggregate waits become shorter as resizing
 401      * progresses.  The transfer operation must also ensure that all
 402      * accessible bins in both the old and new table are usable by any
 403      * traversal.  This is arranged in part by proceeding from the
 404      * last bin (table.length - 1) up towards the first.  Upon seeing
 405      * a forwarding node, traversals (see class Traverser) arrange to
 406      * move to the new table without revisiting nodes.  To ensure that
 407      * no intervening nodes are skipped even when moved out of order,
 408      * a stack (see class TableStack) is created on first encounter of
 409      * a forwarding node during a traversal, to maintain its place if
 410      * later processing the current table. The need for these
 411      * save/restore mechanics is relatively rare, but when one
 412      * forwarding node is encountered, typically many more will be.
 413      * So Traversers use a simple caching scheme to avoid creating so
 414      * many new TableStack nodes. (Thanks to Peter Levart for
 415      * suggesting use of a stack here.)
 416      *
 417      * The traversal scheme also applies to partial traversals of
 418      * ranges of bins (via an alternate Traverser constructor)
 419      * to support partitioned aggregate operations.  Also, read-only
 420      * operations give up if ever forwarded to a null table, which
 421      * provides support for shutdown-style clearing, which is also not
 422      * currently implemented.
 423      *
 424      * Lazy table initialization minimizes footprint until first use,
 425      * and also avoids resizings when the first operation is from a
 426      * putAll, constructor with map argument, or deserialization.
 427      * These cases attempt to override the initial capacity settings,
 428      * but harmlessly fail to take effect in cases of races.
 429      *
 430      * The element count is maintained using a specialization of
 431      * LongAdder. We need to incorporate a specialization rather than
 432      * just use a LongAdder in order to access implicit
 433      * contention-sensing that leads to creation of multiple
 434      * CounterCells.  The counter mechanics avoid contention on
 435      * updates but can encounter cache thrashing if read too
 436      * frequently during concurrent access. To avoid reading so often,
 437      * resizing under contention is attempted only upon adding to a
 438      * bin already holding two or more nodes. Under uniform hash
 439      * distributions, the probability of this occurring at threshold
 440      * is around 13%, meaning that only about 1 in 8 puts check
 441      * threshold (and after resizing, many fewer do so).
 442      *
 443      * TreeBins use a special form of comparison for search and
 444      * related operations (which is the main reason we cannot use
 445      * existing collections such as TreeMaps). TreeBins contain
 446      * Comparable elements, but may contain others, as well as
 447      * elements that are Comparable but not necessarily Comparable for
 448      * the same T, so we cannot invoke compareTo among them. To handle
 449      * this, the tree is ordered primarily by hash value, then by
 450      * Comparable.compareTo order if applicable.  On lookup at a node,
 451      * if elements are not comparable or compare as 0 then both left
 452      * and right children may need to be searched in the case of tied
 453      * hash values. (This corresponds to the full list search that
 454      * would be necessary if all elements were non-Comparable and had
 455      * tied hashes.) On insertion, to keep a total ordering (or as
 456      * close as is required here) across rebalancings, we compare
 457      * classes and identityHashCodes as tie-breakers. The red-black
 458      * balancing code is updated from pre-jdk-collections
 459      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
 460      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
 461      * Algorithms" (CLR).
 462      *
 463      * TreeBins also require an additional locking mechanism.  While
 464      * list traversal is always possible by readers even during
 465      * updates, tree traversal is not, mainly because of tree-rotations
 466      * that may change the root node and/or its linkages.  TreeBins
 467      * include a simple read-write lock mechanism parasitic on the
 468      * main bin-synchronization strategy: Structural adjustments
 469      * associated with an insertion or removal are already bin-locked
 470      * (and so cannot conflict with other writers) but must wait for
 471      * ongoing readers to finish. Since there can be only one such
 472      * waiter, we use a simple scheme using a single "waiter" field to
 473      * block writers.  However, readers need never block.  If the root
 474      * lock is held, they proceed along the slow traversal path (via
 475      * next-pointers) until the lock becomes available or the list is
 476      * exhausted, whichever comes first. These cases are not fast, but
 477      * maximize aggregate expected throughput.
 478      *
 479      * Maintaining API and serialization compatibility with previous
 480      * versions of this class introduces several oddities. Mainly: We
 481      * leave untouched but unused constructor arguments referring to
 482      * concurrencyLevel. We accept a loadFactor constructor argument,
 483      * but apply it only to initial table capacity (which is the only
 484      * time that we can guarantee to honor it.) We also declare an
 485      * unused "Segment" class that is instantiated in minimal form
 486      * only when serializing.
 487      *
 488      * Also, solely for compatibility with previous versions of this
 489      * class, it extends AbstractMap, even though all of its methods
 490      * are overridden, so it is just useless baggage.
 491      *
 492      * This file is organized to make things a little easier to follow
 493      * while reading than they might otherwise: First the main static
 494      * declarations and utilities, then fields, then main public
 495      * methods (with a few factorings of multiple public methods into
 496      * internal ones), then sizing methods, trees, traversers, and
 497      * bulk operations.
 498      */
 499 
 500     /* ---------------- Constants -------------- */
 501 
 502     /**
 503      * The largest possible table capacity.  This value must be
 504      * exactly 1<<30 to stay within Java array allocation and indexing
 505      * bounds for power of two table sizes, and is further required
 506      * because the top two bits of 32bit hash fields are used for
 507      * control purposes.
 508      */
 509     private static final int MAXIMUM_CAPACITY = 1 << 30;
 510 
 511     /**
 512      * The default initial table capacity.  Must be a power of 2
 513      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
 514      */
 515     private static final int DEFAULT_CAPACITY = 16;
 516 
 517     /**
 518      * The largest possible (non-power of two) array size.
 519      * Needed by toArray and related methods.
 520      */
 521     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
 522 
 523     /**
 524      * The default concurrency level for this table. Unused but
 525      * defined for compatibility with previous versions of this class.
 526      */
 527     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
 528 
 529     /**
 530      * The load factor for this table. Overrides of this value in
 531      * constructors affect only the initial table capacity.  The
 532      * actual floating point value isn't normally used -- it is
 533      * simpler to use expressions such as {@code n - (n >>> 2)} for
 534      * the associated resizing threshold.
 535      */
 536     private static final float LOAD_FACTOR = 0.75f;
 537 
 538     /**
 539      * The bin count threshold for using a tree rather than list for a
 540      * bin.  Bins are converted to trees when adding an element to a
 541      * bin with at least this many nodes. The value must be greater
 542      * than 2, and should be at least 8 to mesh with assumptions in
 543      * tree removal about conversion back to plain bins upon
 544      * shrinkage.
 545      */
 546     static final int TREEIFY_THRESHOLD = 8;
 547 
 548     /**
 549      * The bin count threshold for untreeifying a (split) bin during a
 550      * resize operation. Should be less than TREEIFY_THRESHOLD, and at
 551      * most 6 to mesh with shrinkage detection under removal.
 552      */
 553     static final int UNTREEIFY_THRESHOLD = 6;
 554 
 555     /**
 556      * The smallest table capacity for which bins may be treeified.
 557      * (Otherwise the table is resized if too many nodes in a bin.)
 558      * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
 559      * conflicts between resizing and treeification thresholds.
 560      */
 561     static final int MIN_TREEIFY_CAPACITY = 64;
 562 
 563     /**
 564      * Minimum number of rebinnings per transfer step. Ranges are
 565      * subdivided to allow multiple resizer threads.  This value
 566      * serves as a lower bound to avoid resizers encountering
 567      * excessive memory contention.  The value should be at least
 568      * DEFAULT_CAPACITY.
 569      */
 570     private static final int MIN_TRANSFER_STRIDE = 16;
 571 
 572     /**
 573      * The number of bits used for generation stamp in sizeCtl.
 574      * Must be at least 6 for 32bit arrays.
 575      */
 576     private static final int RESIZE_STAMP_BITS = 16;
 577 
 578     /**
 579      * The maximum number of threads that can help resize.
 580      * Must fit in 32 - RESIZE_STAMP_BITS bits.
 581      */
 582     private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
 583 
 584     /**
 585      * The bit shift for recording size stamp in sizeCtl.
 586      */
 587     private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
 588 
 589     /*
 590      * Encodings for Node hash fields. See above for explanation.
 591      */
 592     static final int MOVED     = -1; // hash for forwarding nodes
 593     static final int TREEBIN   = -2; // hash for roots of trees
 594     static final int RESERVED  = -3; // hash for transient reservations
 595     static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
 596 
 597     /** Number of CPUS, to place bounds on some sizings */
 598     static final int NCPU = Runtime.getRuntime().availableProcessors();
 599 
 600     /**
 601      * Serialized pseudo-fields, provided only for jdk7 compatibility.
 602      * @serialField segments Segment[]
 603      *   The segments, each of which is a specialized hash table.
 604      * @serialField segmentMask int
 605      *   Mask value for indexing into segments. The upper bits of a
 606      *   key's hash code are used to choose the segment.
 607      * @serialField segmentShift int
 608      *   Shift value for indexing within segments.
 609      */
 610     private static final ObjectStreamField[] serialPersistentFields = {
 611         new ObjectStreamField("segments", Segment[].class),
 612         new ObjectStreamField("segmentMask", Integer.TYPE),
 613         new ObjectStreamField("segmentShift", Integer.TYPE),
 614     };
 615 
 616     /* ---------------- Nodes -------------- */
 617 
 618     /**
 619      * Key-value entry.  This class is never exported out as a
 620      * user-mutable Map.Entry (i.e., one supporting setValue; see
 621      * MapEntry below), but can be used for read-only traversals used
 622      * in bulk tasks.  Subclasses of Node with a negative hash field
 623      * are special, and contain null keys and values (but are never
 624      * exported).  Otherwise, keys and vals are never null.
 625      */
 626     static class Node<K,V> implements Map.Entry<K,V> {
 627         final int hash;
 628         final K key;
 629         volatile V val;
 630         volatile Node<K,V> next;
 631 
 632         Node(int hash, K key, V val) {
 633             this.hash = hash;
 634             this.key = key;
 635             this.val = val;
 636         }
 637 
 638         Node(int hash, K key, V val, Node<K,V> next) {
 639             this(hash, key, val);
 640             this.next = next;
 641         }
 642 
 643         public final K getKey()     { return key; }
 644         public final V getValue()   { return val; }
 645         public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
 646         public final String toString() {
 647             return Helpers.mapEntryToString(key, val);
 648         }
 649         public final V setValue(V value) {
 650             throw new UnsupportedOperationException();
 651         }
 652 
 653         public final boolean equals(Object o) {
 654             Object k, v, u; Map.Entry<?,?> e;
 655             return ((o instanceof Map.Entry) &&
 656                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
 657                     (v = e.getValue()) != null &&
 658                     (k == key || k.equals(key)) &&
 659                     (v == (u = val) || v.equals(u)));
 660         }
 661 
 662         /**
 663          * Virtualized support for map.get(); overridden in subclasses.
 664          */
 665         Node<K,V> find(int h, Object k) {
 666             Node<K,V> e = this;
 667             if (k != null) {
 668                 do {
 669                     K ek;
 670                     if (e.hash == h &&
 671                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
 672                         return e;
 673                 } while ((e = e.next) != null);
 674             }
 675             return null;
 676         }
 677     }
 678 
 679     /* ---------------- Static utilities -------------- */
 680 
 681     /**
 682      * Spreads (XORs) higher bits of hash to lower and also forces top
 683      * bit to 0. Because the table uses power-of-two masking, sets of
 684      * hashes that vary only in bits above the current mask will
 685      * always collide. (Among known examples are sets of Float keys
 686      * holding consecutive whole numbers in small tables.)  So we
 687      * apply a transform that spreads the impact of higher bits
 688      * downward. There is a tradeoff between speed, utility, and
 689      * quality of bit-spreading. Because many common sets of hashes
 690      * are already reasonably distributed (so don't benefit from
 691      * spreading), and because we use trees to handle large sets of
 692      * collisions in bins, we just XOR some shifted bits in the
 693      * cheapest possible way to reduce systematic lossage, as well as
 694      * to incorporate impact of the highest bits that would otherwise
 695      * never be used in index calculations because of table bounds.
 696      */
 697     static final int spread(int h) {
 698         return (h ^ (h >>> 16)) & HASH_BITS;
 699     }
 700 
 701     /**
 702      * Returns a power of two table size for the given desired capacity.
 703      * See Hackers Delight, sec 3.2
 704      */
 705     private static final int tableSizeFor(int c) {
 706         int n = c - 1;
 707         n |= n >>> 1;
 708         n |= n >>> 2;
 709         n |= n >>> 4;
 710         n |= n >>> 8;
 711         n |= n >>> 16;
 712         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
 713     }
 714 
 715     /**
 716      * Returns x's Class if it is of the form "class C implements
 717      * Comparable<C>", else null.
 718      */
 719     static Class<?> comparableClassFor(Object x) {
 720         if (x instanceof Comparable) {
 721             Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
 722             if ((c = x.getClass()) == String.class) // bypass checks
 723                 return c;
 724             if ((ts = c.getGenericInterfaces()) != null) {
 725                 for (int i = 0; i < ts.length; ++i) {
 726                     if (((t = ts[i]) instanceof ParameterizedType) &&
 727                         ((p = (ParameterizedType)t).getRawType() ==
 728                          Comparable.class) &&
 729                         (as = p.getActualTypeArguments()) != null &&
 730                         as.length == 1 && as[0] == c) // type arg is c
 731                         return c;
 732                 }
 733             }
 734         }
 735         return null;
 736     }
 737 
 738     /**
 739      * Returns k.compareTo(x) if x matches kc (k's screened comparable
 740      * class), else 0.
 741      */
 742     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
 743     static int compareComparables(Class<?> kc, Object k, Object x) {
 744         return (x == null || x.getClass() != kc ? 0 :
 745                 ((Comparable)k).compareTo(x));
 746     }
 747 
 748     /* ---------------- Table element access -------------- */
 749 
 750     /*
 751      * Atomic access methods are used for table elements as well as
 752      * elements of in-progress next table while resizing.  All uses of
 753      * the tab arguments must be null checked by callers.  All callers
 754      * also paranoically precheck that tab's length is not zero (or an
 755      * equivalent check), thus ensuring that any index argument taking
 756      * the form of a hash value anded with (length - 1) is a valid
 757      * index.  Note that, to be correct wrt arbitrary concurrency
 758      * errors by users, these checks must operate on local variables,
 759      * which accounts for some odd-looking inline assignments below.
 760      * Note that calls to setTabAt always occur within locked regions,
 761      * and so require only release ordering.
 762      */
 763 
    /**
     * Returns the element of tab at index i, read with acquire
     * ordering so it pairs with the release write in setTabAt and
     * with CAS publication in casTabAt.
     */
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }
 768 
    /**
     * CASes the element of tab at index i from c to v; returns true
     * on success.  Used e.g. to install the first node of a bin
     * without locking.
     */
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSetObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
 773 
    /**
     * Sets the element of tab at index i to v.  Release ordering
     * suffices because (per the access comment for this section)
     * calls always occur within locked regions.
     */
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }
 777 
 778     /* ---------------- Fields -------------- */
 779 
 780     /**
 781      * The array of bins. Lazily initialized upon first insertion.
 782      * Size is always a power of two. Accessed directly by iterators.
 783      */
 784     transient volatile Node<K,V>[] table;
 785 
 786     /**
 787      * The next table to use; non-null only while resizing.
 788      */
 789     private transient volatile Node<K,V>[] nextTable;
 790 
 791     /**
 792      * Base counter value, used mainly when there is no contention,
 793      * but also as a fallback during table initialization
 794      * races. Updated via CAS.
 795      */
 796     private transient volatile long baseCount;
 797 
 798     /**
 799      * Table initialization and resizing control.  When negative, the
 800      * table is being initialized or resized: -1 for initialization,
 801      * else -(1 + the number of active resizing threads).  Otherwise,
 802      * when table is null, holds the initial table size to use upon
 803      * creation, or 0 for default. After initialization, holds the
 804      * next element count value upon which to resize the table.
 805      */
 806     private transient volatile int sizeCtl;
 807 
 808     /**
 809      * The next table index (plus one) to split while resizing.
 810      */
 811     private transient volatile int transferIndex;
 812 
 813     /**
 814      * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 815      */
 816     private transient volatile int cellsBusy;
 817 
 818     /**
 819      * Table of counter cells. When non-null, size is a power of 2.
 820      */
 821     private transient volatile CounterCell[] counterCells;
 822 
 823     // views
 824     private transient KeySetView<K,V> keySet;
 825     private transient ValuesView<K,V> values;
 826     private transient EntrySetView<K,V> entrySet;
 827 
 828 
 829     /* ---------------- Public operations -------------- */
 830 
 831     /**
 832      * Creates a new, empty map with the default initial table size (16).
 833      */
    public ConcurrentHashMap() {
        // sizeCtl stays 0: the table is created lazily at the default
        // capacity on first insertion (see sizeCtl field doc).
    }
 836 
 837     /**
 838      * Creates a new, empty map with an initial table size
 839      * accommodating the specified number of elements without the need
 840      * to dynamically resize.
 841      *
 842      * @param initialCapacity The implementation performs internal
 843      * sizing to accommodate this many elements.
 844      * @throws IllegalArgumentException if the initial capacity of
 845      * elements is negative
 846      */
 847     public ConcurrentHashMap(int initialCapacity) {
 848         if (initialCapacity < 0)
 849             throw new IllegalArgumentException();
 850         int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
 851                    MAXIMUM_CAPACITY :
 852                    tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
 853         this.sizeCtl = cap;
 854     }
 855 
 856     /**
 857      * Creates a new map with the same mappings as the given map.
 858      *
 859      * @param m the map
 860      */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;  // start from the default table size
        putAll(m);                        // copies entries; putAll presizes via tryPresize
    }
 865 
 866     /**
 867      * Creates a new, empty map with an initial table size based on
 868      * the given number of elements ({@code initialCapacity}) and
 869      * initial table density ({@code loadFactor}).
 870      *
 871      * @param initialCapacity the initial capacity. The implementation
 872      * performs internal sizing to accommodate this many elements,
 873      * given the specified load factor.
 874      * @param loadFactor the load factor (table density) for
 875      * establishing the initial table size
 876      * @throws IllegalArgumentException if the initial capacity of
 877      * elements is negative or the load factor is nonpositive
 878      *
 879      * @since 1.6
 880      */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);  // concurrencyLevel defaults to 1
    }
 884 
 885     /**
 886      * Creates a new, empty map with an initial table size based on
 887      * the given number of elements ({@code initialCapacity}), table
 888      * density ({@code loadFactor}), and number of concurrently
 889      * updating threads ({@code concurrencyLevel}).
 890      *
 891      * @param initialCapacity the initial capacity. The implementation
 892      * performs internal sizing to accommodate this many elements,
 893      * given the specified load factor.
 894      * @param loadFactor the load factor (table density) for
 895      * establishing the initial table size
 896      * @param concurrencyLevel the estimated number of concurrently
 897      * updating threads. The implementation may use this value as
 898      * a sizing hint.
 899      * @throws IllegalArgumentException if the initial capacity is
 900      * negative or the load factor or concurrencyLevel are
 901      * nonpositive
 902      */
 903     public ConcurrentHashMap(int initialCapacity,
 904                              float loadFactor, int concurrencyLevel) {
 905         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
 906             throw new IllegalArgumentException();
 907         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
 908             initialCapacity = concurrencyLevel;   // as estimated threads
 909         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
 910         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
 911             MAXIMUM_CAPACITY : tableSizeFor((int)size);
 912         this.sizeCtl = cap;
 913     }
 914 
 915     // Original (since JDK1.2) Map methods
 916 
 917     /**
 918      * {@inheritDoc}
 919      */
 920     public int size() {
 921         long n = sumCount();
 922         return ((n < 0L) ? 0 :
 923                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
 924                 (int)n);
 925     }
 926 
 927     /**
 928      * {@inheritDoc}
 929      */
 930     public boolean isEmpty() {
 931         return sumCount() <= 0L; // ignore transient negative values
 932     }
 933 
 934     /**
 935      * Returns the value to which the specified key is mapped,
 936      * or {@code null} if this map contains no mapping for the key.
 937      *
 938      * <p>More formally, if this map contains a mapping from a key
 939      * {@code k} to a value {@code v} such that {@code key.equals(k)},
 940      * then this method returns {@code v}; otherwise it returns
 941      * {@code null}.  (There can be at most one such mapping.)
 942      *
 943      * @throws NullPointerException if the specified key is null
 944      */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());   // throws NPE here for null key, per spec
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            // Check the bin's first node inline -- the common case.
            if ((eh = e.hash) == h) {
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash marks a special node (forwarding node,
                // tree bin, or reservation); use its overridden find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Otherwise scan the rest of the linked list, lock-free.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
 964 
 965     /**
 966      * Tests if the specified object is a key in this table.
 967      *
 968      * @param  key possible key
 969      * @return {@code true} if and only if the specified object
 970      *         is a key in this table, as determined by the
 971      *         {@code equals} method; {@code false} otherwise
 972      * @throws NullPointerException if the specified key is null
 973      */
 974     public boolean containsKey(Object key) {
 975         return get(key) != null;
 976     }
 977 
 978     /**
 979      * Returns {@code true} if this map maps one or more keys to the
 980      * specified value. Note: This method may require a full traversal
 981      * of the map, and is much slower than method {@code containsKey}.
 982      *
 983      * @param value value whose presence in this map is to be tested
 984      * @return {@code true} if this map maps one or more keys to the
 985      *         specified value
 986      * @throws NullPointerException if the specified value is null
 987      */
 988     public boolean containsValue(Object value) {
 989         if (value == null)
 990             throw new NullPointerException();
 991         Node<K,V>[] t;
 992         if ((t = table) != null) {
 993             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
 994             for (Node<K,V> p; (p = it.advance()) != null; ) {
 995                 V v;
 996                 if ((v = p.val) == value || (v != null && value.equals(v)))
 997                     return true;
 998             }
 999         }
1000         return false;
1001     }
1002 
1003     /**
1004      * Maps the specified key to the specified value in this table.
1005      * Neither the key nor the value can be null.
1006      *
1007      * <p>The value can be retrieved by calling the {@code get} method
1008      * with a key that is equal to the original key.
1009      *
1010      * @param key key with which the specified value is to be associated
1011      * @param value value to be associated with the specified key
1012      * @return the previous value associated with {@code key}, or
1013      *         {@code null} if there was no mapping for {@code key}
1014      * @throws NullPointerException if the specified key or value is null
1015      */
    public V put(K key, V value) {
        // onlyIfAbsent == false: always map key to value, replacing any old value.
        return putVal(key, value, false);
    }
1019 
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key the (non-null) key
     * @param value the (non-null) value
     * @param onlyIfAbsent if true, do not replace an existing value
     * @return the previous value, or null if there was none
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;   // nodes seen in the bin; 0 means no insertion happened yet
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();               // lazy table creation, then retry
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);      // table is resizing: help, then retry
            else if (onlyIfAbsent // check first node without acquiring lock
                     && fh == hash
                     && ((fk = f.key) == key || (fk != null && key.equals(fk)))
                     && (fv = f.val) != null)
                return fv;
            else {
                V oldVal = null;
                synchronized (f) {               // lock the bin via its first node
                    if (tabAt(tab, i) == f) {    // recheck: head may have changed before lock
                        if (fh >= 0) {           // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Key not found: append new node at the tail.
                                    pred.next = new Node<K,V>(hash, key, value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;        // placeholder count for tree bins
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);      // may convert list bin to a tree
                    if (oldVal != null)
                        return oldVal;           // replaced existing: size unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount);   // added a mapping; may trigger a resize check
        return null;
    }
1089 
1090     /**
1091      * Copies all of the mappings from the specified map to this one.
1092      * These mappings replace any mappings that this map had for any of the
1093      * keys currently in the specified map.
1094      *
1095      * @param m mappings to be stored in this map
1096      */
1097     public void putAll(Map<? extends K, ? extends V> m) {
1098         tryPresize(m.size());
1099         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1100             putVal(e.getKey(), e.getValue(), false);
1101     }
1102 
1103     /**
1104      * Removes the key (and its corresponding value) from this map.
1105      * This method does nothing if the key is not in the map.
1106      *
1107      * @param  key the key that needs to be removed
1108      * @return the previous value associated with {@code key}, or
1109      *         {@code null} if there was no mapping for {@code key}
1110      * @throws NullPointerException if the specified key is null
1111      */
    public V remove(Object key) {
        // null value requests deletion; null cv makes it unconditional.
        return replaceNode(key, null, null);
    }
1115 
1116     /**
1117      * Implementation for the four public remove/replace methods:
1118      * Replaces node value with v, conditional upon match of cv if
1119      * non-null.  If resulting value is null, delete.
1120      */
1121     final V replaceNode(Object key, V value, Object cv) {
1122         int hash = spread(key.hashCode());
1123         for (Node<K,V>[] tab = table;;) {
1124             Node<K,V> f; int n, i, fh;
1125             if (tab == null || (n = tab.length) == 0 ||
1126                 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1127                 break;
1128             else if ((fh = f.hash) == MOVED)
1129                 tab = helpTransfer(tab, f);
1130             else {
1131                 V oldVal = null;
1132                 boolean validated = false;
1133                 synchronized (f) {
1134                     if (tabAt(tab, i) == f) {
1135                         if (fh >= 0) {
1136                             validated = true;
1137                             for (Node<K,V> e = f, pred = null;;) {
1138                                 K ek;
1139                                 if (e.hash == hash &&
1140                                     ((ek = e.key) == key ||
1141                                      (ek != null && key.equals(ek)))) {
1142                                     V ev = e.val;
1143                                     if (cv == null || cv == ev ||
1144                                         (ev != null && cv.equals(ev))) {
1145                                         oldVal = ev;
1146                                         if (value != null)
1147                                             e.val = value;
1148                                         else if (pred != null)
1149                                             pred.next = e.next;
1150                                         else
1151                                             setTabAt(tab, i, e.next);
1152                                     }
1153                                     break;
1154                                 }
1155                                 pred = e;
1156                                 if ((e = e.next) == null)
1157                                     break;
1158                             }
1159                         }
1160                         else if (f instanceof TreeBin) {
1161                             validated = true;
1162                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1163                             TreeNode<K,V> r, p;
1164                             if ((r = t.root) != null &&
1165                                 (p = r.findTreeNode(hash, key, null)) != null) {
1166                                 V pv = p.val;
1167                                 if (cv == null || cv == pv ||
1168                                     (pv != null && cv.equals(pv))) {
1169                                     oldVal = pv;
1170                                     if (value != null)
1171                                         p.val = value;
1172                                     else if (t.removeTreeNode(p))
1173                                         setTabAt(tab, i, untreeify(t.first));
1174                                 }
1175                             }
1176                         }
1177                         else if (f instanceof ReservationNode)
1178                             throw new IllegalStateException("Recursive update");
1179                     }
1180                 }
1181                 if (validated) {
1182                     if (oldVal != null) {
1183                         if (value == null)
1184                             addCount(-1L, -1);
1185                         return oldVal;
1186                     }
1187                     break;
1188                 }
1189             }
1190         }
1191         return null;
1192     }
1193 
1194     /**
1195      * Removes all of the mappings from this map.
1196      */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;                       // empty bin: nothing to remove
            else if ((fh = f.hash) == MOVED) {
                // Table is being resized; help finish the transfer,
                // then start over on the (possibly new) table.
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {         // lock the bin before discarding it
                    if (tabAt(tab, i) == f) {
                        // Count the nodes being discarded; tree bins are
                        // walked via their 'first' linked list.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);           // adjust element count (check arg -1: see addCount)
    }
1228 
1229     /**
1230      * Returns a {@link Set} view of the keys contained in this map.
1231      * The set is backed by the map, so changes to the map are
1232      * reflected in the set, and vice-versa. The set supports element
1233      * removal, which removes the corresponding mapping from this map,
1234      * via the {@code Iterator.remove}, {@code Set.remove},
1235      * {@code removeAll}, {@code retainAll}, and {@code clear}
1236      * operations.  It does not support the {@code add} or
1237      * {@code addAll} operations.
1238      *
1239      * <p>The view's iterators and spliterators are
1240      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1241      *
1242      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1243      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1244      *
1245      * @return the set view
1246      */
1247     public KeySetView<K,V> keySet() {
1248         KeySetView<K,V> ks;
1249         if ((ks = keySet) != null) return ks;
1250         return keySet = new KeySetView<K,V>(this, null);
1251     }
1252 
1253     /**
1254      * Returns a {@link Collection} view of the values contained in this map.
1255      * The collection is backed by the map, so changes to the map are
1256      * reflected in the collection, and vice-versa.  The collection
1257      * supports element removal, which removes the corresponding
1258      * mapping from this map, via the {@code Iterator.remove},
1259      * {@code Collection.remove}, {@code removeAll},
1260      * {@code retainAll}, and {@code clear} operations.  It does not
1261      * support the {@code add} or {@code addAll} operations.
1262      *
1263      * <p>The view's iterators and spliterators are
1264      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1265      *
1266      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1267      * and {@link Spliterator#NONNULL}.
1268      *
1269      * @return the collection view
1270      */
1271     public Collection<V> values() {
1272         ValuesView<K,V> vs;
1273         if ((vs = values) != null) return vs;
1274         return values = new ValuesView<K,V>(this);
1275     }
1276 
1277     /**
1278      * Returns a {@link Set} view of the mappings contained in this map.
1279      * The set is backed by the map, so changes to the map are
1280      * reflected in the set, and vice-versa.  The set supports element
1281      * removal, which removes the corresponding mapping from the map,
1282      * via the {@code Iterator.remove}, {@code Set.remove},
1283      * {@code removeAll}, {@code retainAll}, and {@code clear}
1284      * operations.
1285      *
1286      * <p>The view's iterators and spliterators are
1287      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1288      *
1289      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1290      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1291      *
1292      * @return the set view
1293      */
1294     public Set<Map.Entry<K,V>> entrySet() {
1295         EntrySetView<K,V> es;
1296         if ((es = entrySet) != null) return es;
1297         return entrySet = new EntrySetView<K,V>(this);
1298     }
1299 
1300     /**
1301      * Returns the hash code value for this {@link Map}, i.e.,
1302      * the sum of, for each key-value pair in the map,
1303      * {@code key.hashCode() ^ value.hashCode()}.
1304      *
1305      * @return the hash code value for this map
1306      */
1307     public int hashCode() {
1308         int h = 0;
1309         Node<K,V>[] t;
1310         if ((t = table) != null) {
1311             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1312             for (Node<K,V> p; (p = it.advance()) != null; )
1313                 h += p.key.hashCode() ^ p.val.hashCode();
1314         }
1315         return h;
1316     }
1317 
1318     /**
1319      * Returns a string representation of this map.  The string
1320      * representation consists of a list of key-value mappings (in no
1321      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1322      * mappings are separated by the characters {@code ", "} (comma
1323      * and space).  Each key-value mapping is rendered as the key
1324      * followed by an equals sign ("{@code =}") followed by the
1325      * associated value.
1326      *
1327      * @return a string representation of this map
1328      */
1329     public String toString() {
1330         Node<K,V>[] t;
1331         int f = (t = table) == null ? 0 : t.length;
1332         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1333         StringBuilder sb = new StringBuilder();
1334         sb.append('{');
1335         Node<K,V> p;
1336         if ((p = it.advance()) != null) {
1337             for (;;) {
1338                 K k = p.key;
1339                 V v = p.val;
1340                 sb.append(k == this ? "(this Map)" : k);
1341                 sb.append('=');
1342                 sb.append(v == this ? "(this Map)" : v);
1343                 if ((p = it.advance()) == null)
1344                     break;
1345                 sb.append(',').append(' ');
1346             }
1347         }
1348         return sb.append('}').toString();
1349     }
1350 
1351     /**
1352      * Compares the specified object with this map for equality.
1353      * Returns {@code true} if the given object is a map with the same
1354      * mappings as this map.  This operation may return misleading
1355      * results if either map is concurrently modified during execution
1356      * of this method.
1357      *
1358      * @param o object to be compared for equality with this map
1359      * @return {@code true} if the specified object is equal to this map
1360      */
1361     public boolean equals(Object o) {
1362         if (o != this) {
1363             if (!(o instanceof Map))
1364                 return false;
1365             Map<?,?> m = (Map<?,?>) o;
1366             Node<K,V>[] t;
1367             int f = (t = table) == null ? 0 : t.length;
1368             Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1369             for (Node<K,V> p; (p = it.advance()) != null; ) {
1370                 V val = p.val;
1371                 Object v = m.get(p.key);
1372                 if (v == null || (v != val && !v.equals(val)))
1373                     return false;
1374             }
1375             for (Map.Entry<?,?> e : m.entrySet()) {
1376                 Object mk, mv, v;
1377                 if ((mk = e.getKey()) == null ||
1378                     (mv = e.getValue()) == null ||
1379                     (v = get(mk)) == null ||
1380                     (mv != v && !mv.equals(v)))
1381                     return false;
1382             }
1383         }
1384         return true;
1385     }
1386 
1387     /**
1388      * Stripped-down version of helper class used in previous version,
1389      * declared for the sake of serialization compatibility.
1390      */
1391     static class Segment<K,V> extends ReentrantLock implements Serializable {
1392         private static final long serialVersionUID = 2249069246763182397L;
1393         final float loadFactor;
1394         Segment(float lf) { this.loadFactor = lf; }
1395     }
1396 
1397     /**
1398      * Saves the state of the {@code ConcurrentHashMap} instance to a
1399      * stream (i.e., serializes it).
1400      * @param s the stream
1401      * @throws java.io.IOException if an I/O error occurs
1402      * @serialData
1403      * the serialized fields, followed by the key (Object) and value
1404      * (Object) for each key-value mapping, followed by a null pair.
1405      * The key-value mappings are emitted in no particular order.
1406      */
1407     private void writeObject(java.io.ObjectOutputStream s)
1408         throws java.io.IOException {
1409         // For serialization compatibility
1410         // Emulate segment calculation from previous version of this class
1411         int sshift = 0;
1412         int ssize = 1;
1413         while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1414             ++sshift;
1415             ssize <<= 1;
1416         }
1417         int segmentShift = 32 - sshift;
1418         int segmentMask = ssize - 1;
1419         @SuppressWarnings("unchecked")
1420         Segment<K,V>[] segments = (Segment<K,V>[])
1421             new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1422         for (int i = 0; i < segments.length; ++i)
1423             segments[i] = new Segment<K,V>(LOAD_FACTOR);
1424         java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1425         streamFields.put("segments", segments);
1426         streamFields.put("segmentShift", segmentShift);
1427         streamFields.put("segmentMask", segmentMask);
1428         s.writeFields();
1429 
1430         Node<K,V>[] t;
1431         if ((t = table) != null) {
1432             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1433             for (Node<K,V> p; (p = it.advance()) != null; ) {
1434                 s.writeObject(p.key);
1435                 s.writeObject(p.val);
1436             }
1437         }
1438         s.writeObject(null);
1439         s.writeObject(null);
1440     }
1441 
1442     /**
1443      * Reconstitutes the instance from a stream (that is, deserializes it).
1444      * @param s the stream
1445      * @throws ClassNotFoundException if the class of a serialized object
1446      *         could not be found
1447      * @throws java.io.IOException if an I/O error occurs
1448      */
1449     private void readObject(java.io.ObjectInputStream s)
1450         throws java.io.IOException, ClassNotFoundException {
1451         /*
1452          * To improve performance in typical cases, we create nodes
1453          * while reading, then place in table once size is known.
1454          * However, we must also validate uniqueness and deal with
1455          * overpopulated bins while doing so, which requires
1456          * specialized versions of putVal mechanics.
1457          */
1458         sizeCtl = -1; // force exclusion for table construction
1459         s.defaultReadObject();
1460         long size = 0L;
1461         Node<K,V> p = null;
1462         for (;;) {
1463             @SuppressWarnings("unchecked")
1464             K k = (K) s.readObject();
1465             @SuppressWarnings("unchecked")
1466             V v = (V) s.readObject();
1467             if (k != null && v != null) {
1468                 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1469                 ++size;
1470             }
1471             else
1472                 break;
1473         }
1474         if (size == 0L)
1475             sizeCtl = 0;
1476         else {
1477             int n;
1478             if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1479                 n = MAXIMUM_CAPACITY;
1480             else {
1481                 int sz = (int)size;
1482                 n = tableSizeFor(sz + (sz >>> 1) + 1);
1483             }
1484             @SuppressWarnings("unchecked")
1485             Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1486             int mask = n - 1;
1487             long added = 0L;
1488             while (p != null) {
1489                 boolean insertAtFront;
1490                 Node<K,V> next = p.next, first;
1491                 int h = p.hash, j = h & mask;
1492                 if ((first = tabAt(tab, j)) == null)
1493                     insertAtFront = true;
1494                 else {
1495                     K k = p.key;
1496                     if (first.hash < 0) {
1497                         TreeBin<K,V> t = (TreeBin<K,V>)first;
1498                         if (t.putTreeVal(h, k, p.val) == null)
1499                             ++added;
1500                         insertAtFront = false;
1501                     }
1502                     else {
1503                         int binCount = 0;
1504                         insertAtFront = true;
1505                         Node<K,V> q; K qk;
1506                         for (q = first; q != null; q = q.next) {
1507                             if (q.hash == h &&
1508                                 ((qk = q.key) == k ||
1509                                  (qk != null && k.equals(qk)))) {
1510                                 insertAtFront = false;
1511                                 break;
1512                             }
1513                             ++binCount;
1514                         }
1515                         if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1516                             insertAtFront = false;
1517                             ++added;
1518                             p.next = first;
1519                             TreeNode<K,V> hd = null, tl = null;
1520                             for (q = p; q != null; q = q.next) {
1521                                 TreeNode<K,V> t = new TreeNode<K,V>
1522                                     (q.hash, q.key, q.val, null, null);
1523                                 if ((t.prev = tl) == null)
1524                                     hd = t;
1525                                 else
1526                                     tl.next = t;
1527                                 tl = t;
1528                             }
1529                             setTabAt(tab, j, new TreeBin<K,V>(hd));
1530                         }
1531                     }
1532                 }
1533                 if (insertAtFront) {
1534                     ++added;
1535                     p.next = first;
1536                     setTabAt(tab, j, p);
1537                 }
1538                 p = next;
1539             }
1540             table = tab;
1541             sizeCtl = n - (n >>> 2);
1542             baseCount = added;
1543         }
1544     }
1545 
1546     // ConcurrentMap methods
1547 
1548     /**
1549      * {@inheritDoc}
1550      *
1551      * @return the previous value associated with the specified key,
1552      *         or {@code null} if there was no mapping for the key
1553      * @throws NullPointerException if the specified key or value is null
1554      */
1555     public V putIfAbsent(K key, V value) {
1556         return putVal(key, value, true);
1557     }
1558 
1559     /**
1560      * {@inheritDoc}
1561      *
1562      * @throws NullPointerException if the specified key is null
1563      */
1564     public boolean remove(Object key, Object value) {
1565         if (key == null)
1566             throw new NullPointerException();
1567         return value != null && replaceNode(key, null, value) != null;
1568     }
1569 
1570     /**
1571      * {@inheritDoc}
1572      *
1573      * @throws NullPointerException if any of the arguments are null
1574      */
1575     public boolean replace(K key, V oldValue, V newValue) {
1576         if (key == null || oldValue == null || newValue == null)
1577             throw new NullPointerException();
1578         return replaceNode(key, newValue, oldValue) != null;
1579     }
1580 
1581     /**
1582      * {@inheritDoc}
1583      *
1584      * @return the previous value associated with the specified key,
1585      *         or {@code null} if there was no mapping for the key
1586      * @throws NullPointerException if the specified key or value is null
1587      */
1588     public V replace(K key, V value) {
1589         if (key == null || value == null)
1590             throw new NullPointerException();
1591         return replaceNode(key, value, null);
1592     }
1593 
1594     // Overrides of JDK8+ Map extension method defaults
1595 
1596     /**
1597      * Returns the value to which the specified key is mapped, or the
1598      * given default value if this map contains no mapping for the
1599      * key.
1600      *
1601      * @param key the key whose associated value is to be returned
1602      * @param defaultValue the value to return if this map contains
1603      * no mapping for the given key
1604      * @return the mapping for the key, if present; else the default value
1605      * @throws NullPointerException if the specified key is null
1606      */
1607     public V getOrDefault(Object key, V defaultValue) {
1608         V v;
1609         return (v = get(key)) == null ? defaultValue : v;
1610     }
1611 
1612     public void forEach(BiConsumer<? super K, ? super V> action) {
1613         if (action == null) throw new NullPointerException();
1614         Node<K,V>[] t;
1615         if ((t = table) != null) {
1616             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1617             for (Node<K,V> p; (p = it.advance()) != null; ) {
1618                 action.accept(p.key, p.val);
1619             }
1620         }
1621     }
1622 
    /**
     * Replaces each entry's value with the result of applying the
     * given function to the entry's key and current value.
     *
     * @param function the function applied to each (key, value) pair
     * @throws NullPointerException if the function is null or returns null
     */
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // Retry loop: replaceNode installs newValue only if the
                // current value still equals oldValue (cf. replace(K,V,V)).
                // On failure, re-read the value and retry; give up if the
                // mapping has been concurrently removed.
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1641 
1642     /**
1643      * Helper method for EntrySetView.removeIf.
1644      */
1645     boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1646         if (function == null) throw new NullPointerException();
1647         Node<K,V>[] t;
1648         boolean removed = false;
1649         if ((t = table) != null) {
1650             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1651             for (Node<K,V> p; (p = it.advance()) != null; ) {
1652                 K k = p.key;
1653                 V v = p.val;
1654                 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1655                 if (function.test(e) && replaceNode(k, null, v) != null)
1656                     removed = true;
1657             }
1658         }
1659         return removed;
1660     }
1661 
1662     /**
1663      * Helper method for ValuesView.removeIf.
1664      */
1665     boolean removeValueIf(Predicate<? super V> function) {
1666         if (function == null) throw new NullPointerException();
1667         Node<K,V>[] t;
1668         boolean removed = false;
1669         if ((t = table) != null) {
1670             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1671             for (Node<K,V> p; (p = it.advance()) != null; ) {
1672                 K k = p.key;
1673                 V v = p.val;
1674                 if (function.test(v) && replaceNode(k, null, v) != null)
1675                     removed = true;
1676             }
1677         }
1678         return removed;
1679     }
1680 
1681     /**
1682      * If the specified key is not already associated with a value,
1683      * attempts to compute its value using the given mapping function
1684      * and enters it into this map unless {@code null}.  The entire
1685      * method invocation is performed atomically, so the function is
1686      * applied at most once per key.  Some attempted update operations
1687      * on this map by other threads may be blocked while computation
1688      * is in progress, so the computation should be short and simple,
1689      * and must not attempt to update any other mappings of this map.
1690      *
1691      * @param key key with which the specified value is to be associated
1692      * @param mappingFunction the function to compute a value
1693      * @return the current (existing or computed) value associated with
1694      *         the specified key, or null if the computed value is null
1695      * @throws NullPointerException if the specified key or mappingFunction
1696      *         is null
1697      * @throws IllegalStateException if the computation detectably
1698      *         attempts a recursive update to this map that would
1699      *         otherwise never complete
1700      * @throws RuntimeException or Error if the mappingFunction does so,
1701      *         in which case the mapping is left unestablished
1702      */
1703     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1704         if (key == null || mappingFunction == null)
1705             throw new NullPointerException();
1706         int h = spread(key.hashCode());
1707         V val = null;
1708         int binCount = 0;
1709         for (Node<K,V>[] tab = table;;) {
1710             Node<K,V> f; int n, i, fh; K fk; V fv;
1711             if (tab == null || (n = tab.length) == 0)
1712                 tab = initTable();
1713             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1714                 Node<K,V> r = new ReservationNode<K,V>();
1715                 synchronized (r) {
1716                     if (casTabAt(tab, i, null, r)) {
1717                         binCount = 1;
1718                         Node<K,V> node = null;
1719                         try {
1720                             if ((val = mappingFunction.apply(key)) != null)
1721                                 node = new Node<K,V>(h, key, val);
1722                         } finally {
1723                             setTabAt(tab, i, node);
1724                         }
1725                     }
1726                 }
1727                 if (binCount != 0)
1728                     break;
1729             }
1730             else if ((fh = f.hash) == MOVED)
1731                 tab = helpTransfer(tab, f);
1732             else if (fh == h    // check first node without acquiring lock
1733                      && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1734                      && (fv = f.val) != null)
1735                 return fv;
1736             else {
1737                 boolean added = false;
1738                 synchronized (f) {
1739                     if (tabAt(tab, i) == f) {
1740                         if (fh >= 0) {
1741                             binCount = 1;
1742                             for (Node<K,V> e = f;; ++binCount) {
1743                                 K ek;
1744                                 if (e.hash == h &&
1745                                     ((ek = e.key) == key ||
1746                                      (ek != null && key.equals(ek)))) {
1747                                     val = e.val;
1748                                     break;
1749                                 }
1750                                 Node<K,V> pred = e;
1751                                 if ((e = e.next) == null) {
1752                                     if ((val = mappingFunction.apply(key)) != null) {
1753                                         if (pred.next != null)
1754                                             throw new IllegalStateException("Recursive update");
1755                                         added = true;
1756                                         pred.next = new Node<K,V>(h, key, val);
1757                                     }
1758                                     break;
1759                                 }
1760                             }
1761                         }
1762                         else if (f instanceof TreeBin) {
1763                             binCount = 2;
1764                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1765                             TreeNode<K,V> r, p;
1766                             if ((r = t.root) != null &&
1767                                 (p = r.findTreeNode(h, key, null)) != null)
1768                                 val = p.val;
1769                             else if ((val = mappingFunction.apply(key)) != null) {
1770                                 added = true;
1771                                 t.putTreeVal(h, key, val);
1772                             }
1773                         }
1774                         else if (f instanceof ReservationNode)
1775                             throw new IllegalStateException("Recursive update");
1776                     }
1777                 }
1778                 if (binCount != 0) {
1779                     if (binCount >= TREEIFY_THRESHOLD)
1780                         treeifyBin(tab, i);
1781                     if (!added)
1782                         return val;
1783                     break;
1784                 }
1785             }
1786         }
1787         if (val != null)
1788             addCount(1L, binCount);
1789         return val;
1790     }
1791 
1792     /**
1793      * If the value for the specified key is present, attempts to
1794      * compute a new mapping given the key and its current mapped
1795      * value.  The entire method invocation is performed atomically.
1796      * Some attempted update operations on this map by other threads
1797      * may be blocked while computation is in progress, so the
1798      * computation should be short and simple, and must not attempt to
1799      * update any other mappings of this map.
1800      *
1801      * @param key key with which a value may be associated
1802      * @param remappingFunction the function to compute a value
1803      * @return the new value associated with the specified key, or null if none
1804      * @throws NullPointerException if the specified key or remappingFunction
1805      *         is null
1806      * @throws IllegalStateException if the computation detectably
1807      *         attempts a recursive update to this map that would
1808      *         otherwise never complete
1809      * @throws RuntimeException or Error if the remappingFunction does so,
1810      *         in which case the mapping is unchanged
1811      */
1812     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1813         if (key == null || remappingFunction == null)
1814             throw new NullPointerException();
1815         int h = spread(key.hashCode());
1816         V val = null;
1817         int delta = 0;
1818         int binCount = 0;
1819         for (Node<K,V>[] tab = table;;) {
1820             Node<K,V> f; int n, i, fh;
1821             if (tab == null || (n = tab.length) == 0)
1822                 tab = initTable();
1823             else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1824                 break;
1825             else if ((fh = f.hash) == MOVED)
1826                 tab = helpTransfer(tab, f);
1827             else {
1828                 synchronized (f) {
1829                     if (tabAt(tab, i) == f) {
1830                         if (fh >= 0) {
1831                             binCount = 1;
1832                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1833                                 K ek;
1834                                 if (e.hash == h &&
1835                                     ((ek = e.key) == key ||
1836                                      (ek != null && key.equals(ek)))) {
1837                                     val = remappingFunction.apply(key, e.val);
1838                                     if (val != null)
1839                                         e.val = val;
1840                                     else {
1841                                         delta = -1;
1842                                         Node<K,V> en = e.next;
1843                                         if (pred != null)
1844                                             pred.next = en;
1845                                         else
1846                                             setTabAt(tab, i, en);
1847                                     }
1848                                     break;
1849                                 }
1850                                 pred = e;
1851                                 if ((e = e.next) == null)
1852                                     break;
1853                             }
1854                         }
1855                         else if (f instanceof TreeBin) {
1856                             binCount = 2;
1857                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1858                             TreeNode<K,V> r, p;
1859                             if ((r = t.root) != null &&
1860                                 (p = r.findTreeNode(h, key, null)) != null) {
1861                                 val = remappingFunction.apply(key, p.val);
1862                                 if (val != null)
1863                                     p.val = val;
1864                                 else {
1865                                     delta = -1;
1866                                     if (t.removeTreeNode(p))
1867                                         setTabAt(tab, i, untreeify(t.first));
1868                                 }
1869                             }
1870                         }
1871                         else if (f instanceof ReservationNode)
1872                             throw new IllegalStateException("Recursive update");
1873                     }
1874                 }
1875                 if (binCount != 0)
1876                     break;
1877             }
1878         }
1879         if (delta != 0)
1880             addCount((long)delta, binCount);
1881         return val;
1882     }
1883 
1884     /**
1885      * Attempts to compute a mapping for the specified key and its
1886      * current mapped value (or {@code null} if there is no current
1887      * mapping). The entire method invocation is performed atomically.
1888      * Some attempted update operations on this map by other threads
1889      * may be blocked while computation is in progress, so the
1890      * computation should be short and simple, and must not attempt to
1891      * update any other mappings of this Map.
1892      *
1893      * @param key key with which the specified value is to be associated
1894      * @param remappingFunction the function to compute a value
1895      * @return the new value associated with the specified key, or null if none
1896      * @throws NullPointerException if the specified key or remappingFunction
1897      *         is null
1898      * @throws IllegalStateException if the computation detectably
1899      *         attempts a recursive update to this map that would
1900      *         otherwise never complete
1901      * @throws RuntimeException or Error if the remappingFunction does so,
1902      *         in which case the mapping is unchanged
1903      */
1904     public V compute(K key,
1905                      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1906         if (key == null || remappingFunction == null)
1907             throw new NullPointerException();
1908         int h = spread(key.hashCode());
1909         V val = null;
1910         int delta = 0;
1911         int binCount = 0;
1912         for (Node<K,V>[] tab = table;;) {
1913             Node<K,V> f; int n, i, fh;
1914             if (tab == null || (n = tab.length) == 0)
1915                 tab = initTable();
1916             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1917                 Node<K,V> r = new ReservationNode<K,V>();
1918                 synchronized (r) {
1919                     if (casTabAt(tab, i, null, r)) {
1920                         binCount = 1;
1921                         Node<K,V> node = null;
1922                         try {
1923                             if ((val = remappingFunction.apply(key, null)) != null) {
1924                                 delta = 1;
1925                                 node = new Node<K,V>(h, key, val);
1926                             }
1927                         } finally {
1928                             setTabAt(tab, i, node);
1929                         }
1930                     }
1931                 }
1932                 if (binCount != 0)
1933                     break;
1934             }
1935             else if ((fh = f.hash) == MOVED)
1936                 tab = helpTransfer(tab, f);
1937             else {
1938                 synchronized (f) {
1939                     if (tabAt(tab, i) == f) {
1940                         if (fh >= 0) {
1941                             binCount = 1;
1942                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1943                                 K ek;
1944                                 if (e.hash == h &&
1945                                     ((ek = e.key) == key ||
1946                                      (ek != null && key.equals(ek)))) {
1947                                     val = remappingFunction.apply(key, e.val);
1948                                     if (val != null)
1949                                         e.val = val;
1950                                     else {
1951                                         delta = -1;
1952                                         Node<K,V> en = e.next;
1953                                         if (pred != null)
1954                                             pred.next = en;
1955                                         else
1956                                             setTabAt(tab, i, en);
1957                                     }
1958                                     break;
1959                                 }
1960                                 pred = e;
1961                                 if ((e = e.next) == null) {
1962                                     val = remappingFunction.apply(key, null);
1963                                     if (val != null) {
1964                                         if (pred.next != null)
1965                                             throw new IllegalStateException("Recursive update");
1966                                         delta = 1;
1967                                         pred.next = new Node<K,V>(h, key, val);
1968                                     }
1969                                     break;
1970                                 }
1971                             }
1972                         }
1973                         else if (f instanceof TreeBin) {
1974                             binCount = 1;
1975                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1976                             TreeNode<K,V> r, p;
1977                             if ((r = t.root) != null)
1978                                 p = r.findTreeNode(h, key, null);
1979                             else
1980                                 p = null;
1981                             V pv = (p == null) ? null : p.val;
1982                             val = remappingFunction.apply(key, pv);
1983                             if (val != null) {
1984                                 if (p != null)
1985                                     p.val = val;
1986                                 else {
1987                                     delta = 1;
1988                                     t.putTreeVal(h, key, val);
1989                                 }
1990                             }
1991                             else if (p != null) {
1992                                 delta = -1;
1993                                 if (t.removeTreeNode(p))
1994                                     setTabAt(tab, i, untreeify(t.first));
1995                             }
1996                         }
1997                         else if (f instanceof ReservationNode)
1998                             throw new IllegalStateException("Recursive update");
1999                     }
2000                 }
2001                 if (binCount != 0) {
2002                     if (binCount >= TREEIFY_THRESHOLD)
2003                         treeifyBin(tab, i);
2004                     break;
2005                 }
2006             }
2007         }
2008         if (delta != 0)
2009             addCount((long)delta, binCount);
2010         return val;
2011     }
2012 
2013     /**
2014      * If the specified key is not already associated with a
2015      * (non-null) value, associates it with the given value.
2016      * Otherwise, replaces the value with the results of the given
2017      * remapping function, or removes if {@code null}. The entire
2018      * method invocation is performed atomically.  Some attempted
2019      * update operations on this map by other threads may be blocked
2020      * while computation is in progress, so the computation should be
2021      * short and simple, and must not attempt to update any other
2022      * mappings of this Map.
2023      *
2024      * @param key key with which the specified value is to be associated
2025      * @param value the value to use if absent
2026      * @param remappingFunction the function to recompute a value if present
2027      * @return the new value associated with the specified key, or null if none
2028      * @throws NullPointerException if the specified key or the
2029      *         remappingFunction is null
2030      * @throws RuntimeException or Error if the remappingFunction does so,
2031      *         in which case the mapping is unchanged
2032      */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;       // result to return
        int delta = 0;      // mapping-count change: +1 insert, -1 remove
        int binCount = 0;   // traversed bin length, drives treeification
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: try to install the mapping without locking.
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                // Table is resizing; help, then retry on the new table.
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {   // recheck head under lock
                        if (fh >= 0) {          // plain linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    // Existing mapping: remap; null removes it.
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // No existing mapping: append given value.
                                    delta = 1;
                                    val = value;
                                    pred.next = new Node<K,V>(h, key, val);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            // Absent: use value directly; present: remap.
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Shrink back to a list if the tree got small.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2122 
2123     // Hashtable legacy methods
2124 
2125     /**
2126      * Tests if some key maps into the specified value in this table.
2127      *
2128      * <p>Note that this method is identical in functionality to
2129      * {@link #containsValue(Object)}, and exists solely to ensure
2130      * full compatibility with class {@link java.util.Hashtable},
2131      * which supported this method prior to introduction of the
2132      * Java Collections Framework.
2133      *
2134      * @param  value a value to search for
2135      * @return {@code true} if and only if some key maps to the
2136      *         {@code value} argument in this table as
2137      *         determined by the {@code equals} method;
2138      *         {@code false} otherwise
2139      * @throws NullPointerException if the specified value is null
2140      */
2141     public boolean contains(Object value) {
2142         return containsValue(value);
2143     }
2144 
2145     /**
2146      * Returns an enumeration of the keys in this table.
2147      *
2148      * @return an enumeration of the keys in this table
2149      * @see #keySet()
2150      */
2151     public Enumeration<K> keys() {
2152         Node<K,V>[] t;
2153         int f = (t = table) == null ? 0 : t.length;
2154         return new KeyIterator<K,V>(t, f, 0, f, this);
2155     }
2156 
2157     /**
2158      * Returns an enumeration of the values in this table.
2159      *
2160      * @return an enumeration of the values in this table
2161      * @see #values()
2162      */
2163     public Enumeration<V> elements() {
2164         Node<K,V>[] t;
2165         int f = (t = table) == null ? 0 : t.length;
2166         return new ValueIterator<K,V>(t, f, 0, f, this);
2167     }
2168 
2169     // ConcurrentHashMap-only methods
2170 
2171     /**
2172      * Returns the number of mappings. This method should be used
2173      * instead of {@link #size} because a ConcurrentHashMap may
2174      * contain more mappings than can be represented as an int. The
2175      * value returned is an estimate; the actual count may differ if
2176      * there are concurrent insertions or removals.
2177      *
2178      * @return the number of mappings
2179      * @since 1.8
2180      */
2181     public long mappingCount() {
2182         long n = sumCount();
2183         return (n < 0L) ? 0L : n; // ignore transient negative values
2184     }
2185 
2186     /**
2187      * Creates a new {@link Set} backed by a ConcurrentHashMap
2188      * from the given type to {@code Boolean.TRUE}.
2189      *
2190      * @param <K> the element type of the returned set
2191      * @return the new set
2192      * @since 1.8
2193      */
2194     public static <K> KeySetView<K,Boolean> newKeySet() {
2195         return new KeySetView<K,Boolean>
2196             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2197     }
2198 
2199     /**
2200      * Creates a new {@link Set} backed by a ConcurrentHashMap
2201      * from the given type to {@code Boolean.TRUE}.
2202      *
2203      * @param initialCapacity The implementation performs internal
2204      * sizing to accommodate this many elements.
2205      * @param <K> the element type of the returned set
2206      * @return the new set
2207      * @throws IllegalArgumentException if the initial capacity of
2208      * elements is negative
2209      * @since 1.8
2210      */
2211     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2212         return new KeySetView<K,Boolean>
2213             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2214     }
2215 
2216     /**
2217      * Returns a {@link Set} view of the keys in this map, using the
2218      * given common mapped value for any additions (i.e., {@link
2219      * Collection#add} and {@link Collection#addAll(Collection)}).
2220      * This is of course only appropriate if it is acceptable to use
2221      * the same value for all additions from this view.
2222      *
2223      * @param mappedValue the mapped value to use for any additions
2224      * @return the set view
2225      * @throws NullPointerException if the mappedValue is null
2226      */
2227     public KeySetView<K,V> keySet(V mappedValue) {
2228         if (mappedValue == null)
2229             throw new NullPointerException();
2230         return new KeySetView<K,V>(this, mappedValue);
2231     }
2232 
2233     /* ---------------- Special Nodes -------------- */
2234 
2235     /**
2236      * A node inserted at head of bins during transfer operations.
2237      */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        final Node<K,V>[] nextTable;  // the table being transferred into
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null); // hash MOVED marks this as a forwarder
            this.nextTable = tab;
        }

        // Redirects lookups for an already-transferred bin into nextTable.
        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {  // special node (forwarder, tree, reservation)
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;  // restart in the newer table
                        }
                        else
                            return e.find(h, k);  // delegate (e.g. TreeBin)
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2271 
2272     /**
2273      * A place-holder node used in computeIfAbsent and compute.
2274      */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            super(RESERVED, null, null); // hash RESERVED marks a placeholder
        }

        // A reservation never holds a mapping, so lookups always miss.
        Node<K,V> find(int h, Object k) {
            return null;
        }
    }
2284 
2285     /* ---------------- Table Initialization and Resizing -------------- */
2286 
2287     /**
2288      * Returns the stamp bits for resizing a table of size n.
2289      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2290      */
2291     static final int resizeStamp(int n) {
2292         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2293     }
2294 
2295     /**
2296      * Initializes table, using the size recorded in sizeCtl.
2297      */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
                // Won the CAS: this thread creates the table.
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        // Positive sizeCtl records a requested capacity.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);  // next-resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;  // publish threshold (even on failure)
                }
                break;
            }
        }
        return tab;
    }
2320 
2321     /**
2322      * Adds to count, and if table is too small and not already
2323      * resizing, initiates transfer. If already resizing, helps
2324      * perform transfer if work is available.  Rechecks occupancy
2325      * after a transfer to see if another resize is already needed
2326      * because resizings are lagging additions.
2327      *
2328      * @param x the count to add
2329      * @param check if <0, don't check resize, if <= 1 only check if uncontended
2330      */
2331     private final void addCount(long x, int check) {
2332         CounterCell[] as; long b, s;
2333         if ((as = counterCells) != null ||
2334             !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2335             CounterCell a; long v; int m;
2336             boolean uncontended = true;
2337             if (as == null || (m = as.length - 1) < 0 ||
2338                 (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2339                 !(uncontended =
2340                   U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))) {
2341                 fullAddCount(x, uncontended);
2342                 return;
2343             }
2344             if (check <= 1)
2345                 return;
2346             s = sumCount();
2347         }
2348         if (check >= 0) {
2349             Node<K,V>[] tab, nt; int n, sc;
2350             while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2351                    (n = tab.length) < MAXIMUM_CAPACITY) {
2352                 int rs = resizeStamp(n);
2353                 if (sc < 0) {
2354                     if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2355                         sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2356                         transferIndex <= 0)
2357                         break;
2358                     if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
2359                         transfer(tab, nt);
2360                 }
2361                 else if (U.compareAndSetInt(this, SIZECTL, sc,
2362                                              (rs << RESIZE_STAMP_SHIFT) + 2))
2363                     transfer(tab, null);
2364                 s = sumCount();
2365             }
2366         }
2367     }
2368 
2369     /**
2370      * Helps transfer if a resize is in progress.
2371      */
2372     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2373         Node<K,V>[] nextTab; int sc;
2374         if (tab != null && (f instanceof ForwardingNode) &&
2375             (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2376             int rs = resizeStamp(tab.length);
2377             while (nextTab == nextTable && table == tab &&
2378                    (sc = sizeCtl) < 0) {
2379                 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2380                     sc == rs + MAX_RESIZERS || transferIndex <= 0)
2381                     break;
2382                 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
2383                     transfer(tab, nextTab);
2384                     break;
2385                 }
2386             }
2387             return nextTab;
2388         }
2389         return table;
2390     }
2391 
2392     /**
2393      * Tries to presize table to accommodate the given number of elements.
2394      *
2395      * @param size number of elements (doesn't need to be perfectly accurate)
2396      */
    private final void tryPresize(int size) {
        // Target capacity: 1.5 * size + 1, rounded up to a power of two.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {  // stop once a resize/init is running
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: initialize at the larger of the
                // recorded initial capacity and the computed target.
                n = (sc > c) ? sc : c;
                if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);  // 0.75 * n threshold
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;  // already large enough, or cannot grow further
            else if (tab == table) {
                int rs = resizeStamp(n);
                // Become the initiating resizer for the current table.
                if (U.compareAndSetInt(this, SIZECTL, sc,
                                        (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
            }
        }
    }
2428 
2429     /**
2430      * Moves and/or copies the nodes in each bin to new table. See
2431      * above for explanation.
2432      */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferIndex = n;  // bins are claimed from the top downwards
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next bin to process, taking a new stride of bins
            // via CAS on transferIndex when the current stride is done.
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;               // no work left to claim
                    advance = false;
                }
                else if (U.compareAndSetInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Final sweep done: publish new table and new threshold.
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);  // 0.75 * (2n)
                    return;
                }
                // Retire this worker; the last one out re-sweeps and commits.
                if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {  // recheck head under lock
                        // Split bin i into a "low" list (stays at i) and a
                        // "high" list (moves to i + n), keyed by hash bit n.
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            // Find the trailing run whose nodes all share one
                            // bit value; that run is reused without copying.
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Copy remaining nodes, prepending to each list.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);  // mark old bin forwarded
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify a half that got small; reuse the old
                            // bin t when one half kept every node.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);  // mark old bin forwarded
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2565 
2566     /* ---------------- Counter support -------------- */
2567 
2568     /**
2569      * A padded cell for distributing counts.  Adapted from LongAdder
2570      * and Striped64.  See their internal docs for explanation.
2571      */
    @jdk.internal.vm.annotation.Contended static final class CounterCell {
        volatile long value;  // this cell's contribution to the element count
        CounterCell(long x) { value = x; }
    }
2576 
2577     final long sumCount() {
2578         CounterCell[] as = counterCells; CounterCell a;
2579         long sum = baseCount;
2580         if (as != null) {
2581             for (int i = 0; i < as.length; ++i) {
2582                 if ((a = as[i]) != null)
2583                     sum += a.value;
2584             }
2585         }
2586         return sum;
2587     }
2588 
    // Slow path for addCount: initializes or grows the counterCells table,
    // or retries cell CASes under contention, rehashing the thread's probe
    // between attempts. See the LongAdder/Striped64 version for explanation.
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;  // release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2670 
2671     /* ---------------- Conversion from/to TreeBins -------------- */
2672 
2673     /**
2674      * Replaces all linked nodes in bin at given index unless table is
2675      * too small, in which case resizes instead.
2676      */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                // Prefer growing a small table over treeifying its bins.
                tryPresize(n << 1);
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {
                    if (tabAt(tab, index) == b) {  // recheck head under lock
                        // Copy the list into a doubly-linked TreeNode chain;
                        // the TreeBin constructor builds the red-black tree.
                        TreeNode<K,V> hd = null, tl = null;
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2702 
2703     /**
2704      * Returns a list of non-TreeNodes replacing those in given list.
2705      */
2706     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2707         Node<K,V> hd = null, tl = null;
2708         for (Node<K,V> q = b; q != null; q = q.next) {
2709             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2710             if (tl == null)
2711                 hd = p;
2712             else
2713                 tl.next = p;
2714             tl = p;
2715         }
2716         return hd;
2717     }
2718 
2719     /* ---------------- TreeNodes -------------- */
2720 
2721     /**
2722      * Nodes for use in TreeBins.
2723      */
2724     static final class TreeNode<K,V> extends Node<K,V> {
2725         TreeNode<K,V> parent;  // red-black tree links
2726         TreeNode<K,V> left;
2727         TreeNode<K,V> right;
2728         TreeNode<K,V> prev;    // needed to unlink next upon deletion
2729         boolean red;
2730 
2731         TreeNode(int hash, K key, V val, Node<K,V> next,
2732                  TreeNode<K,V> parent) {
2733             super(hash, key, val, next);
2734             this.parent = parent;
2735         }
2736 
2737         Node<K,V> find(int h, Object k) {
2738             return findTreeNode(h, k, null);
2739         }
2740 
2741         /**
2742          * Returns the TreeNode (or null if not found) for the given key
2743          * starting at given root.
2744          */
2745         final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2746             if (k != null) {
2747                 TreeNode<K,V> p = this;
2748                 do {
2749                     int ph, dir; K pk; TreeNode<K,V> q;
2750                     TreeNode<K,V> pl = p.left, pr = p.right;
2751                     if ((ph = p.hash) > h)
2752                         p = pl;
2753                     else if (ph < h)
2754                         p = pr;
2755                     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2756                         return p;
2757                     else if (pl == null)
2758                         p = pr;
2759                     else if (pr == null)
2760                         p = pl;
2761                     else if ((kc != null ||
2762                               (kc = comparableClassFor(k)) != null) &&
2763                              (dir = compareComparables(kc, k, pk)) != 0)
2764                         p = (dir < 0) ? pl : pr;
2765                     else if ((q = pr.findTreeNode(h, k, kc)) != null)
2766                         return q;
2767                     else
2768                         p = pl;
2769                 } while (p != null);
2770             }
2771             return null;
2772         }
2773     }
2774 
2775     /* ---------------- TreeBins -------------- */
2776 
2777     /**
2778      * TreeNodes used at the heads of bins. TreeBins do not hold user
2779      * keys or values, but instead point to list of TreeNodes and
2780      * their root. They also maintain a parasitic read-write lock
2781      * forcing writers (who hold bin lock) to wait for readers (who do
2782      * not) to complete before tree restructuring operations.
2783      */
2784     static final class TreeBin<K,V> extends Node<K,V> {
2785         TreeNode<K,V> root;
2786         volatile TreeNode<K,V> first;
2787         volatile Thread waiter;
2788         volatile int lockState;
2789         // values for lockState
2790         static final int WRITER = 1; // set while holding write lock
2791         static final int WAITER = 2; // set when waiting for write lock
2792         static final int READER = 4; // increment value for setting read lock
2793 
2794         /**
2795          * Tie-breaking utility for ordering insertions when equal
2796          * hashCodes and non-comparable. We don't require a total
2797          * order, just a consistent insertion rule to maintain
2798          * equivalence across rebalancings. Tie-breaking further than
2799          * necessary simplifies testing a bit.
2800          */
2801         static int tieBreakOrder(Object a, Object b) {
2802             int d;
2803             if (a == null || b == null ||
2804                 (d = a.getClass().getName().
2805                  compareTo(b.getClass().getName())) == 0)
2806                 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2807                      -1 : 1);
2808             return d;
2809         }
2810 
2811         /**
2812          * Creates bin with initial set of nodes headed by b.
2813          */
2814         TreeBin(TreeNode<K,V> b) {
2815             super(TREEBIN, null, null);
2816             this.first = b;
2817             TreeNode<K,V> r = null;
2818             for (TreeNode<K,V> x = b, next; x != null; x = next) {
2819                 next = (TreeNode<K,V>)x.next;
2820                 x.left = x.right = null;
2821                 if (r == null) {
2822                     x.parent = null;
2823                     x.red = false;
2824                     r = x;
2825                 }
2826                 else {
2827                     K k = x.key;
2828                     int h = x.hash;
2829                     Class<?> kc = null;
2830                     for (TreeNode<K,V> p = r;;) {
2831                         int dir, ph;
2832                         K pk = p.key;
2833                         if ((ph = p.hash) > h)
2834                             dir = -1;
2835                         else if (ph < h)
2836                             dir = 1;
2837                         else if ((kc == null &&
2838                                   (kc = comparableClassFor(k)) == null) ||
2839                                  (dir = compareComparables(kc, k, pk)) == 0)
2840                             dir = tieBreakOrder(k, pk);
2841                         TreeNode<K,V> xp = p;
2842                         if ((p = (dir <= 0) ? p.left : p.right) == null) {
2843                             x.parent = xp;
2844                             if (dir <= 0)
2845                                 xp.left = x;
2846                             else
2847                                 xp.right = x;
2848                             r = balanceInsertion(r, x);
2849                             break;
2850                         }
2851                     }
2852                 }
2853             }
2854             this.root = r;
2855             assert checkInvariants(root);
2856         }
2857 
2858         /**
2859          * Acquires write lock for tree restructuring.
2860          */
2861         private final void lockRoot() {
2862             if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
2863                 contendedLock(); // offload to separate method
2864         }
2865 
2866         /**
2867          * Releases write lock for tree restructuring.
2868          */
2869         private final void unlockRoot() {
2870             lockState = 0;
2871         }
2872 
2873         /**
2874          * Possibly blocks awaiting root lock.
2875          */
2876         private final void contendedLock() {
2877             boolean waiting = false;
2878             for (int s;;) {
2879                 if (((s = lockState) & ~WAITER) == 0) {
2880                     if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
2881                         if (waiting)
2882                             waiter = null;
2883                         return;
2884                     }
2885                 }
2886                 else if ((s & WAITER) == 0) {
2887                     if (U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
2888                         waiting = true;
2889                         waiter = Thread.currentThread();
2890                     }
2891                 }
2892                 else if (waiting)
2893                     LockSupport.park(this);
2894             }
2895         }
2896 
2897         /**
2898          * Returns matching node or null if none. Tries to search
2899          * using tree comparisons from root, but continues linear
2900          * search when lock not available.
2901          */
2902         final Node<K,V> find(int h, Object k) {
2903             if (k != null) {
2904                 for (Node<K,V> e = first; e != null; ) {
2905                     int s; K ek;
2906                     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2907                         if (e.hash == h &&
2908                             ((ek = e.key) == k || (ek != null && k.equals(ek))))
2909                             return e;
2910                         e = e.next;
2911                     }
2912                     else if (U.compareAndSetInt(this, LOCKSTATE, s,
2913                                                  s + READER)) {
2914                         TreeNode<K,V> r, p;
2915                         try {
2916                             p = ((r = root) == null ? null :
2917                                  r.findTreeNode(h, k, null));
2918                         } finally {
2919                             Thread w;
2920                             if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2921                                 (READER|WAITER) && (w = waiter) != null)
2922                                 LockSupport.unpark(w);
2923                         }
2924                         return p;
2925                     }
2926                 }
2927             }
2928             return null;
2929         }
2930 
2931         /**
2932          * Finds or adds a node.
2933          * @return null if added
2934          */
2935         final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2936             Class<?> kc = null;
2937             boolean searched = false;
2938             for (TreeNode<K,V> p = root;;) {
2939                 int dir, ph; K pk;
2940                 if (p == null) {
2941                     first = root = new TreeNode<K,V>(h, k, v, null, null);
2942                     break;
2943                 }
2944                 else if ((ph = p.hash) > h)
2945                     dir = -1;
2946                 else if (ph < h)
2947                     dir = 1;
2948                 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2949                     return p;
2950                 else if ((kc == null &&
2951                           (kc = comparableClassFor(k)) == null) ||
2952                          (dir = compareComparables(kc, k, pk)) == 0) {
2953                     if (!searched) {
2954                         TreeNode<K,V> q, ch;
2955                         searched = true;
2956                         if (((ch = p.left) != null &&
2957                              (q = ch.findTreeNode(h, k, kc)) != null) ||
2958                             ((ch = p.right) != null &&
2959                              (q = ch.findTreeNode(h, k, kc)) != null))
2960                             return q;
2961                     }
2962                     dir = tieBreakOrder(k, pk);
2963                 }
2964 
2965                 TreeNode<K,V> xp = p;
2966                 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2967                     TreeNode<K,V> x, f = first;
2968                     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2969                     if (f != null)
2970                         f.prev = x;
2971                     if (dir <= 0)
2972                         xp.left = x;
2973                     else
2974                         xp.right = x;
2975                     if (!xp.red)
2976                         x.red = true;
2977                     else {
2978                         lockRoot();
2979                         try {
2980                             root = balanceInsertion(root, x);
2981                         } finally {
2982                             unlockRoot();
2983                         }
2984                     }
2985                     break;
2986                 }
2987             }
2988             assert checkInvariants(root);
2989             return null;
2990         }
2991 
2992         /**
2993          * Removes the given node, that must be present before this
2994          * call.  This is messier than typical red-black deletion code
2995          * because we cannot swap the contents of an interior node
2996          * with a leaf successor that is pinned by "next" pointers
2997          * that are accessible independently of lock. So instead we
2998          * swap the tree linkages.
2999          *
3000          * @return true if now too small, so should be untreeified
3001          */
3002         final boolean removeTreeNode(TreeNode<K,V> p) {
3003             TreeNode<K,V> next = (TreeNode<K,V>)p.next;
3004             TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
3005             TreeNode<K,V> r, rl;
3006             if (pred == null)
3007                 first = next;
3008             else
3009                 pred.next = next;
3010             if (next != null)
3011                 next.prev = pred;
3012             if (first == null) {
3013                 root = null;
3014                 return true;
3015             }
3016             if ((r = root) == null || r.right == null || // too small
3017                 (rl = r.left) == null || rl.left == null)
3018                 return true;
3019             lockRoot();
3020             try {
3021                 TreeNode<K,V> replacement;
3022                 TreeNode<K,V> pl = p.left;
3023                 TreeNode<K,V> pr = p.right;
3024                 if (pl != null && pr != null) {
3025                     TreeNode<K,V> s = pr, sl;
3026                     while ((sl = s.left) != null) // find successor
3027                         s = sl;
3028                     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
3029                     TreeNode<K,V> sr = s.right;
3030                     TreeNode<K,V> pp = p.parent;
3031                     if (s == pr) { // p was s's direct parent
3032                         p.parent = s;
3033                         s.right = p;
3034                     }
3035                     else {
3036                         TreeNode<K,V> sp = s.parent;
3037                         if ((p.parent = sp) != null) {
3038                             if (s == sp.left)
3039                                 sp.left = p;
3040                             else
3041                                 sp.right = p;
3042                         }
3043                         if ((s.right = pr) != null)
3044                             pr.parent = s;
3045                     }
3046                     p.left = null;
3047                     if ((p.right = sr) != null)
3048                         sr.parent = p;
3049                     if ((s.left = pl) != null)
3050                         pl.parent = s;
3051                     if ((s.parent = pp) == null)
3052                         r = s;
3053                     else if (p == pp.left)
3054                         pp.left = s;
3055                     else
3056                         pp.right = s;
3057                     if (sr != null)
3058                         replacement = sr;
3059                     else
3060                         replacement = p;
3061                 }
3062                 else if (pl != null)
3063                     replacement = pl;
3064                 else if (pr != null)
3065                     replacement = pr;
3066                 else
3067                     replacement = p;
3068                 if (replacement != p) {
3069                     TreeNode<K,V> pp = replacement.parent = p.parent;
3070                     if (pp == null)
3071                         r = replacement;
3072                     else if (p == pp.left)
3073                         pp.left = replacement;
3074                     else
3075                         pp.right = replacement;
3076                     p.left = p.right = p.parent = null;
3077                 }
3078 
3079                 root = (p.red) ? r : balanceDeletion(r, replacement);
3080 
3081                 if (p == replacement) {  // detach pointers
3082                     TreeNode<K,V> pp;
3083                     if ((pp = p.parent) != null) {
3084                         if (p == pp.left)
3085                             pp.left = null;
3086                         else if (p == pp.right)
3087                             pp.right = null;
3088                         p.parent = null;
3089                     }
3090                 }
3091             } finally {
3092                 unlockRoot();
3093             }
3094             assert checkInvariants(root);
3095             return false;
3096         }
3097 
3098         /* ------------------------------------------------------------ */
3099         // Red-black tree methods, all adapted from CLR
3100 
3101         static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3102                                               TreeNode<K,V> p) {
3103             TreeNode<K,V> r, pp, rl;
3104             if (p != null && (r = p.right) != null) {
3105                 if ((rl = p.right = r.left) != null)
3106                     rl.parent = p;
3107                 if ((pp = r.parent = p.parent) == null)
3108                     (root = r).red = false;
3109                 else if (pp.left == p)
3110                     pp.left = r;
3111                 else
3112                     pp.right = r;
3113                 r.left = p;
3114                 p.parent = r;
3115             }
3116             return root;
3117         }
3118 
3119         static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3120                                                TreeNode<K,V> p) {
3121             TreeNode<K,V> l, pp, lr;
3122             if (p != null && (l = p.left) != null) {
3123                 if ((lr = p.left = l.right) != null)
3124                     lr.parent = p;
3125                 if ((pp = l.parent = p.parent) == null)
3126                     (root = l).red = false;
3127                 else if (pp.right == p)
3128                     pp.right = l;
3129                 else
3130                     pp.left = l;
3131                 l.right = p;
3132                 p.parent = l;
3133             }
3134             return root;
3135         }
3136 
3137         static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3138                                                     TreeNode<K,V> x) {
3139             x.red = true;
3140             for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3141                 if ((xp = x.parent) == null) {
3142                     x.red = false;
3143                     return x;
3144                 }
3145                 else if (!xp.red || (xpp = xp.parent) == null)
3146                     return root;
3147                 if (xp == (xppl = xpp.left)) {
3148                     if ((xppr = xpp.right) != null && xppr.red) {
3149                         xppr.red = false;
3150                         xp.red = false;
3151                         xpp.red = true;
3152                         x = xpp;
3153                     }
3154                     else {
3155                         if (x == xp.right) {
3156                             root = rotateLeft(root, x = xp);
3157                             xpp = (xp = x.parent) == null ? null : xp.parent;
3158                         }
3159                         if (xp != null) {
3160                             xp.red = false;
3161                             if (xpp != null) {
3162                                 xpp.red = true;
3163                                 root = rotateRight(root, xpp);
3164                             }
3165                         }
3166                     }
3167                 }
3168                 else {
3169                     if (xppl != null && xppl.red) {
3170                         xppl.red = false;
3171                         xp.red = false;
3172                         xpp.red = true;
3173                         x = xpp;
3174                     }
3175                     else {
3176                         if (x == xp.left) {
3177                             root = rotateRight(root, x = xp);
3178                             xpp = (xp = x.parent) == null ? null : xp.parent;
3179                         }
3180                         if (xp != null) {
3181                             xp.red = false;
3182                             if (xpp != null) {
3183                                 xpp.red = true;
3184                                 root = rotateLeft(root, xpp);
3185                             }
3186                         }
3187                     }
3188                 }
3189             }
3190         }
3191 
3192         static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3193                                                    TreeNode<K,V> x) {
3194             for (TreeNode<K,V> xp, xpl, xpr;;) {
3195                 if (x == null || x == root)
3196                     return root;
3197                 else if ((xp = x.parent) == null) {
3198                     x.red = false;
3199                     return x;
3200                 }
3201                 else if (x.red) {
3202                     x.red = false;
3203                     return root;
3204                 }
3205                 else if ((xpl = xp.left) == x) {
3206                     if ((xpr = xp.right) != null && xpr.red) {
3207                         xpr.red = false;
3208                         xp.red = true;
3209                         root = rotateLeft(root, xp);
3210                         xpr = (xp = x.parent) == null ? null : xp.right;
3211                     }
3212                     if (xpr == null)
3213                         x = xp;
3214                     else {
3215                         TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3216                         if ((sr == null || !sr.red) &&
3217                             (sl == null || !sl.red)) {
3218                             xpr.red = true;
3219                             x = xp;
3220                         }
3221                         else {
3222                             if (sr == null || !sr.red) {
3223                                 if (sl != null)
3224                                     sl.red = false;
3225                                 xpr.red = true;
3226                                 root = rotateRight(root, xpr);
3227                                 xpr = (xp = x.parent) == null ?
3228                                     null : xp.right;
3229                             }
3230                             if (xpr != null) {
3231                                 xpr.red = (xp == null) ? false : xp.red;
3232                                 if ((sr = xpr.right) != null)
3233                                     sr.red = false;
3234                             }
3235                             if (xp != null) {
3236                                 xp.red = false;
3237                                 root = rotateLeft(root, xp);
3238                             }
3239                             x = root;
3240                         }
3241                     }
3242                 }
3243                 else { // symmetric
3244                     if (xpl != null && xpl.red) {
3245                         xpl.red = false;
3246                         xp.red = true;
3247                         root = rotateRight(root, xp);
3248                         xpl = (xp = x.parent) == null ? null : xp.left;
3249                     }
3250                     if (xpl == null)
3251                         x = xp;
3252                     else {
3253                         TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3254                         if ((sl == null || !sl.red) &&
3255                             (sr == null || !sr.red)) {
3256                             xpl.red = true;
3257                             x = xp;
3258                         }
3259                         else {
3260                             if (sl == null || !sl.red) {
3261                                 if (sr != null)
3262                                     sr.red = false;
3263                                 xpl.red = true;
3264                                 root = rotateLeft(root, xpl);
3265                                 xpl = (xp = x.parent) == null ?
3266                                     null : xp.left;
3267                             }
3268                             if (xpl != null) {
3269                                 xpl.red = (xp == null) ? false : xp.red;
3270                                 if ((sl = xpl.left) != null)
3271                                     sl.red = false;
3272                             }
3273                             if (xp != null) {
3274                                 xp.red = false;
3275                                 root = rotateRight(root, xp);
3276                             }
3277                             x = root;
3278                         }
3279                     }
3280                 }
3281             }
3282         }
3283 
3284         /**
3285          * Checks invariants recursively for the tree of Nodes rooted at t.
3286          */
3287         static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3288             TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3289                 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3290             if (tb != null && tb.next != t)
3291                 return false;
3292             if (tn != null && tn.prev != t)
3293                 return false;
3294             if (tp != null && t != tp.left && t != tp.right)
3295                 return false;
3296             if (tl != null && (tl.parent != t || tl.hash > t.hash))
3297                 return false;
3298             if (tr != null && (tr.parent != t || tr.hash < t.hash))
3299                 return false;
3300             if (t.red && tl != null && tl.red && tr != null && tr.red)
3301                 return false;
3302             if (tl != null && !checkInvariants(tl))
3303                 return false;
3304             if (tr != null && !checkInvariants(tr))
3305                 return false;
3306             return true;
3307         }
3308 
3309         private static final Unsafe U = Unsafe.getUnsafe();
3310         private static final long LOCKSTATE;
3311         static {
3312             try {
3313                 LOCKSTATE = U.objectFieldOffset
3314                     (TreeBin.class.getDeclaredField("lockState"));
3315             } catch (ReflectiveOperationException e) {
3316                 throw new Error(e);
3317             }
3318         }
3319     }
3320 
3321     /* ----------------Table Traversal -------------- */
3322 
3323     /**
3324      * Records the table, its length, and current traversal index for a
3325      * traverser that must process a region of a forwarded table before
3326      * proceeding with current table.
3327      */
3328     static final class TableStack<K,V> {
3329         int length;
3330         int index;
3331         Node<K,V>[] tab;
3332         TableStack<K,V> next;
3333     }
3334 
3335     /**
3336      * Encapsulates traversal for methods such as containsValue; also
3337      * serves as a base class for other iterators and spliterators.
3338      *
3339      * Method advance visits once each still-valid node that was
3340      * reachable upon iterator construction. It might miss some that
3341      * were added to a bin after the bin was visited, which is OK wrt
3342      * consistency guarantees. Maintaining this property in the face
3343      * of possible ongoing resizes requires a fair amount of
3344      * bookkeeping state that is difficult to optimize away amidst
3345      * volatile accesses.  Even so, traversal maintains reasonable
3346      * throughput.
3347      *
3348      * Normally, iteration proceeds bin-by-bin traversing lists.
3349      * However, if the table has been resized, then all future steps
3350      * must traverse both the bin at the current index as well as at
3351      * (index + baseSize); and so on for further resizings. To
3352      * paranoically cope with potential sharing by users of iterators
3353      * across threads, iteration terminates if a bounds checks fails
3354      * for a table read.
3355      */
3356     static class Traverser<K,V> {
3357         Node<K,V>[] tab;        // current table; updated if resized
3358         Node<K,V> next;         // the next entry to use
3359         TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3360         int index;              // index of bin to use next
3361         int baseIndex;          // current index of initial table
3362         int baseLimit;          // index bound for initial table
3363         final int baseSize;     // initial table size
3364 
3365         Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3366             this.tab = tab;
3367             this.baseSize = size;
3368             this.baseIndex = this.index = index;
3369             this.baseLimit = limit;
3370             this.next = null;
3371         }
3372 
3373         /**
3374          * Advances if possible, returning next valid node, or null if none.
3375          */
3376         final Node<K,V> advance() {
3377             Node<K,V> e;
3378             if ((e = next) != null)
3379                 e = e.next;
3380             for (;;) {
3381                 Node<K,V>[] t; int i, n;  // must use locals in checks
3382                 if (e != null)
3383                     return next = e;
3384                 if (baseIndex >= baseLimit || (t = tab) == null ||
3385                     (n = t.length) <= (i = index) || i < 0)
3386                     return next = null;
3387                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
3388                     if (e instanceof ForwardingNode) {
3389                         tab = ((ForwardingNode<K,V>)e).nextTable;
3390                         e = null;
3391                         pushState(t, i, n);
3392                         continue;
3393                     }
3394                     else if (e instanceof TreeBin)
3395                         e = ((TreeBin<K,V>)e).first;
3396                     else
3397                         e = null;
3398                 }
3399                 if (stack != null)
3400                     recoverState(n);
3401                 else if ((index = i + baseSize) >= n)
3402                     index = ++baseIndex; // visit upper slots if present
3403             }
3404         }
3405 
3406         /**
3407          * Saves traversal state upon encountering a forwarding node.
3408          */
3409         private void pushState(Node<K,V>[] t, int i, int n) {
3410             TableStack<K,V> s = spare;  // reuse if possible
3411             if (s != null)
3412                 spare = s.next;
3413             else
3414                 s = new TableStack<K,V>();
3415             s.tab = t;
3416             s.length = n;
3417             s.index = i;
3418             s.next = stack;
3419             stack = s;
3420         }
3421 
3422         /**
3423          * Possibly pops traversal state.
3424          *
3425          * @param n length of current table
3426          */
3427         private void recoverState(int n) {
3428             TableStack<K,V> s; int len;
3429             while ((s = stack) != null && (index += (len = s.length)) >= n) {
3430                 n = len;
3431                 index = s.index;
3432                 tab = s.tab;
3433                 s.tab = null;
3434                 TableStack<K,V> next = s.next;
3435                 s.next = spare; // save for reuse
3436                 stack = next;
3437                 spare = s;
3438             }
3439             if (s == null && (index += baseSize) >= n)
3440                 index = ++baseIndex;
3441         }
3442     }
3443 
3444     /**
3445      * Base of key, value, and entry Iterators. Adds fields to
3446      * Traverser to support iterator.remove.
3447      */
3448     static class BaseIterator<K,V> extends Traverser<K,V> {
3449         final ConcurrentHashMap<K,V> map;
3450         Node<K,V> lastReturned;
3451         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3452                     ConcurrentHashMap<K,V> map) {
3453             super(tab, size, index, limit);
3454             this.map = map;
3455             advance();
3456         }
3457 
3458         public final boolean hasNext() { return next != null; }
3459         public final boolean hasMoreElements() { return next != null; }
3460 
3461         public final void remove() {
3462             Node<K,V> p;
3463             if ((p = lastReturned) == null)
3464                 throw new IllegalStateException();
3465             lastReturned = null;
3466             map.replaceNode(p.key, null, null);
3467         }
3468     }
3469 
3470     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3471         implements Iterator<K>, Enumeration<K> {
3472         KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3473                     ConcurrentHashMap<K,V> map) {
3474             super(tab, size, index, limit, map);
3475         }
3476 
3477         public final K next() {
3478             Node<K,V> p;
3479             if ((p = next) == null)
3480                 throw new NoSuchElementException();
3481             K k = p.key;
3482             lastReturned = p;
3483             advance();
3484             return k;
3485         }
3486 
3487         public final K nextElement() { return next(); }
3488     }
3489 
3490     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3491         implements Iterator<V>, Enumeration<V> {
3492         ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3493                       ConcurrentHashMap<K,V> map) {
3494             super(tab, size, index, limit, map);
3495         }
3496 
3497         public final V next() {
3498             Node<K,V> p;
3499             if ((p = next) == null)
3500                 throw new NoSuchElementException();
3501             V v = p.val;
3502             lastReturned = p;
3503             advance();
3504             return v;
3505         }
3506 
3507         public final V nextElement() { return next(); }
3508     }
3509 
3510     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3511         implements Iterator<Map.Entry<K,V>> {
3512         EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3513                       ConcurrentHashMap<K,V> map) {
3514             super(tab, size, index, limit, map);
3515         }
3516 
3517         public final Map.Entry<K,V> next() {
3518             Node<K,V> p;
3519             if ((p = next) == null)
3520                 throw new NoSuchElementException();
3521             K k = p.key;
3522             V v = p.val;
3523             lastReturned = p;
3524             advance();
3525             return new MapEntry<K,V>(k, v, map);
3526         }
3527     }
3528 
3529     /**
3530      * Exported Entry for EntryIterator.
3531      */
3532     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3533         final K key; // non-null
3534         V val;       // non-null
3535         final ConcurrentHashMap<K,V> map;
3536         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3537             this.key = key;
3538             this.val = val;
3539             this.map = map;
3540         }
3541         public K getKey()        { return key; }
3542         public V getValue()      { return val; }
3543         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3544         public String toString() {
3545             return Helpers.mapEntryToString(key, val);
3546         }
3547 
3548         public boolean equals(Object o) {
3549             Object k, v; Map.Entry<?,?> e;
3550             return ((o instanceof Map.Entry) &&
3551                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3552                     (v = e.getValue()) != null &&
3553                     (k == key || k.equals(key)) &&
3554                     (v == val || v.equals(val)));
3555         }
3556 
3557         /**
3558          * Sets our entry's value and writes through to the map. The
3559          * value to return is somewhat arbitrary here. Since we do not
3560          * necessarily track asynchronous changes, the most recent
3561          * "previous" value could be different from what we return (or
3562          * could even have been removed, in which case the put will
3563          * re-establish). We do not and cannot guarantee more.
3564          */
3565         public V setValue(V value) {
3566             if (value == null) throw new NullPointerException();
3567             V v = val;
3568             val = value;
3569             map.put(key, value);
3570             return v;
3571         }
3572     }
3573 
3574     static final class KeySpliterator<K,V> extends Traverser<K,V>
3575         implements Spliterator<K> {
3576         long est;               // size estimate
3577         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3578                        long est) {
3579             super(tab, size, index, limit);
3580             this.est = est;
3581         }
3582 
3583         public KeySpliterator<K,V> trySplit() {
3584             int i, f, h;
3585             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3586                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3587                                         f, est >>>= 1);
3588         }
3589 
3590         public void forEachRemaining(Consumer<? super K> action) {
3591             if (action == null) throw new NullPointerException();
3592             for (Node<K,V> p; (p = advance()) != null;)
3593                 action.accept(p.key);
3594         }
3595 
3596         public boolean tryAdvance(Consumer<? super K> action) {
3597             if (action == null) throw new NullPointerException();
3598             Node<K,V> p;
3599             if ((p = advance()) == null)
3600                 return false;
3601             action.accept(p.key);
3602             return true;
3603         }
3604 
3605         public long estimateSize() { return est; }
3606 
3607         public int characteristics() {
3608             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3609                 Spliterator.NONNULL;
3610         }
3611     }
3612 
3613     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3614         implements Spliterator<V> {
3615         long est;               // size estimate
3616         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3617                          long est) {
3618             super(tab, size, index, limit);
3619             this.est = est;
3620         }
3621 
3622         public ValueSpliterator<K,V> trySplit() {
3623             int i, f, h;
3624             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3625                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3626                                           f, est >>>= 1);
3627         }
3628 
3629         public void forEachRemaining(Consumer<? super V> action) {
3630             if (action == null) throw new NullPointerException();
3631             for (Node<K,V> p; (p = advance()) != null;)
3632                 action.accept(p.val);
3633         }
3634 
3635         public boolean tryAdvance(Consumer<? super V> action) {
3636             if (action == null) throw new NullPointerException();
3637             Node<K,V> p;
3638             if ((p = advance()) == null)
3639                 return false;
3640             action.accept(p.val);
3641             return true;
3642         }
3643 
3644         public long estimateSize() { return est; }
3645 
3646         public int characteristics() {
3647             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3648         }
3649     }
3650 
3651     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3652         implements Spliterator<Map.Entry<K,V>> {
3653         final ConcurrentHashMap<K,V> map; // To export MapEntry
3654         long est;               // size estimate
3655         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3656                          long est, ConcurrentHashMap<K,V> map) {
3657             super(tab, size, index, limit);
3658             this.map = map;
3659             this.est = est;
3660         }
3661 
3662         public EntrySpliterator<K,V> trySplit() {
3663             int i, f, h;
3664             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3665                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3666                                           f, est >>>= 1, map);
3667         }
3668 
3669         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3670             if (action == null) throw new NullPointerException();
3671             for (Node<K,V> p; (p = advance()) != null; )
3672                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3673         }
3674 
3675         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3676             if (action == null) throw new NullPointerException();
3677             Node<K,V> p;
3678             if ((p = advance()) == null)
3679                 return false;
3680             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3681             return true;
3682         }
3683 
3684         public long estimateSize() { return est; }
3685 
3686         public int characteristics() {
3687             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3688                 Spliterator.NONNULL;
3689         }
3690     }
3691 
3692     // Parallel bulk operations
3693 
3694     /**
3695      * Computes initial batch value for bulk tasks. The returned value
3696      * is approximately exp2 of the number of times (minus one) to
3697      * split task by two before executing leaf action. This value is
3698      * faster to compute and more convenient to use as a guide to
3699      * splitting than is the depth, since it is used while dividing by
3700      * two anyway.
3701      */
3702     final int batchFor(long b) {
3703         long n;
3704         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3705             return 0;
3706         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3707         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3708     }
3709 
3710     /**
3711      * Performs the given action for each (key, value).
3712      *
3713      * @param parallelismThreshold the (estimated) number of elements
3714      * needed for this operation to be executed in parallel
3715      * @param action the action
3716      * @since 1.8
3717      */
3718     public void forEach(long parallelismThreshold,
3719                         BiConsumer<? super K,? super V> action) {
3720         if (action == null) throw new NullPointerException();
3721         new ForEachMappingTask<K,V>
3722             (null, batchFor(parallelismThreshold), 0, 0, table,
3723              action).invoke();
3724     }
3725 
3726     /**
3727      * Performs the given action for each non-null transformation
3728      * of each (key, value).
3729      *
3730      * @param parallelismThreshold the (estimated) number of elements
3731      * needed for this operation to be executed in parallel
3732      * @param transformer a function returning the transformation
3733      * for an element, or null if there is no transformation (in
3734      * which case the action is not applied)
3735      * @param action the action
3736      * @param <U> the return type of the transformer
3737      * @since 1.8
3738      */
3739     public <U> void forEach(long parallelismThreshold,
3740                             BiFunction<? super K, ? super V, ? extends U> transformer,
3741                             Consumer<? super U> action) {
3742         if (transformer == null || action == null)
3743             throw new NullPointerException();
3744         new ForEachTransformedMappingTask<K,V,U>
3745             (null, batchFor(parallelismThreshold), 0, 0, table,
3746              transformer, action).invoke();
3747     }
3748 
3749     /**
3750      * Returns a non-null result from applying the given search
3751      * function on each (key, value), or null if none.  Upon
3752      * success, further element processing is suppressed and the
3753      * results of any other parallel invocations of the search
3754      * function are ignored.
3755      *
3756      * @param parallelismThreshold the (estimated) number of elements
3757      * needed for this operation to be executed in parallel
3758      * @param searchFunction a function returning a non-null
3759      * result on success, else null
3760      * @param <U> the return type of the search function
3761      * @return a non-null result from applying the given search
3762      * function on each (key, value), or null if none
3763      * @since 1.8
3764      */
3765     public <U> U search(long parallelismThreshold,
3766                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3767         if (searchFunction == null) throw new NullPointerException();
3768         return new SearchMappingsTask<K,V,U>
3769             (null, batchFor(parallelismThreshold), 0, 0, table,
3770              searchFunction, new AtomicReference<U>()).invoke();
3771     }
3772 
3773     /**
3774      * Returns the result of accumulating the given transformation
3775      * of all (key, value) pairs using the given reducer to
3776      * combine values, or null if none.
3777      *
3778      * @param parallelismThreshold the (estimated) number of elements
3779      * needed for this operation to be executed in parallel
3780      * @param transformer a function returning the transformation
3781      * for an element, or null if there is no transformation (in
3782      * which case it is not combined)
3783      * @param reducer a commutative associative combining function
3784      * @param <U> the return type of the transformer
3785      * @return the result of accumulating the given transformation
3786      * of all (key, value) pairs
3787      * @since 1.8
3788      */
3789     public <U> U reduce(long parallelismThreshold,
3790                         BiFunction<? super K, ? super V, ? extends U> transformer,
3791                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3792         if (transformer == null || reducer == null)
3793             throw new NullPointerException();
3794         return new MapReduceMappingsTask<K,V,U>
3795             (null, batchFor(parallelismThreshold), 0, 0, table,
3796              null, transformer, reducer).invoke();
3797     }
3798 
3799     /**
3800      * Returns the result of accumulating the given transformation
3801      * of all (key, value) pairs using the given reducer to
3802      * combine values, and the given basis as an identity value.
3803      *
3804      * @param parallelismThreshold the (estimated) number of elements
3805      * needed for this operation to be executed in parallel
3806      * @param transformer a function returning the transformation
3807      * for an element
3808      * @param basis the identity (initial default value) for the reduction
3809      * @param reducer a commutative associative combining function
3810      * @return the result of accumulating the given transformation
3811      * of all (key, value) pairs
3812      * @since 1.8
3813      */
3814     public double reduceToDouble(long parallelismThreshold,
3815                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3816                                  double basis,
3817                                  DoubleBinaryOperator reducer) {
3818         if (transformer == null || reducer == null)
3819             throw new NullPointerException();
3820         return new MapReduceMappingsToDoubleTask<K,V>
3821             (null, batchFor(parallelismThreshold), 0, 0, table,
3822              null, transformer, basis, reducer).invoke();
3823     }
3824 
3825     /**
3826      * Returns the result of accumulating the given transformation
3827      * of all (key, value) pairs using the given reducer to
3828      * combine values, and the given basis as an identity value.
3829      *
3830      * @param parallelismThreshold the (estimated) number of elements
3831      * needed for this operation to be executed in parallel
3832      * @param transformer a function returning the transformation
3833      * for an element
3834      * @param basis the identity (initial default value) for the reduction
3835      * @param reducer a commutative associative combining function
3836      * @return the result of accumulating the given transformation
3837      * of all (key, value) pairs
3838      * @since 1.8
3839      */
3840     public long reduceToLong(long parallelismThreshold,
3841                              ToLongBiFunction<? super K, ? super V> transformer,
3842                              long basis,
3843                              LongBinaryOperator reducer) {
3844         if (transformer == null || reducer == null)
3845             throw new NullPointerException();
3846         return new MapReduceMappingsToLongTask<K,V>
3847             (null, batchFor(parallelismThreshold), 0, 0, table,
3848              null, transformer, basis, reducer).invoke();
3849     }
3850 
3851     /**
3852      * Returns the result of accumulating the given transformation
3853      * of all (key, value) pairs using the given reducer to
3854      * combine values, and the given basis as an identity value.
3855      *
3856      * @param parallelismThreshold the (estimated) number of elements
3857      * needed for this operation to be executed in parallel
3858      * @param transformer a function returning the transformation
3859      * for an element
3860      * @param basis the identity (initial default value) for the reduction
3861      * @param reducer a commutative associative combining function
3862      * @return the result of accumulating the given transformation
3863      * of all (key, value) pairs
3864      * @since 1.8
3865      */
3866     public int reduceToInt(long parallelismThreshold,
3867                            ToIntBiFunction<? super K, ? super V> transformer,
3868                            int basis,
3869                            IntBinaryOperator reducer) {
3870         if (transformer == null || reducer == null)
3871             throw new NullPointerException();
3872         return new MapReduceMappingsToIntTask<K,V>
3873             (null, batchFor(parallelismThreshold), 0, 0, table,
3874              null, transformer, basis, reducer).invoke();
3875     }
3876 
3877     /**
3878      * Performs the given action for each key.
3879      *
3880      * @param parallelismThreshold the (estimated) number of elements
3881      * needed for this operation to be executed in parallel
3882      * @param action the action
3883      * @since 1.8
3884      */
3885     public void forEachKey(long parallelismThreshold,
3886                            Consumer<? super K> action) {
3887         if (action == null) throw new NullPointerException();
3888         new ForEachKeyTask<K,V>
3889             (null, batchFor(parallelismThreshold), 0, 0, table,
3890              action).invoke();
3891     }
3892 
3893     /**
3894      * Performs the given action for each non-null transformation
3895      * of each key.
3896      *
3897      * @param parallelismThreshold the (estimated) number of elements
3898      * needed for this operation to be executed in parallel
3899      * @param transformer a function returning the transformation
3900      * for an element, or null if there is no transformation (in
3901      * which case the action is not applied)
3902      * @param action the action
3903      * @param <U> the return type of the transformer
3904      * @since 1.8
3905      */
3906     public <U> void forEachKey(long parallelismThreshold,
3907                                Function<? super K, ? extends U> transformer,
3908                                Consumer<? super U> action) {
3909         if (transformer == null || action == null)
3910             throw new NullPointerException();
3911         new ForEachTransformedKeyTask<K,V,U>
3912             (null, batchFor(parallelismThreshold), 0, 0, table,
3913              transformer, action).invoke();
3914     }
3915 
3916     /**
3917      * Returns a non-null result from applying the given search
3918      * function on each key, or null if none. Upon success,
3919      * further element processing is suppressed and the results of
3920      * any other parallel invocations of the search function are
3921      * ignored.
3922      *
3923      * @param parallelismThreshold the (estimated) number of elements
3924      * needed for this operation to be executed in parallel
3925      * @param searchFunction a function returning a non-null
3926      * result on success, else null
3927      * @param <U> the return type of the search function
3928      * @return a non-null result from applying the given search
3929      * function on each key, or null if none
3930      * @since 1.8
3931      */
3932     public <U> U searchKeys(long parallelismThreshold,
3933                             Function<? super K, ? extends U> searchFunction) {
3934         if (searchFunction == null) throw new NullPointerException();
3935         return new SearchKeysTask<K,V,U>
3936             (null, batchFor(parallelismThreshold), 0, 0, table,
3937              searchFunction, new AtomicReference<U>()).invoke();
3938     }
3939 
3940     /**
3941      * Returns the result of accumulating all keys using the given
3942      * reducer to combine values, or null if none.
3943      *
3944      * @param parallelismThreshold the (estimated) number of elements
3945      * needed for this operation to be executed in parallel
3946      * @param reducer a commutative associative combining function
3947      * @return the result of accumulating all keys using the given
3948      * reducer to combine values, or null if none
3949      * @since 1.8
3950      */
3951     public K reduceKeys(long parallelismThreshold,
3952                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3953         if (reducer == null) throw new NullPointerException();
3954         return new ReduceKeysTask<K,V>
3955             (null, batchFor(parallelismThreshold), 0, 0, table,
3956              null, reducer).invoke();
3957     }
3958 
3959     /**
3960      * Returns the result of accumulating the given transformation
3961      * of all keys using the given reducer to combine values, or
3962      * null if none.
3963      *
3964      * @param parallelismThreshold the (estimated) number of elements
3965      * needed for this operation to be executed in parallel
3966      * @param transformer a function returning the transformation
3967      * for an element, or null if there is no transformation (in
3968      * which case it is not combined)
3969      * @param reducer a commutative associative combining function
3970      * @param <U> the return type of the transformer
3971      * @return the result of accumulating the given transformation
3972      * of all keys
3973      * @since 1.8
3974      */
3975     public <U> U reduceKeys(long parallelismThreshold,
3976                             Function<? super K, ? extends U> transformer,
3977          BiFunction<? super U, ? super U, ? extends U> reducer) {
3978         if (transformer == null || reducer == null)
3979             throw new NullPointerException();
3980         return new MapReduceKeysTask<K,V,U>
3981             (null, batchFor(parallelismThreshold), 0, 0, table,
3982              null, transformer, reducer).invoke();
3983     }
3984 
3985     /**
3986      * Returns the result of accumulating the given transformation
3987      * of all keys using the given reducer to combine values, and
3988      * the given basis as an identity value.
3989      *
3990      * @param parallelismThreshold the (estimated) number of elements
3991      * needed for this operation to be executed in parallel
3992      * @param transformer a function returning the transformation
3993      * for an element
3994      * @param basis the identity (initial default value) for the reduction
3995      * @param reducer a commutative associative combining function
3996      * @return the result of accumulating the given transformation
3997      * of all keys
3998      * @since 1.8
3999      */
4000     public double reduceKeysToDouble(long parallelismThreshold,
4001                                      ToDoubleFunction<? super K> transformer,
4002                                      double basis,
4003                                      DoubleBinaryOperator reducer) {
4004         if (transformer == null || reducer == null)
4005             throw new NullPointerException();
4006         return new MapReduceKeysToDoubleTask<K,V>
4007             (null, batchFor(parallelismThreshold), 0, 0, table,
4008              null, transformer, basis, reducer).invoke();
4009     }
4010 
4011     /**
4012      * Returns the result of accumulating the given transformation
4013      * of all keys using the given reducer to combine values, and
4014      * the given basis as an identity value.
4015      *
4016      * @param parallelismThreshold the (estimated) number of elements
4017      * needed for this operation to be executed in parallel
4018      * @param transformer a function returning the transformation
4019      * for an element
4020      * @param basis the identity (initial default value) for the reduction
4021      * @param reducer a commutative associative combining function
4022      * @return the result of accumulating the given transformation
4023      * of all keys
4024      * @since 1.8
4025      */
4026     public long reduceKeysToLong(long parallelismThreshold,
4027                                  ToLongFunction<? super K> transformer,
4028                                  long basis,
4029                                  LongBinaryOperator reducer) {
4030         if (transformer == null || reducer == null)
4031             throw new NullPointerException();
4032         return new MapReduceKeysToLongTask<K,V>
4033             (null, batchFor(parallelismThreshold), 0, 0, table,
4034              null, transformer, basis, reducer).invoke();
4035     }
4036 
4037     /**
4038      * Returns the result of accumulating the given transformation
4039      * of all keys using the given reducer to combine values, and
4040      * the given basis as an identity value.
4041      *
4042      * @param parallelismThreshold the (estimated) number of elements
4043      * needed for this operation to be executed in parallel
4044      * @param transformer a function returning the transformation
4045      * for an element
4046      * @param basis the identity (initial default value) for the reduction
4047      * @param reducer a commutative associative combining function
4048      * @return the result of accumulating the given transformation
4049      * of all keys
4050      * @since 1.8
4051      */
4052     public int reduceKeysToInt(long parallelismThreshold,
4053                                ToIntFunction<? super K> transformer,
4054                                int basis,
4055                                IntBinaryOperator reducer) {
4056         if (transformer == null || reducer == null)
4057             throw new NullPointerException();
4058         return new MapReduceKeysToIntTask<K,V>
4059             (null, batchFor(parallelismThreshold), 0, 0, table,
4060              null, transformer, basis, reducer).invoke();
4061     }
4062 
4063     /**
4064      * Performs the given action for each value.
4065      *
4066      * @param parallelismThreshold the (estimated) number of elements
4067      * needed for this operation to be executed in parallel
4068      * @param action the action
4069      * @since 1.8
4070      */
4071     public void forEachValue(long parallelismThreshold,
4072                              Consumer<? super V> action) {
4073         if (action == null)
4074             throw new NullPointerException();
4075         new ForEachValueTask<K,V>
4076             (null, batchFor(parallelismThreshold), 0, 0, table,
4077              action).invoke();
4078     }
4079 
4080     /**
4081      * Performs the given action for each non-null transformation
4082      * of each value.
4083      *
4084      * @param parallelismThreshold the (estimated) number of elements
4085      * needed for this operation to be executed in parallel
4086      * @param transformer a function returning the transformation
4087      * for an element, or null if there is no transformation (in
4088      * which case the action is not applied)
4089      * @param action the action
4090      * @param <U> the return type of the transformer
4091      * @since 1.8
4092      */
4093     public <U> void forEachValue(long parallelismThreshold,
4094                                  Function<? super V, ? extends U> transformer,
4095                                  Consumer<? super U> action) {
4096         if (transformer == null || action == null)
4097             throw new NullPointerException();
4098         new ForEachTransformedValueTask<K,V,U>
4099             (null, batchFor(parallelismThreshold), 0, 0, table,
4100              transformer, action).invoke();
4101     }
4102 
4103     /**
4104      * Returns a non-null result from applying the given search
4105      * function on each value, or null if none.  Upon success,
4106      * further element processing is suppressed and the results of
4107      * any other parallel invocations of the search function are
4108      * ignored.
4109      *
4110      * @param parallelismThreshold the (estimated) number of elements
4111      * needed for this operation to be executed in parallel
4112      * @param searchFunction a function returning a non-null
4113      * result on success, else null
4114      * @param <U> the return type of the search function
4115      * @return a non-null result from applying the given search
4116      * function on each value, or null if none
4117      * @since 1.8
4118      */
4119     public <U> U searchValues(long parallelismThreshold,
4120                               Function<? super V, ? extends U> searchFunction) {
4121         if (searchFunction == null) throw new NullPointerException();
4122         return new SearchValuesTask<K,V,U>
4123             (null, batchFor(parallelismThreshold), 0, 0, table,
4124              searchFunction, new AtomicReference<U>()).invoke();
4125     }
4126 
4127     /**
4128      * Returns the result of accumulating all values using the
4129      * given reducer to combine values, or null if none.
4130      *
4131      * @param parallelismThreshold the (estimated) number of elements
4132      * needed for this operation to be executed in parallel
4133      * @param reducer a commutative associative combining function
4134      * @return the result of accumulating all values
4135      * @since 1.8
4136      */
4137     public V reduceValues(long parallelismThreshold,
4138                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4139         if (reducer == null) throw new NullPointerException();
4140         return new ReduceValuesTask<K,V>
4141             (null, batchFor(parallelismThreshold), 0, 0, table,
4142              null, reducer).invoke();
4143     }
4144 
4145     /**
4146      * Returns the result of accumulating the given transformation
4147      * of all values using the given reducer to combine values, or
4148      * null if none.
4149      *
4150      * @param parallelismThreshold the (estimated) number of elements
4151      * needed for this operation to be executed in parallel
4152      * @param transformer a function returning the transformation
4153      * for an element, or null if there is no transformation (in
4154      * which case it is not combined)
4155      * @param reducer a commutative associative combining function
4156      * @param <U> the return type of the transformer
4157      * @return the result of accumulating the given transformation
4158      * of all values
4159      * @since 1.8
4160      */
4161     public <U> U reduceValues(long parallelismThreshold,
4162                               Function<? super V, ? extends U> transformer,
4163                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4164         if (transformer == null || reducer == null)
4165             throw new NullPointerException();
4166         return new MapReduceValuesTask<K,V,U>
4167             (null, batchFor(parallelismThreshold), 0, 0, table,
4168              null, transformer, reducer).invoke();
4169     }
4170 
4171     /**
4172      * Returns the result of accumulating the given transformation
4173      * of all values using the given reducer to combine values,
4174      * and the given basis as an identity value.
4175      *
4176      * @param parallelismThreshold the (estimated) number of elements
4177      * needed for this operation to be executed in parallel
4178      * @param transformer a function returning the transformation
4179      * for an element
4180      * @param basis the identity (initial default value) for the reduction
4181      * @param reducer a commutative associative combining function
4182      * @return the result of accumulating the given transformation
4183      * of all values
4184      * @since 1.8
4185      */
4186     public double reduceValuesToDouble(long parallelismThreshold,
4187                                        ToDoubleFunction<? super V> transformer,
4188                                        double basis,
4189                                        DoubleBinaryOperator reducer) {
4190         if (transformer == null || reducer == null)
4191             throw new NullPointerException();
4192         return new MapReduceValuesToDoubleTask<K,V>
4193             (null, batchFor(parallelismThreshold), 0, 0, table,
4194              null, transformer, basis, reducer).invoke();
4195     }
4196 
4197     /**
4198      * Returns the result of accumulating the given transformation
4199      * of all values using the given reducer to combine values,
4200      * and the given basis as an identity value.
4201      *
4202      * @param parallelismThreshold the (estimated) number of elements
4203      * needed for this operation to be executed in parallel
4204      * @param transformer a function returning the transformation
4205      * for an element
4206      * @param basis the identity (initial default value) for the reduction
4207      * @param reducer a commutative associative combining function
4208      * @return the result of accumulating the given transformation
4209      * of all values
4210      * @since 1.8
4211      */
4212     public long reduceValuesToLong(long parallelismThreshold,
4213                                    ToLongFunction<? super V> transformer,
4214                                    long basis,
4215                                    LongBinaryOperator reducer) {
4216         if (transformer == null || reducer == null)
4217             throw new NullPointerException();
4218         return new MapReduceValuesToLongTask<K,V>
4219             (null, batchFor(parallelismThreshold), 0, 0, table,
4220              null, transformer, basis, reducer).invoke();
4221     }
4222 
4223     /**
4224      * Returns the result of accumulating the given transformation
4225      * of all values using the given reducer to combine values,
4226      * and the given basis as an identity value.
4227      *
4228      * @param parallelismThreshold the (estimated) number of elements
4229      * needed for this operation to be executed in parallel
4230      * @param transformer a function returning the transformation
4231      * for an element
4232      * @param basis the identity (initial default value) for the reduction
4233      * @param reducer a commutative associative combining function
4234      * @return the result of accumulating the given transformation
4235      * of all values
4236      * @since 1.8
4237      */
4238     public int reduceValuesToInt(long parallelismThreshold,
4239                                  ToIntFunction<? super V> transformer,
4240                                  int basis,
4241                                  IntBinaryOperator reducer) {
4242         if (transformer == null || reducer == null)
4243             throw new NullPointerException();
4244         return new MapReduceValuesToIntTask<K,V>
4245             (null, batchFor(parallelismThreshold), 0, 0, table,
4246              null, transformer, basis, reducer).invoke();
4247     }
4248 
4249     /**
4250      * Performs the given action for each entry.
4251      *
4252      * @param parallelismThreshold the (estimated) number of elements
4253      * needed for this operation to be executed in parallel
4254      * @param action the action
4255      * @since 1.8
4256      */
4257     public void forEachEntry(long parallelismThreshold,
4258                              Consumer<? super Map.Entry<K,V>> action) {
4259         if (action == null) throw new NullPointerException();
4260         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4261                                   action).invoke();
4262     }
4263 
4264     /**
4265      * Performs the given action for each non-null transformation
4266      * of each entry.
4267      *
4268      * @param parallelismThreshold the (estimated) number of elements
4269      * needed for this operation to be executed in parallel
4270      * @param transformer a function returning the transformation
4271      * for an element, or null if there is no transformation (in
4272      * which case the action is not applied)
4273      * @param action the action
4274      * @param <U> the return type of the transformer
4275      * @since 1.8
4276      */
4277     public <U> void forEachEntry(long parallelismThreshold,
4278                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4279                                  Consumer<? super U> action) {
4280         if (transformer == null || action == null)
4281             throw new NullPointerException();
4282         new ForEachTransformedEntryTask<K,V,U>
4283             (null, batchFor(parallelismThreshold), 0, 0, table,
4284              transformer, action).invoke();
4285     }
4286 
4287     /**
4288      * Returns a non-null result from applying the given search
4289      * function on each entry, or null if none.  Upon success,
4290      * further element processing is suppressed and the results of
4291      * any other parallel invocations of the search function are
4292      * ignored.
4293      *
4294      * @param parallelismThreshold the (estimated) number of elements
4295      * needed for this operation to be executed in parallel
4296      * @param searchFunction a function returning a non-null
4297      * result on success, else null
4298      * @param <U> the return type of the search function
4299      * @return a non-null result from applying the given search
4300      * function on each entry, or null if none
4301      * @since 1.8
4302      */
4303     public <U> U searchEntries(long parallelismThreshold,
4304                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4305         if (searchFunction == null) throw new NullPointerException();
4306         return new SearchEntriesTask<K,V,U>
4307             (null, batchFor(parallelismThreshold), 0, 0, table,
4308              searchFunction, new AtomicReference<U>()).invoke();
4309     }
4310 
4311     /**
4312      * Returns the result of accumulating all entries using the
4313      * given reducer to combine values, or null if none.
4314      *
4315      * @param parallelismThreshold the (estimated) number of elements
4316      * needed for this operation to be executed in parallel
4317      * @param reducer a commutative associative combining function
4318      * @return the result of accumulating all entries
4319      * @since 1.8
4320      */
4321     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4322                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4323         if (reducer == null) throw new NullPointerException();
4324         return new ReduceEntriesTask<K,V>
4325             (null, batchFor(parallelismThreshold), 0, 0, table,
4326              null, reducer).invoke();
4327     }
4328 
4329     /**
4330      * Returns the result of accumulating the given transformation
4331      * of all entries using the given reducer to combine values,
4332      * or null if none.
4333      *
4334      * @param parallelismThreshold the (estimated) number of elements
4335      * needed for this operation to be executed in parallel
4336      * @param transformer a function returning the transformation
4337      * for an element, or null if there is no transformation (in
4338      * which case it is not combined)
4339      * @param reducer a commutative associative combining function
4340      * @param <U> the return type of the transformer
4341      * @return the result of accumulating the given transformation
4342      * of all entries
4343      * @since 1.8
4344      */
4345     public <U> U reduceEntries(long parallelismThreshold,
4346                                Function<Map.Entry<K,V>, ? extends U> transformer,
4347                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4348         if (transformer == null || reducer == null)
4349             throw new NullPointerException();
4350         return new MapReduceEntriesTask<K,V,U>
4351             (null, batchFor(parallelismThreshold), 0, 0, table,
4352              null, transformer, reducer).invoke();
4353     }
4354 
4355     /**
4356      * Returns the result of accumulating the given transformation
4357      * of all entries using the given reducer to combine values,
4358      * and the given basis as an identity value.
4359      *
4360      * @param parallelismThreshold the (estimated) number of elements
4361      * needed for this operation to be executed in parallel
4362      * @param transformer a function returning the transformation
4363      * for an element
4364      * @param basis the identity (initial default value) for the reduction
4365      * @param reducer a commutative associative combining function
4366      * @return the result of accumulating the given transformation
4367      * of all entries
4368      * @since 1.8
4369      */
4370     public double reduceEntriesToDouble(long parallelismThreshold,
4371                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4372                                         double basis,
4373                                         DoubleBinaryOperator reducer) {
4374         if (transformer == null || reducer == null)
4375             throw new NullPointerException();
4376         return new MapReduceEntriesToDoubleTask<K,V>
4377             (null, batchFor(parallelismThreshold), 0, 0, table,
4378              null, transformer, basis, reducer).invoke();
4379     }
4380 
4381     /**
4382      * Returns the result of accumulating the given transformation
4383      * of all entries using the given reducer to combine values,
4384      * and the given basis as an identity value.
4385      *
4386      * @param parallelismThreshold the (estimated) number of elements
4387      * needed for this operation to be executed in parallel
4388      * @param transformer a function returning the transformation
4389      * for an element
4390      * @param basis the identity (initial default value) for the reduction
4391      * @param reducer a commutative associative combining function
4392      * @return the result of accumulating the given transformation
4393      * of all entries
4394      * @since 1.8
4395      */
4396     public long reduceEntriesToLong(long parallelismThreshold,
4397                                     ToLongFunction<Map.Entry<K,V>> transformer,
4398                                     long basis,
4399                                     LongBinaryOperator reducer) {
4400         if (transformer == null || reducer == null)
4401             throw new NullPointerException();
4402         return new MapReduceEntriesToLongTask<K,V>
4403             (null, batchFor(parallelismThreshold), 0, 0, table,
4404              null, transformer, basis, reducer).invoke();
4405     }
4406 
4407     /**
4408      * Returns the result of accumulating the given transformation
4409      * of all entries using the given reducer to combine values,
4410      * and the given basis as an identity value.
4411      *
4412      * @param parallelismThreshold the (estimated) number of elements
4413      * needed for this operation to be executed in parallel
4414      * @param transformer a function returning the transformation
4415      * for an element
4416      * @param basis the identity (initial default value) for the reduction
4417      * @param reducer a commutative associative combining function
4418      * @return the result of accumulating the given transformation
4419      * of all entries
4420      * @since 1.8
4421      */
4422     public int reduceEntriesToInt(long parallelismThreshold,
4423                                   ToIntFunction<Map.Entry<K,V>> transformer,
4424                                   int basis,
4425                                   IntBinaryOperator reducer) {
4426         if (transformer == null || reducer == null)
4427             throw new NullPointerException();
4428         return new MapReduceEntriesToIntTask<K,V>
4429             (null, batchFor(parallelismThreshold), 0, 0, table,
4430              null, transformer, basis, reducer).invoke();
4431     }
4432 
4433 
4434     /* ----------------Views -------------- */
4435 
4436     /**
4437      * Base class for views.
4438      */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // The backing map; every view operation delegates to it.
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            // Size the result from the current count; the loop below grows
            // the array if the map gained elements while we iterate.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    // Grow by roughly 50% + 1, capping at MAX_ARRAY_SIZE
                    // without overflowing int arithmetic.
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the map lost elements while we iterated.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            // Reuse the caller's array when it is big enough, else allocate
            // a new one of the same component type.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    // Same capped ~50% growth policy as toArray().
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            // Per the Collection.toArray(T[]) contract, when the caller's
            // array is used and has spare room, the element after the last
            // one is set to null.
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-referential collections.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            // A collection trivially contains itself; null elements can
            // never be present in these views.
            if (c != this) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Use (c instanceof Set) as a hint that lookup in c is as
            // efficient as this view
            Node<K,V>[] t;
            if ((t = map.table) == null) {
                // Uninitialized table: the map is empty, nothing to remove.
                return false;
            } else if (c instanceof Set<?> && c.size() > t.length) {
                // Iterate this view, probing c for each element.
                for (Iterator<?> it = iterator(); it.hasNext(); ) {
                    if (c.contains(it.next())) {
                        it.remove();
                        modified = true;
                    }
                }
            } else {
                // Iterate c, removing each element from this view.
                for (Object e : c)
                    modified |= remove(e);
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Remove every element of this view that c does not contain.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4600 
4601     /**
4602      * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4603      * which additions may optionally be enabled by mapping to a
4604      * common value.  This class cannot be directly instantiated.
4605      * See {@link #keySet() keySet()},
4606      * {@link #keySet(Object) keySet(V)},
4607      * {@link #newKeySet() newKeySet()},
4608      * {@link #newKeySet(int) newKeySet(int)}.
4609      *
4610      * @since 1.8
4611      */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        // Value mapped on add(); when null, additions are unsupported.
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // Read the table reference once; its length bounds the traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal with onlyIfAbsent=true; null result means the key
            // was newly added.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        // Sum of element hash codes, per the Set.hashCode contract.
        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        // Set equality: same identity, or mutual containment.
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // Clamp transient negative counts to zero for the size estimate.
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                // Traverse the whole table, applying the action to each key.
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.key);
            }
        }
    }
4729 
4730     /**
4731      * A view of a ConcurrentHashMap as a {@link Collection} of
4732      * values, in which additions are disabled. This class cannot be
4733      * directly instantiated. See {@link #values()}.
4734      */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }

        // Membership test delegates to a full value scan of the backing map.
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        // Removes one occurrence of the given value, if present.
        // null can never be a value in this map, so it never matches.
        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            // Read the table reference once; its length bounds the traversal.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // Additions make no sense for a values view: there is no key to
        // associate the value with.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        @Override public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Single pass over this view, removing values present in c.
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public boolean removeIf(Predicate<? super V> filter) {
            return map.removeValueIf(filter);
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            // Clamp transient negative counts to zero for the size estimate.
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                // Traverse the whole table, applying the action to each value.
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
4803 
4804     /**
4805      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4806      * entries.  This class cannot be directly instantiated. See
4807      * {@link #entrySet()}.
4808      */
4809     static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4810         implements Set<Map.Entry<K,V>>, java.io.Serializable {
4811         private static final long serialVersionUID = 2249069246763182397L;
4812         EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4813 
        // An entry is contained when its (non-null) key maps in the backing
        // map to a value equal to the entry's (non-null) value.
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }
4822 
        // Removes the mapping only if the key currently maps to the entry's
        // value, via the two-argument map.remove(key, value).
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }
4830 
4831         /**
4832          * @return an iterator over the entries of the backing map
4833          */
4834         public Iterator<Map.Entry<K,V>> iterator() {
4835             ConcurrentHashMap<K,V> m = map;
4836             Node<K,V>[] t;
4837             int f = (t = m.table) == null ? 0 : t.length;
4838             return new EntryIterator<K,V>(t, f, 0, f, m);
4839         }
4840 
4841         public boolean add(Entry<K,V> e) {
4842             return map.putVal(e.getKey(), e.getValue(), false) == null;
4843         }
4844 
4845         public boolean addAll(Collection<? extends Entry<K,V>> c) {
4846             boolean added = false;
4847             for (Entry<K,V> e : c) {
4848                 if (add(e))
4849                     added = true;
4850             }
4851             return added;
4852         }
4853 
4854         public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4855             return map.removeEntryIf(filter);
4856         }
4857 
4858         public final int hashCode() {
4859             int h = 0;
4860             Node<K,V>[] t;
4861             if ((t = map.table) != null) {
4862                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4863                 for (Node<K,V> p; (p = it.advance()) != null; ) {
4864                     h += p.hashCode();
4865                 }
4866             }
4867             return h;
4868         }
4869 
4870         public final boolean equals(Object o) {
4871             Set<?> c;
4872             return ((o instanceof Set) &&
4873                     ((c = (Set<?>)o) == this ||
4874                      (containsAll(c) && c.containsAll(this))));
4875         }
4876 
4877         public Spliterator<Map.Entry<K,V>> spliterator() {
4878             Node<K,V>[] t;
4879             ConcurrentHashMap<K,V> m = map;
4880             long n = m.sumCount();
4881             int f = (t = m.table) == null ? 0 : t.length;
4882             return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4883         }
4884 
4885         public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4886             if (action == null) throw new NullPointerException();
4887             Node<K,V>[] t;
4888             if ((t = map.table) != null) {
4889                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4890                 for (Node<K,V> p; (p = it.advance()) != null; )
4891                     action.accept(new MapEntry<K,V>(p.key, p.val, map));
4892             }
4893         }
4894 
4895     }
4896 
4897     // -------------------------------------------------------
4898 
4899     /**
4900      * Base class for bulk tasks. Repeats some fields and code from
4901      * class Traverser, because we need to subclass CountedCompleter.
4902      */
4903     @SuppressWarnings("serial")
4904     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4905         Node<K,V>[] tab;        // same as Traverser
4906         Node<K,V> next;
4907         TableStack<K,V> stack, spare;
4908         int index;
4909         int baseIndex;
4910         int baseLimit;
4911         final int baseSize;
4912         int batch;              // split control
4913 
4914         BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4915             super(par);
4916             this.batch = b;
4917             this.index = this.baseIndex = i;
4918             if ((this.tab = t) == null)
4919                 this.baseSize = this.baseLimit = 0;
4920             else if (par == null)
4921                 this.baseSize = this.baseLimit = t.length;
4922             else {
4923                 this.baseLimit = f;
4924                 this.baseSize = par.baseSize;
4925             }
4926         }
4927 
4928         /**
4929          * Same as Traverser version.
4930          */
4931         final Node<K,V> advance() {
4932             Node<K,V> e;
4933             if ((e = next) != null)
4934                 e = e.next;
4935             for (;;) {
4936                 Node<K,V>[] t; int i, n;
4937                 if (e != null)
4938                     return next = e;
4939                 if (baseIndex >= baseLimit || (t = tab) == null ||
4940                     (n = t.length) <= (i = index) || i < 0)
4941                     return next = null;
4942                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
4943                     if (e instanceof ForwardingNode) {
4944                         tab = ((ForwardingNode<K,V>)e).nextTable;
4945                         e = null;
4946                         pushState(t, i, n);
4947                         continue;
4948                     }
4949                     else if (e instanceof TreeBin)
4950                         e = ((TreeBin<K,V>)e).first;
4951                     else
4952                         e = null;
4953                 }
4954                 if (stack != null)
4955                     recoverState(n);
4956                 else if ((index = i + baseSize) >= n)
4957                     index = ++baseIndex;
4958             }
4959         }
4960 
4961         private void pushState(Node<K,V>[] t, int i, int n) {
4962             TableStack<K,V> s = spare;
4963             if (s != null)
4964                 spare = s.next;
4965             else
4966                 s = new TableStack<K,V>();
4967             s.tab = t;
4968             s.length = n;
4969             s.index = i;
4970             s.next = stack;
4971             stack = s;
4972         }
4973 
4974         private void recoverState(int n) {
4975             TableStack<K,V> s; int len;
4976             while ((s = stack) != null && (index += (len = s.length)) >= n) {
4977                 n = len;
4978                 index = s.index;
4979                 tab = s.tab;
4980                 s.tab = null;
4981                 TableStack<K,V> next = s.next;
4982                 s.next = spare; // save for reuse
4983                 stack = next;
4984                 spare = s;
4985             }
4986             if (s == null && (index += baseSize) >= n)
4987                 index = ++baseIndex;
4988         }
4989     }
4990 
4991     /*
4992      * Task classes. Coded in a regular but ugly format/style to
4993      * simplify checks that each variant differs in the right way from
4994      * others. The null screenings exist because compilers cannot tell
4995      * that we've already null-checked task arguments, so we force
4996      * simplest hoisted bypass to help avoid convoluted traps.
4997      */
    // Applies the given action to every key in the task's range,
    // forking subtasks for upper halves of the range while the split
    // budget (batch) lasts.
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                // Repeatedly halve the remaining range, forking a subtask
                // for the upper half; batch >>>= 1 and baseLimit = h are
                // deliberate side effects of the constructor arguments.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process what remains of this task's (narrowed) range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }
5024 
    // Applies the given action to every value in the task's range;
    // splits as in ForEachKeyTask.
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the remaining local range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
5051 
    // Applies the given action to every entry in the task's range;
    // nodes themselves are passed as the entries.
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Each Node is itself a Map.Entry; pass it directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
5078 
    // Applies the given two-argument action to every (key, value)
    // mapping in the task's range.
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process the remaining local range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5105 
    // Applies the action to the non-null results of transforming each
    // key; null transformer results are skipped.
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values reach the action.
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5138 
    // Applies the action to the non-null results of transforming each
    // value; null transformer results are skipped.
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values reach the action.
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5171 
    // Applies the action to the non-null results of transforming each
    // entry; null transformer results are skipped.
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Nodes serve directly as the Map.Entry argument.
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5204 
    // Applies the action to the non-null results of transforming each
    // (key, value) mapping; null transformer results are skipped.
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values reach the action.
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5238 
    // Searches keys for a non-null result of the search function.
    // All sibling tasks share one AtomicReference: the first successful
    // CAS publishes the result and completes the root, and every task
    // checks it to stop early.
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // already found elsewhere: stop splitting
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan the local range, rechecking the shared result so
                // we quit as soon as any task succeeds.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion(); // local range exhausted
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot(); // winner completes root
                        break;
                    }
                }
            }
        }
    }
5282 
    // Searches values for a non-null result of the search function;
    // shared-result early-exit protocol as in SearchKeysTask.
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // already found elsewhere: stop splitting
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan locally until success anywhere or range exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion(); // local range exhausted
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot(); // winner completes root
                        break;
                    }
                }
            }
        }
    }
5326 
    // Searches entries for a non-null result of the search function;
    // shared-result early-exit protocol as in SearchKeysTask.
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // already found elsewhere: stop splitting
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan locally until success anywhere or range exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion(); // local range exhausted
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return; // equivalent to break: loop would exit anyway
                    }
                }
            }
        }
    }
5370 
    // Searches (key, value) mappings for a non-null result of the search
    // function; shared-result early-exit protocol as in SearchKeysTask.
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork upper halves of the range while split budget lasts.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // already found elsewhere: stop splitting
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan locally until success anywhere or range exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion(); // local range exhausted
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot(); // winner completes root
                        break;
                    }
                }
            }
        }
    }
5414 
    // Reduces all keys using the given reducer.  Forked right-hand
    // subtasks are linked via rights/nextRight so that, on completion,
    // each task can fold its subtasks' results into its own.
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves of the range while split budget lasts,
                // prepending each new subtask to the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the local range (null keys tolerated defensively).
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // On completion, fold each finished right subtask's result
                // into its parent's, walking up the completion chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5462 
    // Reduces all values using the given reducer; subtask-linking and
    // completion-time merging as in ReduceKeysTask.
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves of the range while split budget lasts,
                // prepending each new subtask to the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the local range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                // Fold finished right subtasks' results up the chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5510 
    // Reduces all entries using the given reducer; subtask-linking and
    // completion-time merging as in ReduceKeysTask.
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves of the range while split budget lasts,
                // prepending each new subtask to the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce the local range; nodes serve as Map.Entry values.
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                // Fold finished right subtasks' results up the chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5556 
    /**
     * Task returning the reduction, via {@code reducer}, of all non-null
     * transformations of keys by {@code transformer}; null if none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;   // key -> U; null results skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                 // reduction over this task's range
        MapReduceKeysTask<K,V,U> rights, nextRight; // forked right subtasks / sibling link
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Sequentially map+reduce the keys of the remaining range.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // On completion, fold each finished task's forked subtask
                // results (skipping null results) into its own result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5610 
    /**
     * Task returning the reduction, via {@code reducer}, of all non-null
     * transformations of values by {@code transformer}; null if none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;   // value -> U; null results skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                   // reduction over this task's range
        MapReduceValuesTask<K,V,U> rights, nextRight; // forked right subtasks / sibling link
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Sequentially map+reduce the values of the remaining range.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // On completion, fold each finished task's forked subtask
                // results (skipping null results) into its own result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5664 
    /**
     * Task returning the reduction, via {@code reducer}, of all non-null
     * transformations of entries by {@code transformer}; null if none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer; // entry -> U; null results skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                    // reduction over this task's range
        MapReduceEntriesTask<K,V,U> rights, nextRight; // forked right subtasks / sibling link
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Sequentially map+reduce the entries of the remaining range
                // (each Node is itself a Map.Entry).
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // On completion, fold each finished task's forked subtask
                // results (skipping null results) into its own result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5718 
    /**
     * Task returning the reduction, via {@code reducer}, of all non-null
     * transformations of (key, value) pairs by {@code transformer};
     * null if none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer; // (key, value) -> U; null skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                     // reduction over this task's range
        MapReduceMappingsTask<K,V,U> rights, nextRight; // forked right subtasks / sibling link
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Sequentially map+reduce the mappings of the remaining range.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // On completion, fold each finished task's forked subtask
                // results (skipping null results) into its own result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5772 
    /**
     * Task returning the double reduction, via {@code reducer}, of the
     * transformations of all keys by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer; // key -> double
        final DoubleBinaryOperator reducer;
        final double basis;                  // identity (seed) for the reduction
        double result;                       // reduction over this task's range
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the keys of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5822 
    /**
     * Task returning the double reduction, via {@code reducer}, of the
     * transformations of all values by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer; // value -> double
        final DoubleBinaryOperator reducer;
        final double basis;                  // identity (seed) for the reduction
        double result;                       // reduction over this task's range
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the values of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5872 
    /**
     * Task returning the double reduction, via {@code reducer}, of the
     * transformations of all entries by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer; // entry -> double
        final DoubleBinaryOperator reducer;
        final double basis;                  // identity (seed) for the reduction
        double result;                       // reduction over this task's range
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the entries of the remaining range
                // (each Node is itself a Map.Entry).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5922 
    /**
     * Task returning the double reduction, via {@code reducer}, of the
     * transformations of all (key, value) pairs by {@code transformer},
     * starting from the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer; // (key, value) -> double
        final DoubleBinaryOperator reducer;
        final double basis;                  // identity (seed) for the reduction
        double result;                       // reduction over this task's range
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the mappings of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5972 
    /**
     * Task returning the long reduction, via {@code reducer}, of the
     * transformations of all keys by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer; // key -> long
        final LongBinaryOperator reducer;
        final long basis;                    // identity (seed) for the reduction
        long result;                         // reduction over this task's range
        MapReduceKeysToLongTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the keys of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6022 
    /**
     * Task returning the long reduction, via {@code reducer}, of the
     * transformations of all values by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer; // value -> long
        final LongBinaryOperator reducer;
        final long basis;                    // identity (seed) for the reduction
        long result;                         // reduction over this task's range
        MapReduceValuesToLongTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the values of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6072 
    /**
     * Task returning the long reduction, via {@code reducer}, of the
     * transformations of all entries by {@code transformer}, starting from
     * the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer; // entry -> long
        final LongBinaryOperator reducer;
        final long basis;                    // identity (seed) for the reduction
        long result;                         // reduction over this task's range
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the entries of the remaining range
                // (each Node is itself a Map.Entry).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6122 
    /**
     * Task returning the long reduction, via {@code reducer}, of the
     * transformations of all (key, value) pairs by {@code transformer},
     * starting from the identity element {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer; // (key, value) -> long
        final LongBinaryOperator reducer;
        final long basis;                    // identity (seed) for the reduction
        long result;                         // reduction over this task's range
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // forked right subtasks / sibling link
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // seed; also passed to forked subtasks
                // While estimated work remains, split range at midpoint h:
                // fork right half [h, f) and keep left half for this task.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially map+reduce the mappings of the remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // On completion, unconditionally fold each finished task's
                // forked subtask results (primitive: no null sentinel).
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6172 
    // Fork/join task that maps each key to an int with {@code transformer}
    // and combines results with {@code reducer}, starting from {@code basis}.
    // Mirrors the sibling MapReduce*To{Int,Long,Double} tasks; only the
    // element accessor (p.key here) differs.
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;     // maps a key to an int
        final IntBinaryOperator reducer;                // combines two int results
        final int basis;                                // identity value for the reduction
        int result;                                     // reduced value for this subtree
        MapReduceKeysToIntTask<K,V> rights, nextRight;  // forked subtasks / sibling link
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Reduced value for this subtree; boxed on demand for the ForkJoinTask API.
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            // Proceed only when both functions were supplied to the constructor.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly split off the upper half of the remaining index
                // range as a forked subtask (linked through rights/nextRight)
                // while the batch budget allows further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining portion of the table.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                CountedCompleter<?> c;
                // Fold each completed right subtask's result into its parent,
                // walking up the completer chain as subtrees finish.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6222 
    // Fork/join task that maps each value to an int with {@code transformer}
    // and combines results with {@code reducer}, starting from {@code basis}.
    // Mirrors the sibling MapReduce*To{Int,Long,Double} tasks; only the
    // element accessor (p.val here) differs.
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;      // maps a value to an int
        final IntBinaryOperator reducer;                 // combines two int results
        final int basis;                                 // identity value for the reduction
        int result;                                      // reduced value for this subtree
        MapReduceValuesToIntTask<K,V> rights, nextRight; // forked subtasks / sibling link
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Reduced value for this subtree; boxed on demand for the ForkJoinTask API.
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            // Proceed only when both functions were supplied to the constructor.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly split off the upper half of the remaining index
                // range as a forked subtask (linked through rights/nextRight)
                // while the batch budget allows further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining portion of the table.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                CountedCompleter<?> c;
                // Fold each completed right subtask's result into its parent,
                // walking up the completer chain as subtrees finish.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6272 
    // Fork/join task that maps each entry to an int with {@code transformer}
    // and combines results with {@code reducer}, starting from {@code basis}.
    // Mirrors the sibling MapReduce*To{Int,Long,Double} tasks; only the
    // element accessor (the node p itself, used as a Map.Entry) differs.
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;  // maps an entry to an int
        final IntBinaryOperator reducer;                  // combines two int results
        final int basis;                                  // identity value for the reduction
        int result;                                       // reduced value for this subtree
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // forked subtasks / sibling link
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Reduced value for this subtree; boxed on demand for the ForkJoinTask API.
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            // Proceed only when both functions were supplied to the constructor.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly split off the upper half of the remaining index
                // range as a forked subtask (linked through rights/nextRight)
                // while the batch budget allows further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining portion of the table.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                CountedCompleter<?> c;
                // Fold each completed right subtask's result into its parent,
                // walking up the completer chain as subtrees finish.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6322 
    // Fork/join task that maps each (key, value) pair to an int with
    // {@code transformer} and combines results with {@code reducer},
    // starting from {@code basis}. Mirrors the sibling
    // MapReduce*To{Int,Long,Double} tasks; only the element accessor
    // (p.key, p.val here) differs.
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer; // maps (key, value) to an int
        final IntBinaryOperator reducer;                   // combines two int results
        final int basis;                                   // identity value for the reduction
        int result;                                        // reduced value for this subtree
        MapReduceMappingsToIntTask<K,V> rights, nextRight; // forked subtasks / sibling link
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Reduced value for this subtree; boxed on demand for the ForkJoinTask API.
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            // Proceed only when both functions were supplied to the constructor.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly split off the upper half of the remaining index
                // range as a forked subtask (linked through rights/nextRight)
                // while the batch budget allows further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own remaining portion of the table.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                // Fold each completed right subtask's result into its parent,
                // walking up the completer chain as subtrees finish.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6372 
    // Unsafe mechanics: raw field/array offsets used for CAS and volatile
    // access to the map's control fields and table slots.
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL;       // offset of ConcurrentHashMap.sizeCtl
    private static final long TRANSFERINDEX; // offset of ConcurrentHashMap.transferIndex
    private static final long BASECOUNT;     // offset of ConcurrentHashMap.baseCount
    private static final long CELLSBUSY;     // offset of ConcurrentHashMap.cellsBusy
    private static final long CELLVALUE;     // offset of CounterCell.value
    private static final int ABASE;          // base offset of Node[] storage
    private static final int ASHIFT;         // log2 of Node[] element scale

    static {
        try {
            SIZECTL = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));

            CELLVALUE = U.objectFieldOffset
                (CounterCell.class.getDeclaredField("value"));

            ABASE = U.arrayBaseOffset(Node[].class);
            int scale = U.arrayIndexScale(Node[].class);
            // A power-of-two scale lets element addresses be computed as
            // ABASE + (i << ASHIFT) instead of a multiply.
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (ReflectiveOperationException e) {
            throw new Error(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
6410 }