1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea with assistance from members of JCP JSR-166
  32  * Expert Group and released to the public domain, as explained at
  33  * http://creativecommons.org/publicdomain/zero/1.0/
  34  */
  35 
  36 package java.util.concurrent;
  37 
  38 import java.io.ObjectStreamField;
  39 import java.io.Serializable;
  40 import java.lang.reflect.ParameterizedType;
  41 import java.lang.reflect.Type;
  42 import java.util.AbstractMap;
  43 import java.util.Arrays;
  44 import java.util.Collection;
  45 import java.util.Enumeration;
  46 import java.util.HashMap;
  47 import java.util.Hashtable;
  48 import java.util.Iterator;
  49 import java.util.Map;
  50 import java.util.NoSuchElementException;
  51 import java.util.Set;
  52 import java.util.Spliterator;
  53 import java.util.concurrent.atomic.AtomicReference;
  54 import java.util.concurrent.locks.LockSupport;
  55 import java.util.concurrent.locks.ReentrantLock;
  56 import java.util.function.BiConsumer;
  57 import java.util.function.BiFunction;
  58 import java.util.function.Consumer;
  59 import java.util.function.DoubleBinaryOperator;
  60 import java.util.function.Function;
  61 import java.util.function.IntBinaryOperator;
  62 import java.util.function.LongBinaryOperator;
  63 import java.util.function.Predicate;
  64 import java.util.function.ToDoubleBiFunction;
  65 import java.util.function.ToDoubleFunction;
  66 import java.util.function.ToIntBiFunction;
  67 import java.util.function.ToIntFunction;
  68 import java.util.function.ToLongBiFunction;
  69 import java.util.function.ToLongFunction;
  70 import java.util.stream.Stream;
  71 import jdk.internal.misc.Unsafe;
  72 
  73 /**
  74  * A hash table supporting full concurrency of retrievals and
  75  * high expected concurrency for updates. This class obeys the
  76  * same functional specification as {@link java.util.Hashtable}, and
  77  * includes versions of methods corresponding to each method of
  78  * {@code Hashtable}. However, even though all operations are
  79  * thread-safe, retrieval operations do <em>not</em> entail locking,
  80  * and there is <em>not</em> any support for locking the entire table
  81  * in a way that prevents all access.  This class is fully
  82  * interoperable with {@code Hashtable} in programs that rely on its
  83  * thread safety but not on its synchronization details.
  84  *
  85  * <p>Retrieval operations (including {@code get}) generally do not
  86  * block, so may overlap with update operations (including {@code put}
  87  * and {@code remove}). Retrievals reflect the results of the most
  88  * recently <em>completed</em> update operations holding upon their
  89  * onset. (More formally, an update operation for a given key bears a
  90  * <em>happens-before</em> relation with any (non-null) retrieval for
  91  * that key reporting the updated value.)  For aggregate operations
  92  * such as {@code putAll} and {@code clear}, concurrent retrievals may
  93  * reflect insertion or removal of only some entries.  Similarly,
  94  * Iterators, Spliterators and Enumerations return elements reflecting the
  95  * state of the hash table at some point at or since the creation of the
  96  * iterator/enumeration.  They do <em>not</em> throw {@link
  97  * java.util.ConcurrentModificationException ConcurrentModificationException}.
  98  * However, iterators are designed to be used by only one thread at a time.
  99  * Bear in mind that the results of aggregate status methods including
 100  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
 101  * useful only when a map is not undergoing concurrent updates in other threads.
 102  * Otherwise the results of these methods reflect transient states
 103  * that may be adequate for monitoring or estimation purposes, but not
 104  * for program control.
 105  *
 106  * <p>The table is dynamically expanded when there are too many
 107  * collisions (i.e., keys that have distinct hash codes but fall into
 108  * the same slot modulo the table size), with the expected average
 109  * effect of maintaining roughly two bins per mapping (corresponding
 110  * to a 0.75 load factor threshold for resizing). There may be much
 111  * variance around this average as mappings are added and removed, but
 112  * overall, this maintains a commonly accepted time/space tradeoff for
 113  * hash tables.  However, resizing this or any other kind of hash
 114  * table may be a relatively slow operation. When possible, it is a
 115  * good idea to provide a size estimate as an optional {@code
 116  * initialCapacity} constructor argument. An additional optional
 117  * {@code loadFactor} constructor argument provides a further means of
 118  * customizing initial table capacity by specifying the table density
 119  * to be used in calculating the amount of space to allocate for the
 120  * given number of elements.  Also, for compatibility with previous
 121  * versions of this class, constructors may optionally specify an
 122  * expected {@code concurrencyLevel} as an additional hint for
 123  * internal sizing.  Note that using many keys with exactly the same
 124  * {@code hashCode()} is a sure way to slow down performance of any
 125  * hash table. To ameliorate impact, when keys are {@link Comparable},
 126  * this class may use comparison order among keys to help break ties.
 127  *
 128  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
 129  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 * (using {@link #keySet(Object)}) when only keys are of interest, and the
 131  * mapped values are (perhaps transiently) not used or all take the
 132  * same mapping value.
 133  *
 134  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
 135  * form of histogram or multiset) by using {@link
 136  * java.util.concurrent.atomic.LongAdder} values and initializing via
 137  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 138  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 139  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
 140  *
 141  * <p>This class and its views and iterators implement all of the
 142  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 143  * interfaces.
 144  *
 145  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
 146  * does <em>not</em> allow {@code null} to be used as a key or value.
 147  *
 148  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
 149  * operations that, unlike most {@link Stream} methods, are designed
 150  * to be safely, and often sensibly, applied even with maps that are
 151  * being concurrently updated by other threads; for example, when
 152  * computing a snapshot summary of the values in a shared registry.
 153  * There are three kinds of operation, each with four forms, accepting
 154  * functions with keys, values, entries, and (key, value) pairs as
 155  * arguments and/or return values. Because the elements of a
 156  * ConcurrentHashMap are not ordered in any particular way, and may be
 157  * processed in different orders in different parallel executions, the
 158  * correctness of supplied functions should not depend on any
 159  * ordering, or on any other objects or values that may transiently
 160  * change while computation is in progress; and except for forEach
 161  * actions, should ideally be side-effect-free. Bulk operations on
 162  * {@link java.util.Map.Entry} objects do not support method {@code
 163  * setValue}.
 164  *
 165  * <ul>
 166  * <li>forEach: Performs a given action on each element.
 167  * A variant form applies a given transformation on each element
 168  * before performing the action.
 169  *
 170  * <li>search: Returns the first available non-null result of
 171  * applying a given function on each element; skipping further
 172  * search when a result is found.
 173  *
 174  * <li>reduce: Accumulates each element.  The supplied reduction
 175  * function cannot rely on ordering (more formally, it should be
 176  * both associative and commutative).  There are five variants:
 177  *
 178  * <ul>
 179  *
 180  * <li>Plain reductions. (There is not a form of this method for
 181  * (key, value) function arguments since there is no corresponding
 182  * return type.)
 183  *
 184  * <li>Mapped reductions that accumulate the results of a given
 185  * function applied to each element.
 186  *
 187  * <li>Reductions to scalar doubles, longs, and ints, using a
 188  * given basis value.
 189  *
 190  * </ul>
 191  * </ul>
 192  *
 193  * <p>These bulk operations accept a {@code parallelismThreshold}
 194  * argument. Methods proceed sequentially if the current map size is
 195  * estimated to be less than the given threshold. Using a value of
 196  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
 197  * of {@code 1} results in maximal parallelism by partitioning into
 198  * enough subtasks to fully utilize the {@link
 199  * ForkJoinPool#commonPool()} that is used for all parallel
 200  * computations. Normally, you would initially choose one of these
 201  * extreme values, and then measure performance of using in-between
 202  * values that trade off overhead versus throughput.
 203  *
 204  * <p>The concurrency properties of bulk operations follow
 205  * from those of ConcurrentHashMap: Any non-null result returned
 206  * from {@code get(key)} and related access methods bears a
 207  * happens-before relation with the associated insertion or
 208  * update.  The result of any bulk operation reflects the
 209  * composition of these per-element relations (but is not
 210  * necessarily atomic with respect to the map as a whole unless it
 211  * is somehow known to be quiescent).  Conversely, because keys
 212  * and values in the map are never null, null serves as a reliable
 213  * atomic indicator of the current lack of any result.  To
 214  * maintain this property, null serves as an implicit basis for
 215  * all non-scalar reduction operations. For the double, long, and
 216  * int versions, the basis should be one that, when combined with
 217  * any other value, returns that other value (more formally, it
 218  * should be the identity element for the reduction). Most common
 219  * reductions have these properties; for example, computing a sum
 220  * with basis 0 or a minimum with basis MAX_VALUE.
 221  *
 222  * <p>Search and transformation functions provided as arguments
 223  * should similarly return null to indicate the lack of any result
 224  * (in which case it is not used). In the case of mapped
 225  * reductions, this also enables transformations to serve as
 226  * filters, returning null (or, in the case of primitive
 227  * specializations, the identity basis) if the element should not
 228  * be combined. You can create compound transformations and
 229  * filterings by composing them yourself under this "null means
 230  * there is nothing there now" rule before using them in search or
 231  * reduce operations.
 232  *
 233  * <p>Methods accepting and/or returning Entry arguments maintain
 234  * key-value associations. They may be useful for example when
 235  * finding the key for the greatest value. Note that "plain" Entry
 236  * arguments can be supplied using {@code new
 237  * AbstractMap.SimpleEntry(k,v)}.
 238  *
 239  * <p>Bulk operations may complete abruptly, throwing an
 240  * exception encountered in the application of a supplied
 241  * function. Bear in mind when handling such exceptions that other
 242  * concurrently executing functions could also have thrown
 243  * exceptions, or would have done so if the first exception had
 244  * not occurred.
 245  *
 246  * <p>Speedups for parallel compared to sequential forms are common
 247  * but not guaranteed.  Parallel operations involving brief functions
 248  * on small maps may execute more slowly than sequential forms if the
 249  * underlying work to parallelize the computation is more expensive
 250  * than the computation itself.  Similarly, parallelization may not
 251  * lead to much actual parallelism if all processors are busy
 252  * performing unrelated tasks.
 253  *
 254  * <p>All arguments to all task methods must be non-null.
 255  *
 256  * <p>This class is a member of the
 257  * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 258  * Java Collections Framework</a>.
 259  *
 260  * @since 1.5
 261  * @author Doug Lea
 262  * @param <K> the type of keys maintained by this map
 263  * @param <V> the type of mapped values
 264  */
 265 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
 266     implements ConcurrentMap<K,V>, Serializable {
    /** Serialization version, fixed to remain stream-compatible across releases. */
    private static final long serialVersionUID = 7249069246763182397L;
 268 
 269     /*
 270      * Overview:
 271      *
 272      * The primary design goal of this hash table is to maintain
 273      * concurrent readability (typically method get(), but also
 274      * iterators and related methods) while minimizing update
 275      * contention. Secondary goals are to keep space consumption about
 276      * the same or better than java.util.HashMap, and to support high
 277      * initial insertion rates on an empty table by many threads.
 278      *
 279      * This map usually acts as a binned (bucketed) hash table.  Each
 280      * key-value mapping is held in a Node.  Most nodes are instances
 281      * of the basic Node class with hash, key, value, and next
 282      * fields. However, various subclasses exist: TreeNodes are
 283      * arranged in balanced trees, not lists.  TreeBins hold the roots
 284      * of sets of TreeNodes. ForwardingNodes are placed at the heads
 285      * of bins during resizing. ReservationNodes are used as
 286      * placeholders while establishing values in computeIfAbsent and
 287      * related methods.  The types TreeBin, ForwardingNode, and
 288      * ReservationNode do not hold normal user keys, values, or
 289      * hashes, and are readily distinguishable during search etc
 290      * because they have negative hash fields and null key and value
 291      * fields. (These special nodes are either uncommon or transient,
 292      * so the impact of carrying around some unused fields is
 293      * insignificant.)
 294      *
 295      * The table is lazily initialized to a power-of-two size upon the
 296      * first insertion.  Each bin in the table normally contains a
 297      * list of Nodes (most often, the list has only zero or one Node).
 298      * Table accesses require volatile/atomic reads, writes, and
 299      * CASes.  Because there is no other way to arrange this without
 300      * adding further indirections, we use intrinsics
 301      * (jdk.internal.misc.Unsafe) operations.
 302      *
 303      * We use the top (sign) bit of Node hash fields for control
 304      * purposes -- it is available anyway because of addressing
 305      * constraints.  Nodes with negative hash fields are specially
 306      * handled or ignored in map methods.
 307      *
 308      * Insertion (via put or its variants) of the first node in an
 309      * empty bin is performed by just CASing it to the bin.  This is
 310      * by far the most common case for put operations under most
 311      * key/hash distributions.  Other update operations (insert,
 312      * delete, and replace) require locks.  We do not want to waste
 313      * the space required to associate a distinct lock object with
 314      * each bin, so instead use the first node of a bin list itself as
 315      * a lock. Locking support for these locks relies on builtin
 316      * "synchronized" monitors.
 317      *
 318      * Using the first node of a list as a lock does not by itself
 319      * suffice though: When a node is locked, any update must first
 320      * validate that it is still the first node after locking it, and
 321      * retry if not. Because new nodes are always appended to lists,
 322      * once a node is first in a bin, it remains first until deleted
 323      * or the bin becomes invalidated (upon resizing).
 324      *
 325      * The main disadvantage of per-bin locks is that other update
 326      * operations on other nodes in a bin list protected by the same
 327      * lock can stall, for example when user equals() or mapping
 328      * functions take a long time.  However, statistically, under
 329      * random hash codes, this is not a common problem.  Ideally, the
 330      * frequency of nodes in bins follows a Poisson distribution
 331      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
 332      * parameter of about 0.5 on average, given the resizing threshold
 333      * of 0.75, although with a large variance because of resizing
 334      * granularity. Ignoring variance, the expected occurrences of
 335      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
 336      * first values are:
 337      *
 338      * 0:    0.60653066
 339      * 1:    0.30326533
 340      * 2:    0.07581633
 341      * 3:    0.01263606
 342      * 4:    0.00157952
 343      * 5:    0.00015795
 344      * 6:    0.00001316
 345      * 7:    0.00000094
 346      * 8:    0.00000006
 347      * more: less than 1 in ten million
 348      *
 349      * Lock contention probability for two threads accessing distinct
 350      * elements is roughly 1 / (8 * #elements) under random hashes.
 351      *
 352      * Actual hash code distributions encountered in practice
 353      * sometimes deviate significantly from uniform randomness.  This
 354      * includes the case when N > (1<<30), so some keys MUST collide.
 355      * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes or ones that differ only
 357      * in masked-out high bits. So we use a secondary strategy that
 358      * applies when the number of nodes in a bin exceeds a
 359      * threshold. These TreeBins use a balanced tree to hold nodes (a
 360      * specialized form of red-black trees), bounding search time to
 361      * O(log N).  Each search step in a TreeBin is at least twice as
 362      * slow as in a regular list, but given that N cannot exceed
 363      * (1<<64) (before running out of addresses) this bounds search
 364      * steps, lock hold times, etc, to reasonable constants (roughly
 365      * 100 nodes inspected per operation worst case) so long as keys
 366      * are Comparable (which is very common -- String, Long, etc).
 367      * TreeBin nodes (TreeNodes) also maintain the same "next"
 368      * traversal pointers as regular nodes, so can be traversed in
 369      * iterators in the same way.
 370      *
 371      * The table is resized when occupancy exceeds a percentage
 372      * threshold (nominally, 0.75, but see below).  Any thread
 373      * noticing an overfull bin may assist in resizing after the
 374      * initiating thread allocates and sets up the replacement array.
 375      * However, rather than stalling, these other threads may proceed
 376      * with insertions etc.  The use of TreeBins shields us from the
 377      * worst case effects of overfilling while resizes are in
 378      * progress.  Resizing proceeds by transferring bins, one by one,
 379      * from the table to the next table. However, threads claim small
 380      * blocks of indices to transfer (via field transferIndex) before
 381      * doing so, reducing contention.  A generation stamp in field
 382      * sizeCtl ensures that resizings do not overlap. Because we are
 383      * using power-of-two expansion, the elements from each bin must
 384      * either stay at same index, or move with a power of two
 385      * offset. We eliminate unnecessary node creation by catching
 386      * cases where old nodes can be reused because their next fields
 387      * won't change.  On average, only about one-sixth of them need
 388      * cloning when a table doubles. The nodes they replace will be
 389      * garbage collectable as soon as they are no longer referenced by
 390      * any reader thread that may be in the midst of concurrently
 391      * traversing table.  Upon transfer, the old table bin contains
 392      * only a special forwarding node (with hash field "MOVED") that
 393      * contains the next table as its key. On encountering a
 394      * forwarding node, access and update operations restart, using
 395      * the new table.
 396      *
 397      * Each bin transfer requires its bin lock, which can stall
 398      * waiting for locks while resizing. However, because other
 399      * threads can join in and help resize rather than contend for
 400      * locks, average aggregate waits become shorter as resizing
 401      * progresses.  The transfer operation must also ensure that all
 402      * accessible bins in both the old and new table are usable by any
 403      * traversal.  This is arranged in part by proceeding from the
 404      * last bin (table.length - 1) up towards the first.  Upon seeing
 405      * a forwarding node, traversals (see class Traverser) arrange to
 406      * move to the new table without revisiting nodes.  To ensure that
 407      * no intervening nodes are skipped even when moved out of order,
 408      * a stack (see class TableStack) is created on first encounter of
 409      * a forwarding node during a traversal, to maintain its place if
 410      * later processing the current table. The need for these
 411      * save/restore mechanics is relatively rare, but when one
 412      * forwarding node is encountered, typically many more will be.
 413      * So Traversers use a simple caching scheme to avoid creating so
 414      * many new TableStack nodes. (Thanks to Peter Levart for
 415      * suggesting use of a stack here.)
 416      *
 417      * The traversal scheme also applies to partial traversals of
 418      * ranges of bins (via an alternate Traverser constructor)
 419      * to support partitioned aggregate operations.  Also, read-only
 420      * operations give up if ever forwarded to a null table, which
 421      * provides support for shutdown-style clearing, which is also not
 422      * currently implemented.
 423      *
 424      * Lazy table initialization minimizes footprint until first use,
 425      * and also avoids resizings when the first operation is from a
 426      * putAll, constructor with map argument, or deserialization.
 427      * These cases attempt to override the initial capacity settings,
 428      * but harmlessly fail to take effect in cases of races.
 429      *
 430      * The element count is maintained using a specialization of
 431      * LongAdder. We need to incorporate a specialization rather than
 432      * just use a LongAdder in order to access implicit
 433      * contention-sensing that leads to creation of multiple
 434      * CounterCells.  The counter mechanics avoid contention on
 435      * updates but can encounter cache thrashing if read too
 436      * frequently during concurrent access. To avoid reading so often,
 437      * resizing under contention is attempted only upon adding to a
 438      * bin already holding two or more nodes. Under uniform hash
 439      * distributions, the probability of this occurring at threshold
 440      * is around 13%, meaning that only about 1 in 8 puts check
 441      * threshold (and after resizing, many fewer do so).
 442      *
 443      * TreeBins use a special form of comparison for search and
 444      * related operations (which is the main reason we cannot use
 445      * existing collections such as TreeMaps). TreeBins contain
 446      * Comparable elements, but may contain others, as well as
 447      * elements that are Comparable but not necessarily Comparable for
 448      * the same T, so we cannot invoke compareTo among them. To handle
 449      * this, the tree is ordered primarily by hash value, then by
 450      * Comparable.compareTo order if applicable.  On lookup at a node,
 451      * if elements are not comparable or compare as 0 then both left
 452      * and right children may need to be searched in the case of tied
 453      * hash values. (This corresponds to the full list search that
 454      * would be necessary if all elements were non-Comparable and had
 455      * tied hashes.) On insertion, to keep a total ordering (or as
 456      * close as is required here) across rebalancings, we compare
 457      * classes and identityHashCodes as tie-breakers. The red-black
 458      * balancing code is updated from pre-jdk-collections
 459      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
 460      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
 461      * Algorithms" (CLR).
 462      *
 463      * TreeBins also require an additional locking mechanism.  While
 464      * list traversal is always possible by readers even during
 465      * updates, tree traversal is not, mainly because of tree-rotations
 466      * that may change the root node and/or its linkages.  TreeBins
 467      * include a simple read-write lock mechanism parasitic on the
 468      * main bin-synchronization strategy: Structural adjustments
 469      * associated with an insertion or removal are already bin-locked
 470      * (and so cannot conflict with other writers) but must wait for
 471      * ongoing readers to finish. Since there can be only one such
 472      * waiter, we use a simple scheme using a single "waiter" field to
 473      * block writers.  However, readers need never block.  If the root
 474      * lock is held, they proceed along the slow traversal path (via
 475      * next-pointers) until the lock becomes available or the list is
 476      * exhausted, whichever comes first. These cases are not fast, but
 477      * maximize aggregate expected throughput.
 478      *
 479      * Maintaining API and serialization compatibility with previous
 480      * versions of this class introduces several oddities. Mainly: We
 481      * leave untouched but unused constructor arguments referring to
 482      * concurrencyLevel. We accept a loadFactor constructor argument,
 483      * but apply it only to initial table capacity (which is the only
 484      * time that we can guarantee to honor it.) We also declare an
 485      * unused "Segment" class that is instantiated in minimal form
 486      * only when serializing.
 487      *
 488      * Also, solely for compatibility with previous versions of this
 489      * class, it extends AbstractMap, even though all of its methods
 490      * are overridden, so it is just useless baggage.
 491      *
 492      * This file is organized to make things a little easier to follow
 493      * while reading than they might otherwise: First the main static
 494      * declarations and utilities, then fields, then main public
 495      * methods (with a few factorings of multiple public methods into
 496      * internal ones), then sizing methods, trees, traversers, and
 497      * bulk operations.
 498      */
 499 
 500     /* ---------------- Constants -------------- */
 501 
    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes (normal node hashes are masked with HASH_BITS
     * to be non-negative; negative hashes mark special nodes).
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;
 516 
    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.  (The -8 headroom
     * presumably accounts for VM array header words, since some VMs
     * fail allocations closer to Integer.MAX_VALUE.)
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class:
     * the concurrencyLevel constructor argument is accepted but now
     * serves only as a sizing hint (see the class javadoc).
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
 528 
    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold (n - n/4 == 0.75 * n,
     * computed in integer arithmetic).
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.  (Under random hashes, a bin reaching 8 nodes is
     * expected less than once in ten million insertions; see the
     * Poisson analysis in the overview comment above.)
     */
    static final int TREEIFY_THRESHOLD = 8;
 547 
    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.  The gap
     * below TREEIFY_THRESHOLD avoids flip-flopping between list and
     * tree forms when a bin's size hovers near the boundary.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;
 562 
    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.  The remaining low
     * (32 - RESIZE_STAMP_BITS) bits hold the count of threads
     * helping with the current resize (bounded by MAX_RESIZERS).
     */
    private static final int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits (65535 with the
     * current 16-bit stamp width).
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    /**
     * The bit shift for recording size stamp in sizeCtl: positions
     * the generation stamp in the high RESIZE_STAMP_BITS bits.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
 588 
 589     /*
 590      * Encodings for Node hash fields. See above for explanation.
 591      */
 592     static final int MOVED     = -1; // hash for forwarding nodes
 593     static final int TREEBIN   = -2; // hash for roots of trees
 594     static final int RESERVED  = -3; // hash for transient reservations
 595     static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
 596 
 597     /** Number of CPUS, to place bounds on some sizings */
 598     static final int NCPU = Runtime.getRuntime().availableProcessors();
 599 
 600     /**
 601      * Serialized pseudo-fields, provided only for jdk7 compatibility.
 602      * @serialField segments Segment[]
 603      *   The segments, each of which is a specialized hash table.
 604      * @serialField segmentMask int
 605      *   Mask value for indexing into segments. The upper bits of a
 606      *   key's hash code are used to choose the segment.
 607      * @serialField segmentShift int
 608      *   Shift value for indexing within segments.
 609      */
 610     private static final ObjectStreamField[] serialPersistentFields = {
 611         new ObjectStreamField("segments", Segment[].class),
 612         new ObjectStreamField("segmentMask", Integer.TYPE),
 613         new ObjectStreamField("segmentShift", Integer.TYPE),
 614     };
 615 
 616     /* ---------------- Nodes -------------- */
 617 
 618     /**
 619      * Key-value entry.  This class is never exported out as a
 620      * user-mutable Map.Entry (i.e., one supporting setValue; see
 621      * MapEntry below), but can be used for read-only traversals used
 622      * in bulk tasks.  Subclasses of Node with a negative hash field
 623      * are special, and contain null keys and values (but are never
 624      * exported).  Otherwise, keys and vals are never null.
 625      */
 626     static class Node<K,V> implements Map.Entry<K,V> {
 627         final int hash;
 628         final K key;
 629         volatile V val;
 630         volatile Node<K,V> next;
 631 
 632         Node(int hash, K key, V val) {
 633             this.hash = hash;
 634             this.key = key;
 635             this.val = val;
 636         }
 637 
 638         Node(int hash, K key, V val, Node<K,V> next) {
 639             this(hash, key, val);
 640             this.next = next;
 641         }
 642 
 643         public final K getKey()     { return key; }
 644         public final V getValue()   { return val; }
 645         public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
 646         public final String toString() {
 647             return Helpers.mapEntryToString(key, val);
 648         }
 649         public final V setValue(V value) {
 650             throw new UnsupportedOperationException();
 651         }
 652 
 653         public final boolean equals(Object o) {
 654             Object k, v, u; Map.Entry<?,?> e;
 655             return ((o instanceof Map.Entry) &&
 656                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
 657                     (v = e.getValue()) != null &&
 658                     (k == key || k.equals(key)) &&
 659                     (v == (u = val) || v.equals(u)));
 660         }
 661 
 662         /**
 663          * Virtualized support for map.get(); overridden in subclasses.
 664          */
 665         Node<K,V> find(int h, Object k) {
 666             Node<K,V> e = this;
 667             if (k != null) {
 668                 do {
 669                     K ek;
 670                     if (e.hash == h &&
 671                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
 672                         return e;
 673                 } while ((e = e.next) != null);
 674             }
 675             return null;
 676         }
 677     }
 678 
 679     /* ---------------- Static utilities -------------- */
 680 
 681     /**
 682      * Spreads (XORs) higher bits of hash to lower and also forces top
 683      * bit to 0. Because the table uses power-of-two masking, sets of
 684      * hashes that vary only in bits above the current mask will
 685      * always collide. (Among known examples are sets of Float keys
 686      * holding consecutive whole numbers in small tables.)  So we
 687      * apply a transform that spreads the impact of higher bits
 688      * downward. There is a tradeoff between speed, utility, and
 689      * quality of bit-spreading. Because many common sets of hashes
 690      * are already reasonably distributed (so don't benefit from
 691      * spreading), and because we use trees to handle large sets of
 692      * collisions in bins, we just XOR some shifted bits in the
 693      * cheapest possible way to reduce systematic lossage, as well as
 694      * to incorporate impact of the highest bits that would otherwise
 695      * never be used in index calculations because of table bounds.
 696      */
 697     static final int spread(int h) {
 698         return (h ^ (h >>> 16)) & HASH_BITS;
 699     }
 700 
 701     /**
 702      * Returns a power of two table size for the given desired capacity.
 703      * See Hackers Delight, sec 3.2
 704      */
 705     private static final int tableSizeFor(int c) {
 706         int n = c - 1;
 707         n |= n >>> 1;
 708         n |= n >>> 2;
 709         n |= n >>> 4;
 710         n |= n >>> 8;
 711         n |= n >>> 16;
 712         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
 713     }
 714 
 715     /**
 716      * Returns x's Class if it is of the form "class C implements
 717      * Comparable<C>", else null.
 718      */
 719     static Class<?> comparableClassFor(Object x) {
 720         if (x instanceof Comparable) {
 721             Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
 722             if ((c = x.getClass()) == String.class) // bypass checks
 723                 return c;
 724             if ((ts = c.getGenericInterfaces()) != null) {
 725                 for (int i = 0; i < ts.length; ++i) {
 726                     if (((t = ts[i]) instanceof ParameterizedType) &&
 727                         ((p = (ParameterizedType)t).getRawType() ==
 728                          Comparable.class) &&
 729                         (as = p.getActualTypeArguments()) != null &&
 730                         as.length == 1 && as[0] == c) // type arg is c
 731                         return c;
 732                 }
 733             }
 734         }
 735         return null;
 736     }
 737 
 738     /**
 739      * Returns k.compareTo(x) if x matches kc (k's screened comparable
 740      * class), else 0.
 741      */
 742     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
 743     static int compareComparables(Class<?> kc, Object k, Object x) {
 744         return (x == null || x.getClass() != kc ? 0 :
 745                 ((Comparable)k).compareTo(x));
 746     }
 747 
 748     /* ---------------- Table element access -------------- */
 749 
 750     /*
 751      * Atomic access methods are used for table elements as well as
 752      * elements of in-progress next table while resizing.  All uses of
 753      * the tab arguments must be null checked by callers.  All callers
 754      * also paranoically precheck that tab's length is not zero (or an
 755      * equivalent check), thus ensuring that any index argument taking
 756      * the form of a hash value anded with (length - 1) is a valid
 757      * index.  Note that, to be correct wrt arbitrary concurrency
 758      * errors by users, these checks must operate on local variables,
 759      * which accounts for some odd-looking inline assignments below.
 760      * Note that calls to setTabAt always occur within locked regions,
 761      * and so require only release ordering.
 762      */
 763 
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        // Acquire read pairs with the release writes in setTabAt and
        // successful casTabAt publications.
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        // Atomically install v as bin i's head only if it is currently c.
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        // Release ordering suffices: per the note above, setTabAt is only
        // called within locked regions.
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }
 777 
 778     /* ---------------- Fields -------------- */
 779 
 780     /**
 781      * The array of bins. Lazily initialized upon first insertion.
 782      * Size is always a power of two. Accessed directly by iterators.
 783      */
 784     transient volatile Node<K,V>[] table;
 785 
 786     /**
 787      * The next table to use; non-null only while resizing.
 788      */
 789     private transient volatile Node<K,V>[] nextTable;
 790 
 791     /**
 792      * Base counter value, used mainly when there is no contention,
 793      * but also as a fallback during table initialization
 794      * races. Updated via CAS.
 795      */
 796     private transient volatile long baseCount;
 797 
 798     /**
 799      * Table initialization and resizing control.  When negative, the
 800      * table is being initialized or resized: -1 for initialization,
 801      * else -(1 + the number of active resizing threads).  Otherwise,
 802      * when table is null, holds the initial table size to use upon
 803      * creation, or 0 for default. After initialization, holds the
 804      * next element count value upon which to resize the table.
 805      */
 806     private transient volatile int sizeCtl;
 807 
 808     /**
 809      * The next table index (plus one) to split while resizing.
 810      */
 811     private transient volatile int transferIndex;
 812 
 813     /**
 814      * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 815      */
 816     private transient volatile int cellsBusy;
 817 
 818     /**
 819      * Table of counter cells. When non-null, size is a power of 2.
 820      */
 821     private transient volatile CounterCell[] counterCells;
 822 
 823     // views
 824     private transient KeySetView<K,V> keySet;
 825     private transient ValuesView<K,V> values;
 826     private transient EntrySetView<K,V> entrySet;
 827 
 828 
 829     /* ---------------- Public operations -------------- */
 830 
 831     /**
 832      * Creates a new, empty map with the default initial table size (16).
 833      */
 834     public ConcurrentHashMap() {
 835     }
 836 
 837     /**
 838      * Creates a new, empty map with an initial table size
 839      * accommodating the specified number of elements without the need
 840      * to dynamically resize.
 841      *
 842      * @param initialCapacity The implementation performs internal
 843      * sizing to accommodate this many elements.
 844      * @throws IllegalArgumentException if the initial capacity of
 845      * elements is negative
 846      */
 847     public ConcurrentHashMap(int initialCapacity) {
 848         if (initialCapacity < 0)
 849             throw new IllegalArgumentException();
 850         int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
 851                    MAXIMUM_CAPACITY :
 852                    tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
 853         this.sizeCtl = cap;
 854     }
 855 
 856     /**
 857      * Creates a new map with the same mappings as the given map.
 858      *
 859      * @param m the map
 860      */
 861     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
 862         this.sizeCtl = DEFAULT_CAPACITY;
 863         putAll(m);
 864     }
 865 
 866     /**
 867      * Creates a new, empty map with an initial table size based on
 868      * the given number of elements ({@code initialCapacity}) and
 869      * initial table density ({@code loadFactor}).
 870      *
 871      * @param initialCapacity the initial capacity. The implementation
 872      * performs internal sizing to accommodate this many elements,
 873      * given the specified load factor.
 874      * @param loadFactor the load factor (table density) for
 875      * establishing the initial table size
 876      * @throws IllegalArgumentException if the initial capacity of
 877      * elements is negative or the load factor is nonpositive
 878      *
 879      * @since 1.6
 880      */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);  // concurrencyLevel 1: plain sizing hint
    }
 884 
 885     /**
 886      * Creates a new, empty map with an initial table size based on
 887      * the given number of elements ({@code initialCapacity}), table
 888      * density ({@code loadFactor}), and number of concurrently
 889      * updating threads ({@code concurrencyLevel}).
 890      *
 891      * @param initialCapacity the initial capacity. The implementation
 892      * performs internal sizing to accommodate this many elements,
 893      * given the specified load factor.
 894      * @param loadFactor the load factor (table density) for
 895      * establishing the initial table size
 896      * @param concurrencyLevel the estimated number of concurrently
 897      * updating threads. The implementation may use this value as
 898      * a sizing hint.
 899      * @throws IllegalArgumentException if the initial capacity is
 900      * negative or the load factor or concurrencyLevel are
 901      * nonpositive
 902      */
 903     public ConcurrentHashMap(int initialCapacity,
 904                              float loadFactor, int concurrencyLevel) {
 905         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
 906             throw new IllegalArgumentException();
 907         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
 908             initialCapacity = concurrencyLevel;   // as estimated threads
 909         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
 910         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
 911             MAXIMUM_CAPACITY : tableSizeFor((int)size);
 912         this.sizeCtl = cap;
 913     }
 914 
 915     // Original (since JDK1.2) Map methods
 916 
 917     /**
 918      * {@inheritDoc}
 919      */
 920     public int size() {
 921         long n = sumCount();
 922         return ((n < 0L) ? 0 :
 923                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
 924                 (int)n);
 925     }
 926 
 927     /**
 928      * {@inheritDoc}
 929      */
 930     public boolean isEmpty() {
 931         return sumCount() <= 0L; // ignore transient negative values
 932     }
 933 
 934     /**
 935      * Returns the value to which the specified key is mapped,
 936      * or {@code null} if this map contains no mapping for the key.
 937      *
 938      * <p>More formally, if this map contains a mapping from a key
 939      * {@code k} to a value {@code v} such that {@code key.equals(k)},
 940      * then this method returns {@code v}; otherwise it returns
 941      * {@code null}.  (There can be at most one such mapping.)
 942      *
 943      * @throws NullPointerException if the specified key is null
 944      */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());  // throws NPE for null key, per spec
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Fast path: the bin's first node is a hash match.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Special node (forwarding, tree root, or reservation --
                // see MOVED/TREEBIN/RESERVED): delegate to its find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Ordinary bin: scan the remainder of the linked list.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
 964 
 965     /**
 966      * Tests if the specified object is a key in this table.
 967      *
 968      * @param  key possible key
 969      * @return {@code true} if and only if the specified object
 970      *         is a key in this table, as determined by the
 971      *         {@code equals} method; {@code false} otherwise
 972      * @throws NullPointerException if the specified key is null
 973      */
    public boolean containsKey(Object key) {
        // Null values are disallowed, so a null from get() implies absence.
        return get(key) != null;
    }
 977 
 978     /**
 979      * Returns {@code true} if this map maps one or more keys to the
 980      * specified value. Note: This method may require a full traversal
 981      * of the map, and is much slower than method {@code containsKey}.
 982      *
 983      * @param value value whose presence in this map is to be tested
 984      * @return {@code true} if this map maps one or more keys to the
 985      *         specified value
 986      * @throws NullPointerException if the specified value is null
 987      */
 988     public boolean containsValue(Object value) {
 989         if (value == null)
 990             throw new NullPointerException();
 991         Node<K,V>[] t;
 992         if ((t = table) != null) {
 993             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
 994             for (Node<K,V> p; (p = it.advance()) != null; ) {
 995                 V v;
 996                 if ((v = p.val) == value || (v != null && value.equals(v)))
 997                     return true;
 998             }
 999         }
1000         return false;
1001     }
1002 
1003     /**
1004      * Maps the specified key to the specified value in this table.
1005      * Neither the key nor the value can be null.
1006      *
1007      * <p>The value can be retrieved by calling the {@code get} method
1008      * with a key that is equal to the original key.
1009      *
1010      * @param key key with which the specified value is to be associated
1011      * @param value value to be associated with the specified key
1012      * @return the previous value associated with {@code key}, or
1013      *         {@code null} if there was no mapping for {@code key}
1014      * @throws NullPointerException if the specified key or value is null
1015      */
    public V put(K key, V value) {
        return putVal(key, value, false);  // false: overwrite any existing value
    }
1019 
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key the (non-null) key
     * @param value the (non-null) value
     * @param onlyIfAbsent if true, leave any existing mapping unchanged
     * @return the previous value, or null if there was none
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;                  // nodes traversed in bin; 0 until a bin is examined
        for (Node<K,V>[] tab = table;;) {  // retry loop: CAS failures and resizes restart here
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();         // lazily create the table
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // resize in progress: assist it, then retry
            else if (onlyIfAbsent && fh == hash &&  // check first node
                     ((fk = f.key) == key || fk != null && key.equals(fk)) &&
                     (fv = f.val) != null)
                return fv;                   // putIfAbsent fast path: hit without locking
            else {
                V oldVal = null;
                synchronized (f) {           // lock the bin via its first node
                    if (tabAt(tab, i) == f) {  // recheck head under lock; else retry
                        if (fh >= 0) {         // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: append new node at the tail.
                                    pred.next = new Node<K,V>(hash, key, value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;  // nonzero (so addCount runs) but < TREEIFY_THRESHOLD
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            // Bin is reserved (hash RESERVED) by an in-flight
                            // computation re-entering this map.
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {          // the bin was actually examined
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);   // may convert the list to a tree
                    if (oldVal != null)
                        return oldVal;        // replaced: element count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount);  // new mapping added: adjust count, possibly resize
        return null;
    }
1088 
1089     /**
1090      * Copies all of the mappings from the specified map to this one.
1091      * These mappings replace any mappings that this map had for any of the
1092      * keys currently in the specified map.
1093      *
1094      * @param m mappings to be stored in this map
1095      */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());  // grow once up front instead of per-insert
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }
1101 
1102     /**
1103      * Removes the key (and its corresponding value) from this map.
1104      * This method does nothing if the key is not in the map.
1105      *
1106      * @param  key the key that needs to be removed
1107      * @return the previous value associated with {@code key}, or
1108      *         {@code null} if there was no mapping for {@code key}
1109      * @throws NullPointerException if the specified key is null
1110      */
    public V remove(Object key) {
        // value == null requests deletion; cv == null means unconditional.
        return replaceNode(key, null, null);
    }
1114 
1115     /**
1116      * Implementation for the four public remove/replace methods:
1117      * Replaces node value with v, conditional upon match of cv if
1118      * non-null.  If resulting value is null, delete.
1119      */
1120     final V replaceNode(Object key, V value, Object cv) {
1121         int hash = spread(key.hashCode());
1122         for (Node<K,V>[] tab = table;;) {
1123             Node<K,V> f; int n, i, fh;
1124             if (tab == null || (n = tab.length) == 0 ||
1125                 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1126                 break;
1127             else if ((fh = f.hash) == MOVED)
1128                 tab = helpTransfer(tab, f);
1129             else {
1130                 V oldVal = null;
1131                 boolean validated = false;
1132                 synchronized (f) {
1133                     if (tabAt(tab, i) == f) {
1134                         if (fh >= 0) {
1135                             validated = true;
1136                             for (Node<K,V> e = f, pred = null;;) {
1137                                 K ek;
1138                                 if (e.hash == hash &&
1139                                     ((ek = e.key) == key ||
1140                                      (ek != null && key.equals(ek)))) {
1141                                     V ev = e.val;
1142                                     if (cv == null || cv == ev ||
1143                                         (ev != null && cv.equals(ev))) {
1144                                         oldVal = ev;
1145                                         if (value != null)
1146                                             e.val = value;
1147                                         else if (pred != null)
1148                                             pred.next = e.next;
1149                                         else
1150                                             setTabAt(tab, i, e.next);
1151                                     }
1152                                     break;
1153                                 }
1154                                 pred = e;
1155                                 if ((e = e.next) == null)
1156                                     break;
1157                             }
1158                         }
1159                         else if (f instanceof TreeBin) {
1160                             validated = true;
1161                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1162                             TreeNode<K,V> r, p;
1163                             if ((r = t.root) != null &&
1164                                 (p = r.findTreeNode(hash, key, null)) != null) {
1165                                 V pv = p.val;
1166                                 if (cv == null || cv == pv ||
1167                                     (pv != null && cv.equals(pv))) {
1168                                     oldVal = pv;
1169                                     if (value != null)
1170                                         p.val = value;
1171                                     else if (t.removeTreeNode(p))
1172                                         setTabAt(tab, i, untreeify(t.first));
1173                                 }
1174                             }
1175                         }
1176                         else if (f instanceof ReservationNode)
1177                             throw new IllegalStateException("Recursive update");
1178                     }
1179                 }
1180                 if (validated) {
1181                     if (oldVal != null) {
1182                         if (value == null)
1183                             addCount(-1L, -1);
1184                         return oldVal;
1185                     }
1186                     break;
1187                 }
1188             }
1189         }
1190         return null;
1191     }
1192 
1193     /**
1194      * Removes all of the mappings from this map.
1195      */
1196     public void clear() {
1197         long delta = 0L; // negative number of deletions
1198         int i = 0;
1199         Node<K,V>[] tab = table;
1200         while (tab != null && i < tab.length) {
1201             int fh;
1202             Node<K,V> f = tabAt(tab, i);
1203             if (f == null)
1204                 ++i;
1205             else if ((fh = f.hash) == MOVED) {
1206                 tab = helpTransfer(tab, f);
1207                 i = 0; // restart
1208             }
1209             else {
1210                 synchronized (f) {
1211                     if (tabAt(tab, i) == f) {
1212                         Node<K,V> p = (fh >= 0 ? f :
1213                                        (f instanceof TreeBin) ?
1214                                        ((TreeBin<K,V>)f).first : null);
1215                         while (p != null) {
1216                             --delta;
1217                             p = p.next;
1218                         }
1219                         setTabAt(tab, i++, null);
1220                     }
1221                 }
1222             }
1223         }
1224         if (delta != 0L)
1225             addCount(delta, -1);
1226     }
1227 
1228     /**
1229      * Returns a {@link Set} view of the keys contained in this map.
1230      * The set is backed by the map, so changes to the map are
1231      * reflected in the set, and vice-versa. The set supports element
1232      * removal, which removes the corresponding mapping from this map,
1233      * via the {@code Iterator.remove}, {@code Set.remove},
1234      * {@code removeAll}, {@code retainAll}, and {@code clear}
1235      * operations.  It does not support the {@code add} or
1236      * {@code addAll} operations.
1237      *
1238      * <p>The view's iterators and spliterators are
1239      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1240      *
1241      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1242      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1243      *
1244      * @return the set view
1245      */
1246     public KeySetView<K,V> keySet() {
1247         KeySetView<K,V> ks;
1248         if ((ks = keySet) != null) return ks;
1249         return keySet = new KeySetView<K,V>(this, null);
1250     }
1251 
1252     /**
1253      * Returns a {@link Collection} view of the values contained in this map.
1254      * The collection is backed by the map, so changes to the map are
1255      * reflected in the collection, and vice-versa.  The collection
1256      * supports element removal, which removes the corresponding
1257      * mapping from this map, via the {@code Iterator.remove},
1258      * {@code Collection.remove}, {@code removeAll},
1259      * {@code retainAll}, and {@code clear} operations.  It does not
1260      * support the {@code add} or {@code addAll} operations.
1261      *
1262      * <p>The view's iterators and spliterators are
1263      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1264      *
1265      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1266      * and {@link Spliterator#NONNULL}.
1267      *
1268      * @return the collection view
1269      */
1270     public Collection<V> values() {
1271         ValuesView<K,V> vs;
1272         if ((vs = values) != null) return vs;
1273         return values = new ValuesView<K,V>(this);
1274     }
1275 
1276     /**
1277      * Returns a {@link Set} view of the mappings contained in this map.
1278      * The set is backed by the map, so changes to the map are
1279      * reflected in the set, and vice-versa.  The set supports element
1280      * removal, which removes the corresponding mapping from the map,
1281      * via the {@code Iterator.remove}, {@code Set.remove},
1282      * {@code removeAll}, {@code retainAll}, and {@code clear}
1283      * operations.
1284      *
1285      * <p>The view's iterators and spliterators are
1286      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1287      *
1288      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1289      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1290      *
1291      * @return the set view
1292      */
1293     public Set<Map.Entry<K,V>> entrySet() {
1294         EntrySetView<K,V> es;
1295         if ((es = entrySet) != null) return es;
1296         return entrySet = new EntrySetView<K,V>(this);
1297     }
1298 
1299     /**
1300      * Returns the hash code value for this {@link Map}, i.e.,
1301      * the sum of, for each key-value pair in the map,
1302      * {@code key.hashCode() ^ value.hashCode()}.
1303      *
1304      * @return the hash code value for this map
1305      */
1306     public int hashCode() {
1307         int h = 0;
1308         Node<K,V>[] t;
1309         if ((t = table) != null) {
1310             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1311             for (Node<K,V> p; (p = it.advance()) != null; )
1312                 h += p.key.hashCode() ^ p.val.hashCode();
1313         }
1314         return h;
1315     }
1316 
1317     /**
1318      * Returns a string representation of this map.  The string
1319      * representation consists of a list of key-value mappings (in no
1320      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1321      * mappings are separated by the characters {@code ", "} (comma
1322      * and space).  Each key-value mapping is rendered as the key
1323      * followed by an equals sign ("{@code =}") followed by the
1324      * associated value.
1325      *
1326      * @return a string representation of this map
1327      */
1328     public String toString() {
1329         Node<K,V>[] t;
1330         int f = (t = table) == null ? 0 : t.length;
1331         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1332         StringBuilder sb = new StringBuilder();
1333         sb.append('{');
1334         Node<K,V> p;
1335         if ((p = it.advance()) != null) {
1336             for (;;) {
1337                 K k = p.key;
1338                 V v = p.val;
1339                 sb.append(k == this ? "(this Map)" : k);
1340                 sb.append('=');
1341                 sb.append(v == this ? "(this Map)" : v);
1342                 if ((p = it.advance()) == null)
1343                     break;
1344                 sb.append(',').append(' ');
1345             }
1346         }
1347         return sb.append('}').toString();
1348     }
1349 
1350     /**
1351      * Compares the specified object with this map for equality.
1352      * Returns {@code true} if the given object is a map with the same
1353      * mappings as this map.  This operation may return misleading
1354      * results if either map is concurrently modified during execution
1355      * of this method.
1356      *
1357      * @param o object to be compared for equality with this map
1358      * @return {@code true} if the specified object is equal to this map
1359      */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            // Pass 1: every mapping in this map must be present in m
            // with an equal value.
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            // Pass 2: every mapping in m must be present in this map with
            // an equal value.  A null key or value in m can never match,
            // since this map rejects nulls.
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
1385 
1386     /**
1387      * Stripped-down version of helper class used in previous version,
1388      * declared for the sake of serialization compatibility.
1389      */
1390     static class Segment<K,V> extends ReentrantLock implements Serializable {
1391         private static final long serialVersionUID = 2249069246763182397L;
1392         final float loadFactor;
1393         Segment(float lf) { this.loadFactor = lf; }
1394     }
1395 
1396     /**
1397      * Saves the state of the {@code ConcurrentHashMap} instance to a
1398      * stream (i.e., serializes it).
1399      * @param s the stream
1400      * @throws java.io.IOException if an I/O error occurs
1401      * @serialData
1402      * the serialized fields, followed by the key (Object) and value
1403      * (Object) for each key-value mapping, followed by a null pair.
1404      * The key-value mappings are emitted in no particular order.
1405      */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        // ssize becomes the smallest power of two >= DEFAULT_CONCURRENCY_LEVEL;
        // sshift is its log2.
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked")
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        // Populate the serial fields that readers of the old format expect.
        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
        streamFields.put("segments", segments);
        streamFields.put("segmentShift", segmentShift);
        streamFields.put("segmentMask", segmentMask);
        s.writeFields();

        // Emit every key/value pair in traversal order.
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        // A null key/value pair marks the end of the mappings.
        s.writeObject(null);
        s.writeObject(null);
    }
1440 
1441     /**
1442      * Reconstitutes the instance from a stream (that is, deserializes it).
1443      * @param s the stream
1444      * @throws ClassNotFoundException if the class of a serialized object
1445      *         could not be found
1446      * @throws java.io.IOException if an I/O error occurs
1447      */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        Node<K,V> p = null;
        // Phase 1: read key/value pairs until the terminating null pair,
        // chaining the new nodes into a stack via their next links.
        for (;;) {
            @SuppressWarnings("unchecked")
            K k = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            // Phase 2: size the table with slack over the element count,
            // capped at MAXIMUM_CAPACITY.
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L;    // distinct mappings actually installed
            // Phase 3: move nodes off the stack into their bins, skipping
            // duplicate keys and treeifying overpopulated bins.
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        // Bin already treeified; putTreeVal handles duplicates.
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        // Scan the chain for a duplicate key before prepending.
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            // Chain would grow too long: prepend p, then
                            // convert the whole bin to a TreeBin.
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2); // next-resize threshold: 0.75 * n
            baseCount = added;
        }
    }
1544 
1545     // ConcurrentMap methods
1546 
1547     /**
1548      * {@inheritDoc}
1549      *
1550      * @return the previous value associated with the specified key,
1551      *         or {@code null} if there was no mapping for the key
1552      * @throws NullPointerException if the specified key or value is null
1553      */
    public V putIfAbsent(K key, V value) {
        return putVal(key, value, true); // onlyIfAbsent == true
    }
1557 
1558     /**
1559      * {@inheritDoc}
1560      *
1561      * @throws NullPointerException if the specified key is null
1562      */
1563     public boolean remove(Object key, Object value) {
1564         if (key == null)
1565             throw new NullPointerException();
1566         return value != null && replaceNode(key, null, value) != null;
1567     }
1568 
1569     /**
1570      * {@inheritDoc}
1571      *
1572      * @throws NullPointerException if any of the arguments are null
1573      */
1574     public boolean replace(K key, V oldValue, V newValue) {
1575         if (key == null || oldValue == null || newValue == null)
1576             throw new NullPointerException();
1577         return replaceNode(key, newValue, oldValue) != null;
1578     }
1579 
1580     /**
1581      * {@inheritDoc}
1582      *
1583      * @return the previous value associated with the specified key,
1584      *         or {@code null} if there was no mapping for the key
1585      * @throws NullPointerException if the specified key or value is null
1586      */
1587     public V replace(K key, V value) {
1588         if (key == null || value == null)
1589             throw new NullPointerException();
1590         return replaceNode(key, value, null);
1591     }
1592 
1593     // Overrides of JDK8+ Map extension method defaults
1594 
1595     /**
1596      * Returns the value to which the specified key is mapped, or the
1597      * given default value if this map contains no mapping for the
1598      * key.
1599      *
1600      * @param key the key whose associated value is to be returned
1601      * @param defaultValue the value to return if this map contains
1602      * no mapping for the given key
1603      * @return the mapping for the key, if present; else the default value
1604      * @throws NullPointerException if the specified key is null
1605      */
1606     public V getOrDefault(Object key, V defaultValue) {
1607         V v;
1608         return (v = get(key)) == null ? defaultValue : v;
1609     }
1610 
1611     public void forEach(BiConsumer<? super K, ? super V> action) {
1612         if (action == null) throw new NullPointerException();
1613         Node<K,V>[] t;
1614         if ((t = table) != null) {
1615             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1616             for (Node<K,V> p; (p = it.advance()) != null; ) {
1617                 action.accept(p.key, p.val);
1618             }
1619         }
1620     }
1621 
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    // replaceNode succeeds only if the value is still
                    // oldValue; on interference, re-read the current value
                    // and retry, unless the mapping has been removed.
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1640 
1641     /**
1642      * Helper method for EntrySetView.removeIf.
1643      */
1644     boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1645         if (function == null) throw new NullPointerException();
1646         Node<K,V>[] t;
1647         boolean removed = false;
1648         if ((t = table) != null) {
1649             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1650             for (Node<K,V> p; (p = it.advance()) != null; ) {
1651                 K k = p.key;
1652                 V v = p.val;
1653                 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1654                 if (function.test(e) && replaceNode(k, null, v) != null)
1655                     removed = true;
1656             }
1657         }
1658         return removed;
1659     }
1660 
1661     /**
1662      * Helper method for ValuesView.removeIf.
1663      */
1664     boolean removeValueIf(Predicate<? super V> function) {
1665         if (function == null) throw new NullPointerException();
1666         Node<K,V>[] t;
1667         boolean removed = false;
1668         if ((t = table) != null) {
1669             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1670             for (Node<K,V> p; (p = it.advance()) != null; ) {
1671                 K k = p.key;
1672                 V v = p.val;
1673                 if (function.test(v) && replaceNode(k, null, v) != null)
1674                     removed = true;
1675             }
1676         }
1677         return removed;
1678     }
1679 
1680     /**
1681      * If the specified key is not already associated with a value,
1682      * attempts to compute its value using the given mapping function
1683      * and enters it into this map unless {@code null}.  The entire
1684      * method invocation is performed atomically, so the function is
1685      * applied at most once per key.  Some attempted update operations
1686      * on this map by other threads may be blocked while computation
1687      * is in progress, so the computation should be short and simple,
1688      * and must not attempt to update any other mappings of this map.
1689      *
1690      * @param key key with which the specified value is to be associated
1691      * @param mappingFunction the function to compute a value
1692      * @return the current (existing or computed) value associated with
1693      *         the specified key, or null if the computed value is null
1694      * @throws NullPointerException if the specified key or mappingFunction
1695      *         is null
1696      * @throws IllegalStateException if the computation detectably
1697      *         attempts a recursive update to this map that would
1698      *         otherwise never complete
1699      * @throws RuntimeException or Error if the mappingFunction does so,
1700      *         in which case the mapping is left unestablished
1701      */
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
        if (key == null || mappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: claim it with a reservation placeholder so
                // other threads block while the mapping function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = mappingFunction.apply(key)) != null)
                                node = new Node<K,V>(h, key, val);
                        } finally {
                            // Install the real node, or clear the
                            // reservation if the function returned null
                            // or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                // Table is resizing; assist, then retry on the new table.
                tab = helpTransfer(tab, f);
            else if (fh == h &&                  // check first node
                     ((fk = f.key) == key || fk != null && key.equals(fk)) &&
                     (fv = f.val) != null)
                // Lock-free fast path: key found at head of bin.
                return fv;
            else {
                boolean added = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: search; append if absent.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = e.val;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    if ((val = mappingFunction.apply(key)) != null) {
                                        // A non-null pred.next here means
                                        // the mapping function modified
                                        // this map.
                                        if (pred.next != null)
                                            throw new IllegalStateException("Recursive update");
                                        added = true;
                                        pred.next = new Node<K,V>(h, key, val);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: search; insert if absent.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null)
                                val = p.val;
                            else if ((val = mappingFunction.apply(key)) != null) {
                                added = true;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (!added)
                        return val;
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, binCount);
        return val;
    }
1790 
1791     /**
1792      * If the value for the specified key is present, attempts to
1793      * compute a new mapping given the key and its current mapped
1794      * value.  The entire method invocation is performed atomically.
1795      * Some attempted update operations on this map by other threads
1796      * may be blocked while computation is in progress, so the
1797      * computation should be short and simple, and must not attempt to
1798      * update any other mappings of this map.
1799      *
1800      * @param key key with which a value may be associated
1801      * @param remappingFunction the function to compute a value
1802      * @return the new value associated with the specified key, or null if none
1803      * @throws NullPointerException if the specified key or remappingFunction
1804      *         is null
1805      * @throws IllegalStateException if the computation detectably
1806      *         attempts a recursive update to this map that would
1807      *         otherwise never complete
1808      * @throws RuntimeException or Error if the remappingFunction does so,
1809      *         in which case the mapping is unchanged
1810      */
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;      // change in mapping count: -1 on removal, else 0
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
                break;  // empty bin, hence no mapping: nothing to do
            else if ((fh = f.hash) == MOVED)
                // Table is resizing; assist, then retry on the new table.
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: find the key, then replace
                            // or unlink its node.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Function returned null: remove
                                        // the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: find the key, then replace or
                            // remove its node.
                            binCount = 2;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(h, key, null)) != null) {
                                val = remappingFunction.apply(key, p.val);
                                if (val != null)
                                    p.val = val;
                                else {
                                    delta = -1;
                                    // Untreeify if the bin became too small.
                                    if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
1882 
1883     /**
1884      * Attempts to compute a mapping for the specified key and its
1885      * current mapped value (or {@code null} if there is no current
1886      * mapping). The entire method invocation is performed atomically.
1887      * Some attempted update operations on this map by other threads
1888      * may be blocked while computation is in progress, so the
1889      * computation should be short and simple, and must not attempt to
1890      * update any other mappings of this Map.
1891      *
1892      * @param key key with which the specified value is to be associated
1893      * @param remappingFunction the function to compute a value
1894      * @return the new value associated with the specified key, or null if none
1895      * @throws NullPointerException if the specified key or remappingFunction
1896      *         is null
1897      * @throws IllegalStateException if the computation detectably
1898      *         attempts a recursive update to this map that would
1899      *         otherwise never complete
1900      * @throws RuntimeException or Error if the remappingFunction does so,
1901      *         in which case the mapping is unchanged
1902      */
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;
        int delta = 0;      // change in mapping count: +1 insert, -1 remove
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: claim it with a reservation placeholder so
                // other threads block while the remapping function runs.
                Node<K,V> r = new ReservationNode<K,V>();
                synchronized (r) {
                    if (casTabAt(tab, i, null, r)) {
                        binCount = 1;
                        Node<K,V> node = null;
                        try {
                            if ((val = remappingFunction.apply(key, null)) != null) {
                                delta = 1;
                                node = new Node<K,V>(h, key, val);
                            }
                        } finally {
                            // Install the real node, or clear the
                            // reservation if the function returned null
                            // or threw.
                            setTabAt(tab, i, node);
                        }
                    }
                }
                if (binCount != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED)
                // Table is resizing; assist, then retry on the new table.
                tab = helpTransfer(tab, f);
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: replace/remove if present,
                            // append if absent and a value was computed.
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    val = remappingFunction.apply(key, e.val);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Function returned null: remove
                                        // the mapping.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    val = remappingFunction.apply(key, null);
                                    if (val != null) {
                                        // A non-null pred.next here means
                                        // the remapping function modified
                                        // this map.
                                        if (pred.next != null)
                                            throw new IllegalStateException("Recursive update");
                                        delta = 1;
                                        pred.next = new Node<K,V>(h, key, val);
                                    }
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: look up the key, apply the function
                            // to the current (possibly null) value, then
                            // update/insert/remove accordingly.
                            binCount = 1;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null)
                                p = r.findTreeNode(h, key, null);
                            else
                                p = null;
                            V pv = (p == null) ? null : p.val;
                            val = remappingFunction.apply(key, pv);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Untreeify if the bin became too small.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2011 
2012     /**
2013      * If the specified key is not already associated with a
2014      * (non-null) value, associates it with the given value.
2015      * Otherwise, replaces the value with the results of the given
2016      * remapping function, or removes if {@code null}. The entire
2017      * method invocation is performed atomically.  Some attempted
2018      * update operations on this map by other threads may be blocked
2019      * while computation is in progress, so the computation should be
2020      * short and simple, and must not attempt to update any other
2021      * mappings of this Map.
2022      *
2023      * @param key key with which the specified value is to be associated
2024      * @param value the value to use if absent
2025      * @param remappingFunction the function to recompute a value if present
2026      * @return the new value associated with the specified key, or null if none
2027      * @throws NullPointerException if the specified key or the
2028      *         remappingFunction is null
2029      * @throws RuntimeException or Error if the remappingFunction does so,
2030      *         in which case the mapping is unchanged
2031      */
2032     public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2033         if (key == null || value == null || remappingFunction == null)
2034             throw new NullPointerException();
2035         int h = spread(key.hashCode());
2036         V val = null;
2037         int delta = 0;
2038         int binCount = 0;
2039         for (Node<K,V>[] tab = table;;) {
2040             Node<K,V> f; int n, i, fh;
2041             if (tab == null || (n = tab.length) == 0)
2042                 tab = initTable();
2043             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2044                 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2045                     delta = 1;
2046                     val = value;
2047                     break;
2048                 }
2049             }
2050             else if ((fh = f.hash) == MOVED)
2051                 tab = helpTransfer(tab, f);
2052             else {
2053                 synchronized (f) {
2054                     if (tabAt(tab, i) == f) {
2055                         if (fh >= 0) {
2056                             binCount = 1;
2057                             for (Node<K,V> e = f, pred = null;; ++binCount) {
2058                                 K ek;
2059                                 if (e.hash == h &&
2060                                     ((ek = e.key) == key ||
2061                                      (ek != null && key.equals(ek)))) {
2062                                     val = remappingFunction.apply(e.val, value);
2063                                     if (val != null)
2064                                         e.val = val;
2065                                     else {
2066                                         delta = -1;
2067                                         Node<K,V> en = e.next;
2068                                         if (pred != null)
2069                                             pred.next = en;
2070                                         else
2071                                             setTabAt(tab, i, en);
2072                                     }
2073                                     break;
2074                                 }
2075                                 pred = e;
2076                                 if ((e = e.next) == null) {
2077                                     delta = 1;
2078                                     val = value;
2079                                     pred.next = new Node<K,V>(h, key, val);
2080                                     break;
2081                                 }
2082                             }
2083                         }
2084                         else if (f instanceof TreeBin) {
2085                             binCount = 2;
2086                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2087                             TreeNode<K,V> r = t.root;
2088                             TreeNode<K,V> p = (r == null) ? null :
2089                                 r.findTreeNode(h, key, null);
2090                             val = (p == null) ? value :
2091                                 remappingFunction.apply(p.val, value);
2092                             if (val != null) {
2093                                 if (p != null)
2094                                     p.val = val;
2095                                 else {
2096                                     delta = 1;
2097                                     t.putTreeVal(h, key, val);
2098                                 }
2099                             }
2100                             else if (p != null) {
2101                                 delta = -1;
2102                                 if (t.removeTreeNode(p))
2103                                     setTabAt(tab, i, untreeify(t.first));
2104                             }
2105                         }
2106                         else if (f instanceof ReservationNode)
2107                             throw new IllegalStateException("Recursive update");
2108                     }
2109                 }
2110                 if (binCount != 0) {
2111                     if (binCount >= TREEIFY_THRESHOLD)
2112                         treeifyBin(tab, i);
2113                     break;
2114                 }
2115             }
2116         }
2117         if (delta != 0)
2118             addCount((long)delta, binCount);
2119         return val;
2120     }
2121 
2122     // Hashtable legacy methods
2123 
2124     /**
2125      * Tests if some key maps into the specified value in this table.
2126      *
2127      * <p>Note that this method is identical in functionality to
2128      * {@link #containsValue(Object)}, and exists solely to ensure
2129      * full compatibility with class {@link java.util.Hashtable},
2130      * which supported this method prior to introduction of the
2131      * Java Collections Framework.
2132      *
2133      * @param  value a value to search for
2134      * @return {@code true} if and only if some key maps to the
2135      *         {@code value} argument in this table as
2136      *         determined by the {@code equals} method;
2137      *         {@code false} otherwise
2138      * @throws NullPointerException if the specified value is null
2139      */
2140     public boolean contains(Object value) {
2141         return containsValue(value);
2142     }
2143 
2144     /**
2145      * Returns an enumeration of the keys in this table.
2146      *
2147      * @return an enumeration of the keys in this table
2148      * @see #keySet()
2149      */
2150     public Enumeration<K> keys() {
2151         Node<K,V>[] t;
2152         int f = (t = table) == null ? 0 : t.length;
2153         return new KeyIterator<K,V>(t, f, 0, f, this);
2154     }
2155 
2156     /**
2157      * Returns an enumeration of the values in this table.
2158      *
2159      * @return an enumeration of the values in this table
2160      * @see #values()
2161      */
2162     public Enumeration<V> elements() {
2163         Node<K,V>[] t;
2164         int f = (t = table) == null ? 0 : t.length;
2165         return new ValueIterator<K,V>(t, f, 0, f, this);
2166     }
2167 
2168     // ConcurrentHashMap-only methods
2169 
2170     /**
2171      * Returns the number of mappings. This method should be used
2172      * instead of {@link #size} because a ConcurrentHashMap may
2173      * contain more mappings than can be represented as an int. The
2174      * value returned is an estimate; the actual count may differ if
2175      * there are concurrent insertions or removals.
2176      *
2177      * @return the number of mappings
2178      * @since 1.8
2179      */
2180     public long mappingCount() {
2181         long n = sumCount();
2182         return (n < 0L) ? 0L : n; // ignore transient negative values
2183     }
2184 
2185     /**
2186      * Creates a new {@link Set} backed by a ConcurrentHashMap
2187      * from the given type to {@code Boolean.TRUE}.
2188      *
2189      * @param <K> the element type of the returned set
2190      * @return the new set
2191      * @since 1.8
2192      */
2193     public static <K> KeySetView<K,Boolean> newKeySet() {
2194         return new KeySetView<K,Boolean>
2195             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2196     }
2197 
2198     /**
2199      * Creates a new {@link Set} backed by a ConcurrentHashMap
2200      * from the given type to {@code Boolean.TRUE}.
2201      *
2202      * @param initialCapacity The implementation performs internal
2203      * sizing to accommodate this many elements.
2204      * @param <K> the element type of the returned set
2205      * @return the new set
2206      * @throws IllegalArgumentException if the initial capacity of
2207      * elements is negative
2208      * @since 1.8
2209      */
2210     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2211         return new KeySetView<K,Boolean>
2212             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2213     }
2214 
2215     /**
2216      * Returns a {@link Set} view of the keys in this map, using the
2217      * given common mapped value for any additions (i.e., {@link
2218      * Collection#add} and {@link Collection#addAll(Collection)}).
2219      * This is of course only appropriate if it is acceptable to use
2220      * the same value for all additions from this view.
2221      *
2222      * @param mappedValue the mapped value to use for any additions
2223      * @return the set view
2224      * @throws NullPointerException if the mappedValue is null
2225      */
2226     public KeySetView<K,V> keySet(V mappedValue) {
2227         if (mappedValue == null)
2228             throw new NullPointerException();
2229         return new KeySetView<K,V>(this, mappedValue);
2230     }
2231 
2232     /* ---------------- Special Nodes -------------- */
2233 
2234     /**
2235      * A node inserted at head of bins during transfer operations.
2236      */
2237     static final class ForwardingNode<K,V> extends Node<K,V> {
2238         final Node<K,V>[] nextTable;
2239         ForwardingNode(Node<K,V>[] tab) {
2240             super(MOVED, null, null);
2241             this.nextTable = tab;
2242         }
2243 
2244         Node<K,V> find(int h, Object k) {
2245             // loop to avoid arbitrarily deep recursion on forwarding nodes
2246             outer: for (Node<K,V>[] tab = nextTable;;) {
2247                 Node<K,V> e; int n;
2248                 if (k == null || tab == null || (n = tab.length) == 0 ||
2249                     (e = tabAt(tab, (n - 1) & h)) == null)
2250                     return null;
2251                 for (;;) {
2252                     int eh; K ek;
2253                     if ((eh = e.hash) == h &&
2254                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
2255                         return e;
2256                     if (eh < 0) {
2257                         if (e instanceof ForwardingNode) {
2258                             tab = ((ForwardingNode<K,V>)e).nextTable;
2259                             continue outer;
2260                         }
2261                         else
2262                             return e.find(h, k);
2263                     }
2264                     if ((e = e.next) == null)
2265                         return null;
2266                 }
2267             }
2268         }
2269     }
2270 
2271     /**
2272      * A place-holder node used in computeIfAbsent and compute.
2273      */
2274     static final class ReservationNode<K,V> extends Node<K,V> {
2275         ReservationNode() {
2276             super(RESERVED, null, null);
2277         }
2278 
2279         Node<K,V> find(int h, Object k) {
2280             return null;
2281         }
2282     }
2283 
2284     /* ---------------- Table Initialization and Resizing -------------- */
2285 
2286     /**
2287      * Returns the stamp bits for resizing a table of size n.
2288      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2289      */
2290     static final int resizeStamp(int n) {
2291         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2292     }
2293 
2294     /**
2295      * Initializes table, using the size recorded in sizeCtl.
2296      */
2297     private final Node<K,V>[] initTable() {
2298         Node<K,V>[] tab; int sc;
2299         while ((tab = table) == null || tab.length == 0) {
2300             if ((sc = sizeCtl) < 0)
2301                 Thread.yield(); // lost initialization race; just spin
2302             else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2303                 try {
2304                     if ((tab = table) == null || tab.length == 0) {
2305                         int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2306                         @SuppressWarnings("unchecked")
2307                         Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2308                         table = tab = nt;
2309                         sc = n - (n >>> 2);
2310                     }
2311                 } finally {
2312                     sizeCtl = sc;
2313                 }
2314                 break;
2315             }
2316         }
2317         return tab;
2318     }
2319 
2320     /**
2321      * Adds to count, and if table is too small and not already
2322      * resizing, initiates transfer. If already resizing, helps
2323      * perform transfer if work is available.  Rechecks occupancy
2324      * after a transfer to see if another resize is already needed
2325      * because resizings are lagging additions.
2326      *
2327      * @param x the count to add
2328      * @param check if <0, don't check resize, if <= 1 only check if uncontended
2329      */
2330     private final void addCount(long x, int check) {
2331         CounterCell[] as; long b, s;
2332         if ((as = counterCells) != null ||
2333             !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2334             CounterCell a; long v; int m;
2335             boolean uncontended = true;
2336             if (as == null || (m = as.length - 1) < 0 ||
2337                 (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2338                 !(uncontended =
2339                   U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2340                 fullAddCount(x, uncontended);
2341                 return;
2342             }
2343             if (check <= 1)
2344                 return;
2345             s = sumCount();
2346         }
2347         if (check >= 0) {
2348             Node<K,V>[] tab, nt; int n, sc;
2349             while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2350                    (n = tab.length) < MAXIMUM_CAPACITY) {
2351                 int rs = resizeStamp(n);
2352                 if (sc < 0) {
2353                     if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2354                         sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2355                         transferIndex <= 0)
2356                         break;
2357                     if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2358                         transfer(tab, nt);
2359                 }
2360                 else if (U.compareAndSwapInt(this, SIZECTL, sc,
2361                                              (rs << RESIZE_STAMP_SHIFT) + 2))
2362                     transfer(tab, null);
2363                 s = sumCount();
2364             }
2365         }
2366     }
2367 
2368     /**
2369      * Helps transfer if a resize is in progress.
2370      */
2371     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2372         Node<K,V>[] nextTab; int sc;
2373         if (tab != null && (f instanceof ForwardingNode) &&
2374             (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2375             int rs = resizeStamp(tab.length);
2376             while (nextTab == nextTable && table == tab &&
2377                    (sc = sizeCtl) < 0) {
2378                 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2379                     sc == rs + MAX_RESIZERS || transferIndex <= 0)
2380                     break;
2381                 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
2382                     transfer(tab, nextTab);
2383                     break;
2384                 }
2385             }
2386             return nextTab;
2387         }
2388         return table;
2389     }
2390 
2391     /**
2392      * Tries to presize table to accommodate the given number of elements.
2393      *
2394      * @param size number of elements (doesn't need to be perfectly accurate)
2395      */
2396     private final void tryPresize(int size) {
2397         int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2398             tableSizeFor(size + (size >>> 1) + 1);
2399         int sc;
2400         while ((sc = sizeCtl) >= 0) {
2401             Node<K,V>[] tab = table; int n;
2402             if (tab == null || (n = tab.length) == 0) {
2403                 n = (sc > c) ? sc : c;
2404                 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2405                     try {
2406                         if (table == tab) {
2407                             @SuppressWarnings("unchecked")
2408                             Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2409                             table = nt;
2410                             sc = n - (n >>> 2);
2411                         }
2412                     } finally {
2413                         sizeCtl = sc;
2414                     }
2415                 }
2416             }
2417             else if (c <= sc || n >= MAXIMUM_CAPACITY)
2418                 break;
2419             else if (tab == table) {
2420                 int rs = resizeStamp(n);
2421                 if (U.compareAndSwapInt(this, SIZECTL, sc,
2422                                         (rs << RESIZE_STAMP_SHIFT) + 2))
2423                     transfer(tab, null);
2424             }
2425         }
2426     }
2427 
2428     /**
2429      * Moves and/or copies the nodes in each bin to new table. See
2430      * above for explanation.
2431      */
2432     private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2433         int n = tab.length, stride;
2434         if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2435             stride = MIN_TRANSFER_STRIDE; // subdivide range
2436         if (nextTab == null) {            // initiating
2437             try {
2438                 @SuppressWarnings("unchecked")
2439                 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2440                 nextTab = nt;
2441             } catch (Throwable ex) {      // try to cope with OOME
2442                 sizeCtl = Integer.MAX_VALUE;
2443                 return;
2444             }
2445             nextTable = nextTab;
2446             transferIndex = n;
2447         }
2448         int nextn = nextTab.length;
2449         ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2450         boolean advance = true;
2451         boolean finishing = false; // to ensure sweep before committing nextTab
2452         for (int i = 0, bound = 0;;) {
2453             Node<K,V> f; int fh;
2454             while (advance) {
2455                 int nextIndex, nextBound;
2456                 if (--i >= bound || finishing)
2457                     advance = false;
2458                 else if ((nextIndex = transferIndex) <= 0) {
2459                     i = -1;
2460                     advance = false;
2461                 }
2462                 else if (U.compareAndSwapInt
2463                          (this, TRANSFERINDEX, nextIndex,
2464                           nextBound = (nextIndex > stride ?
2465                                        nextIndex - stride : 0))) {
2466                     bound = nextBound;
2467                     i = nextIndex - 1;
2468                     advance = false;
2469                 }
2470             }
2471             if (i < 0 || i >= n || i + n >= nextn) {
2472                 int sc;
2473                 if (finishing) {
2474                     nextTable = null;
2475                     table = nextTab;
2476                     sizeCtl = (n << 1) - (n >>> 1);
2477                     return;
2478                 }
2479                 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2480                     if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2481                         return;
2482                     finishing = advance = true;
2483                     i = n; // recheck before commit
2484                 }
2485             }
2486             else if ((f = tabAt(tab, i)) == null)
2487                 advance = casTabAt(tab, i, null, fwd);
2488             else if ((fh = f.hash) == MOVED)
2489                 advance = true; // already processed
2490             else {
2491                 synchronized (f) {
2492                     if (tabAt(tab, i) == f) {
2493                         Node<K,V> ln, hn;
2494                         if (fh >= 0) {
2495                             int runBit = fh & n;
2496                             Node<K,V> lastRun = f;
2497                             for (Node<K,V> p = f.next; p != null; p = p.next) {
2498                                 int b = p.hash & n;
2499                                 if (b != runBit) {
2500                                     runBit = b;
2501                                     lastRun = p;
2502                                 }
2503                             }
2504                             if (runBit == 0) {
2505                                 ln = lastRun;
2506                                 hn = null;
2507                             }
2508                             else {
2509                                 hn = lastRun;
2510                                 ln = null;
2511                             }
2512                             for (Node<K,V> p = f; p != lastRun; p = p.next) {
2513                                 int ph = p.hash; K pk = p.key; V pv = p.val;
2514                                 if ((ph & n) == 0)
2515                                     ln = new Node<K,V>(ph, pk, pv, ln);
2516                                 else
2517                                     hn = new Node<K,V>(ph, pk, pv, hn);
2518                             }
2519                             setTabAt(nextTab, i, ln);
2520                             setTabAt(nextTab, i + n, hn);
2521                             setTabAt(tab, i, fwd);
2522                             advance = true;
2523                         }
2524                         else if (f instanceof TreeBin) {
2525                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2526                             TreeNode<K,V> lo = null, loTail = null;
2527                             TreeNode<K,V> hi = null, hiTail = null;
2528                             int lc = 0, hc = 0;
2529                             for (Node<K,V> e = t.first; e != null; e = e.next) {
2530                                 int h = e.hash;
2531                                 TreeNode<K,V> p = new TreeNode<K,V>
2532                                     (h, e.key, e.val, null, null);
2533                                 if ((h & n) == 0) {
2534                                     if ((p.prev = loTail) == null)
2535                                         lo = p;
2536                                     else
2537                                         loTail.next = p;
2538                                     loTail = p;
2539                                     ++lc;
2540                                 }
2541                                 else {
2542                                     if ((p.prev = hiTail) == null)
2543                                         hi = p;
2544                                     else
2545                                         hiTail.next = p;
2546                                     hiTail = p;
2547                                     ++hc;
2548                                 }
2549                             }
2550                             ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2551                                 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2552                             hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2553                                 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2554                             setTabAt(nextTab, i, ln);
2555                             setTabAt(nextTab, i + n, hn);
2556                             setTabAt(tab, i, fwd);
2557                             advance = true;
2558                         }
2559                     }
2560                 }
2561             }
2562         }
2563     }
2564 
2565     /* ---------------- Counter support -------------- */
2566 
2567     /**
2568      * A padded cell for distributing counts.  Adapted from LongAdder
2569      * and Striped64.  See their internal docs for explanation.
2570      */
2571     @jdk.internal.vm.annotation.Contended static final class CounterCell {
2572         volatile long value;
2573         CounterCell(long x) { value = x; }
2574     }
2575 
2576     final long sumCount() {
2577         CounterCell[] as = counterCells; CounterCell a;
2578         long sum = baseCount;
2579         if (as != null) {
2580             for (int i = 0; i < as.length; ++i) {
2581                 if ((a = as[i]) != null)
2582                     sum += a.value;
2583             }
2584         }
2585         return sum;
2586     }
2587 
    // See LongAdder version for explanation
    /**
     * Slow path of addCount: creates or grows the counter-cell table,
     * or retries the add under contention.  Adapted from
     * Striped64/LongAdder; cellsBusy acts as a spinlock guarding
     * table creation and expansion.
     */
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // Added to existing cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // Note collision; retry once more
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    // Repeated collisions: double the cell table.
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h); // rehash to a new slot
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2669 
2670     /* ---------------- Conversion from/to TreeBins -------------- */
2671 
2672     /**
2673      * Replaces all linked nodes in bin at given index unless table is
2674      * too small, in which case resizes instead.
2675      */
2676     private final void treeifyBin(Node<K,V>[] tab, int index) {
2677         Node<K,V> b; int n;
2678         if (tab != null) {
2679             if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2680                 tryPresize(n << 1);
2681             else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2682                 synchronized (b) {
2683                     if (tabAt(tab, index) == b) {
2684                         TreeNode<K,V> hd = null, tl = null;
2685                         for (Node<K,V> e = b; e != null; e = e.next) {
2686                             TreeNode<K,V> p =
2687                                 new TreeNode<K,V>(e.hash, e.key, e.val,
2688                                                   null, null);
2689                             if ((p.prev = tl) == null)
2690                                 hd = p;
2691                             else
2692                                 tl.next = p;
2693                             tl = p;
2694                         }
2695                         setTabAt(tab, index, new TreeBin<K,V>(hd));
2696                     }
2697                 }
2698             }
2699         }
2700     }
2701 
2702     /**
2703      * Returns a list of non-TreeNodes replacing those in given list.
2704      */
2705     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2706         Node<K,V> hd = null, tl = null;
2707         for (Node<K,V> q = b; q != null; q = q.next) {
2708             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2709             if (tl == null)
2710                 hd = p;
2711             else
2712                 tl.next = p;
2713             tl = p;
2714         }
2715         return hd;
2716     }
2717 
2718     /* ---------------- TreeNodes -------------- */
2719 
2720     /**
2721      * Nodes for use in TreeBins.
2722      */
2723     static final class TreeNode<K,V> extends Node<K,V> {
2724         TreeNode<K,V> parent;  // red-black tree links
2725         TreeNode<K,V> left;
2726         TreeNode<K,V> right;
2727         TreeNode<K,V> prev;    // needed to unlink next upon deletion
2728         boolean red;
2729 
2730         TreeNode(int hash, K key, V val, Node<K,V> next,
2731                  TreeNode<K,V> parent) {
2732             super(hash, key, val, next);
2733             this.parent = parent;
2734         }
2735 
2736         Node<K,V> find(int h, Object k) {
2737             return findTreeNode(h, k, null);
2738         }
2739 
2740         /**
2741          * Returns the TreeNode (or null if not found) for the given key
2742          * starting at given root.
2743          */
2744         final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2745             if (k != null) {
2746                 TreeNode<K,V> p = this;
2747                 do {
2748                     int ph, dir; K pk; TreeNode<K,V> q;
2749                     TreeNode<K,V> pl = p.left, pr = p.right;
2750                     if ((ph = p.hash) > h)
2751                         p = pl;
2752                     else if (ph < h)
2753                         p = pr;
2754                     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2755                         return p;
2756                     else if (pl == null)
2757                         p = pr;
2758                     else if (pr == null)
2759                         p = pl;
2760                     else if ((kc != null ||
2761                               (kc = comparableClassFor(k)) != null) &&
2762                              (dir = compareComparables(kc, k, pk)) != 0)
2763                         p = (dir < 0) ? pl : pr;
2764                     else if ((q = pr.findTreeNode(h, k, kc)) != null)
2765                         return q;
2766                     else
2767                         p = pl;
2768                 } while (p != null);
2769             }
2770             return null;
2771         }
2772     }
2773 
2774     /* ---------------- TreeBins -------------- */
2775 
2776     /**
2777      * TreeNodes used at the heads of bins. TreeBins do not hold user
2778      * keys or values, but instead point to list of TreeNodes and
2779      * their root. They also maintain a parasitic read-write lock
2780      * forcing writers (who hold bin lock) to wait for readers (who do
2781      * not) to complete before tree restructuring operations.
2782      */
2783     static final class TreeBin<K,V> extends Node<K,V> {
2784         TreeNode<K,V> root;
2785         volatile TreeNode<K,V> first;
2786         volatile Thread waiter;
2787         volatile int lockState;
2788         // values for lockState
2789         static final int WRITER = 1; // set while holding write lock
2790         static final int WAITER = 2; // set when waiting for write lock
2791         static final int READER = 4; // increment value for setting read lock
2792 
2793         /**
2794          * Tie-breaking utility for ordering insertions when equal
2795          * hashCodes and non-comparable. We don't require a total
2796          * order, just a consistent insertion rule to maintain
2797          * equivalence across rebalancings. Tie-breaking further than
2798          * necessary simplifies testing a bit.
2799          */
2800         static int tieBreakOrder(Object a, Object b) {
2801             int d;
2802             if (a == null || b == null ||
2803                 (d = a.getClass().getName().
2804                  compareTo(b.getClass().getName())) == 0)
2805                 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2806                      -1 : 1);
2807             return d;
2808         }
2809 
2810         /**
2811          * Creates bin with initial set of nodes headed by b.
2812          */
2813         TreeBin(TreeNode<K,V> b) {
2814             super(TREEBIN, null, null);
2815             this.first = b;
2816             TreeNode<K,V> r = null;
2817             for (TreeNode<K,V> x = b, next; x != null; x = next) {
2818                 next = (TreeNode<K,V>)x.next;
2819                 x.left = x.right = null;
2820                 if (r == null) {
2821                     x.parent = null;
2822                     x.red = false;
2823                     r = x;
2824                 }
2825                 else {
2826                     K k = x.key;
2827                     int h = x.hash;
2828                     Class<?> kc = null;
2829                     for (TreeNode<K,V> p = r;;) {
2830                         int dir, ph;
2831                         K pk = p.key;
2832                         if ((ph = p.hash) > h)
2833                             dir = -1;
2834                         else if (ph < h)
2835                             dir = 1;
2836                         else if ((kc == null &&
2837                                   (kc = comparableClassFor(k)) == null) ||
2838                                  (dir = compareComparables(kc, k, pk)) == 0)
2839                             dir = tieBreakOrder(k, pk);
2840                         TreeNode<K,V> xp = p;
2841                         if ((p = (dir <= 0) ? p.left : p.right) == null) {
2842                             x.parent = xp;
2843                             if (dir <= 0)
2844                                 xp.left = x;
2845                             else
2846                                 xp.right = x;
2847                             r = balanceInsertion(r, x);
2848                             break;
2849                         }
2850                     }
2851                 }
2852             }
2853             this.root = r;
2854             assert checkInvariants(root);
2855         }
2856 
2857         /**
2858          * Acquires write lock for tree restructuring.
2859          */
2860         private final void lockRoot() {
2861             if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2862                 contendedLock(); // offload to separate method
2863         }
2864 
2865         /**
2866          * Releases write lock for tree restructuring.
2867          */
2868         private final void unlockRoot() {
2869             lockState = 0;
2870         }
2871 
2872         /**
2873          * Possibly blocks awaiting root lock.
2874          */
2875         private final void contendedLock() {
2876             boolean waiting = false;
2877             for (int s;;) {
2878                 if (((s = lockState) & ~WAITER) == 0) {
2879                     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2880                         if (waiting)
2881                             waiter = null;
2882                         return;
2883                     }
2884                 }
2885                 else if ((s & WAITER) == 0) {
2886                     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2887                         waiting = true;
2888                         waiter = Thread.currentThread();
2889                     }
2890                 }
2891                 else if (waiting)
2892                     LockSupport.park(this);
2893             }
2894         }
2895 
2896         /**
2897          * Returns matching node or null if none. Tries to search
2898          * using tree comparisons from root, but continues linear
2899          * search when lock not available.
2900          */
2901         final Node<K,V> find(int h, Object k) {
2902             if (k != null) {
2903                 for (Node<K,V> e = first; e != null; ) {
2904                     int s; K ek;
2905                     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2906                         if (e.hash == h &&
2907                             ((ek = e.key) == k || (ek != null && k.equals(ek))))
2908                             return e;
2909                         e = e.next;
2910                     }
2911                     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2912                                                  s + READER)) {
2913                         TreeNode<K,V> r, p;
2914                         try {
2915                             p = ((r = root) == null ? null :
2916                                  r.findTreeNode(h, k, null));
2917                         } finally {
2918                             Thread w;
2919                             if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2920                                 (READER|WAITER) && (w = waiter) != null)
2921                                 LockSupport.unpark(w);
2922                         }
2923                         return p;
2924                     }
2925                 }
2926             }
2927             return null;
2928         }
2929 
2930         /**
2931          * Finds or adds a node.
2932          * @return null if added
2933          */
2934         final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2935             Class<?> kc = null;
2936             boolean searched = false;
2937             for (TreeNode<K,V> p = root;;) {
2938                 int dir, ph; K pk;
2939                 if (p == null) {
2940                     first = root = new TreeNode<K,V>(h, k, v, null, null);
2941                     break;
2942                 }
2943                 else if ((ph = p.hash) > h)
2944                     dir = -1;
2945                 else if (ph < h)
2946                     dir = 1;
2947                 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2948                     return p;
2949                 else if ((kc == null &&
2950                           (kc = comparableClassFor(k)) == null) ||
2951                          (dir = compareComparables(kc, k, pk)) == 0) {
2952                     if (!searched) {
2953                         TreeNode<K,V> q, ch;
2954                         searched = true;
2955                         if (((ch = p.left) != null &&
2956                              (q = ch.findTreeNode(h, k, kc)) != null) ||
2957                             ((ch = p.right) != null &&
2958                              (q = ch.findTreeNode(h, k, kc)) != null))
2959                             return q;
2960                     }
2961                     dir = tieBreakOrder(k, pk);
2962                 }
2963 
2964                 TreeNode<K,V> xp = p;
2965                 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2966                     TreeNode<K,V> x, f = first;
2967                     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2968                     if (f != null)
2969                         f.prev = x;
2970                     if (dir <= 0)
2971                         xp.left = x;
2972                     else
2973                         xp.right = x;
2974                     if (!xp.red)
2975                         x.red = true;
2976                     else {
2977                         lockRoot();
2978                         try {
2979                             root = balanceInsertion(root, x);
2980                         } finally {
2981                             unlockRoot();
2982                         }
2983                     }
2984                     break;
2985                 }
2986             }
2987             assert checkInvariants(root);
2988             return null;
2989         }
2990 
2991         /**
2992          * Removes the given node, that must be present before this
2993          * call.  This is messier than typical red-black deletion code
2994          * because we cannot swap the contents of an interior node
2995          * with a leaf successor that is pinned by "next" pointers
2996          * that are accessible independently of lock. So instead we
2997          * swap the tree linkages.
2998          *
2999          * @return true if now too small, so should be untreeified
3000          */
3001         final boolean removeTreeNode(TreeNode<K,V> p) {
3002             TreeNode<K,V> next = (TreeNode<K,V>)p.next;
3003             TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
3004             TreeNode<K,V> r, rl;
3005             if (pred == null)
3006                 first = next;
3007             else
3008                 pred.next = next;
3009             if (next != null)
3010                 next.prev = pred;
3011             if (first == null) {
3012                 root = null;
3013                 return true;
3014             }
3015             if ((r = root) == null || r.right == null || // too small
3016                 (rl = r.left) == null || rl.left == null)
3017                 return true;
3018             lockRoot();
3019             try {
3020                 TreeNode<K,V> replacement;
3021                 TreeNode<K,V> pl = p.left;
3022                 TreeNode<K,V> pr = p.right;
3023                 if (pl != null && pr != null) {
3024                     TreeNode<K,V> s = pr, sl;
3025                     while ((sl = s.left) != null) // find successor
3026                         s = sl;
3027                     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
3028                     TreeNode<K,V> sr = s.right;
3029                     TreeNode<K,V> pp = p.parent;
3030                     if (s == pr) { // p was s's direct parent
3031                         p.parent = s;
3032                         s.right = p;
3033                     }
3034                     else {
3035                         TreeNode<K,V> sp = s.parent;
3036                         if ((p.parent = sp) != null) {
3037                             if (s == sp.left)
3038                                 sp.left = p;
3039                             else
3040                                 sp.right = p;
3041                         }
3042                         if ((s.right = pr) != null)
3043                             pr.parent = s;
3044                     }
3045                     p.left = null;
3046                     if ((p.right = sr) != null)
3047                         sr.parent = p;
3048                     if ((s.left = pl) != null)
3049                         pl.parent = s;
3050                     if ((s.parent = pp) == null)
3051                         r = s;
3052                     else if (p == pp.left)
3053                         pp.left = s;
3054                     else
3055                         pp.right = s;
3056                     if (sr != null)
3057                         replacement = sr;
3058                     else
3059                         replacement = p;
3060                 }
3061                 else if (pl != null)
3062                     replacement = pl;
3063                 else if (pr != null)
3064                     replacement = pr;
3065                 else
3066                     replacement = p;
3067                 if (replacement != p) {
3068                     TreeNode<K,V> pp = replacement.parent = p.parent;
3069                     if (pp == null)
3070                         r = replacement;
3071                     else if (p == pp.left)
3072                         pp.left = replacement;
3073                     else
3074                         pp.right = replacement;
3075                     p.left = p.right = p.parent = null;
3076                 }
3077 
3078                 root = (p.red) ? r : balanceDeletion(r, replacement);
3079 
3080                 if (p == replacement) {  // detach pointers
3081                     TreeNode<K,V> pp;
3082                     if ((pp = p.parent) != null) {
3083                         if (p == pp.left)
3084                             pp.left = null;
3085                         else if (p == pp.right)
3086                             pp.right = null;
3087                         p.parent = null;
3088                     }
3089                 }
3090             } finally {
3091                 unlockRoot();
3092             }
3093             assert checkInvariants(root);
3094             return false;
3095         }
3096 
3097         /* ------------------------------------------------------------ */
3098         // Red-black tree methods, all adapted from CLR
3099 
3100         static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3101                                               TreeNode<K,V> p) {
3102             TreeNode<K,V> r, pp, rl;
3103             if (p != null && (r = p.right) != null) {
3104                 if ((rl = p.right = r.left) != null)
3105                     rl.parent = p;
3106                 if ((pp = r.parent = p.parent) == null)
3107                     (root = r).red = false;
3108                 else if (pp.left == p)
3109                     pp.left = r;
3110                 else
3111                     pp.right = r;
3112                 r.left = p;
3113                 p.parent = r;
3114             }
3115             return root;
3116         }
3117 
3118         static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3119                                                TreeNode<K,V> p) {
3120             TreeNode<K,V> l, pp, lr;
3121             if (p != null && (l = p.left) != null) {
3122                 if ((lr = p.left = l.right) != null)
3123                     lr.parent = p;
3124                 if ((pp = l.parent = p.parent) == null)
3125                     (root = l).red = false;
3126                 else if (pp.right == p)
3127                     pp.right = l;
3128                 else
3129                     pp.left = l;
3130                 l.right = p;
3131                 p.parent = l;
3132             }
3133             return root;
3134         }
3135 
3136         static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3137                                                     TreeNode<K,V> x) {
3138             x.red = true;
3139             for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3140                 if ((xp = x.parent) == null) {
3141                     x.red = false;
3142                     return x;
3143                 }
3144                 else if (!xp.red || (xpp = xp.parent) == null)
3145                     return root;
3146                 if (xp == (xppl = xpp.left)) {
3147                     if ((xppr = xpp.right) != null && xppr.red) {
3148                         xppr.red = false;
3149                         xp.red = false;
3150                         xpp.red = true;
3151                         x = xpp;
3152                     }
3153                     else {
3154                         if (x == xp.right) {
3155                             root = rotateLeft(root, x = xp);
3156                             xpp = (xp = x.parent) == null ? null : xp.parent;
3157                         }
3158                         if (xp != null) {
3159                             xp.red = false;
3160                             if (xpp != null) {
3161                                 xpp.red = true;
3162                                 root = rotateRight(root, xpp);
3163                             }
3164                         }
3165                     }
3166                 }
3167                 else {
3168                     if (xppl != null && xppl.red) {
3169                         xppl.red = false;
3170                         xp.red = false;
3171                         xpp.red = true;
3172                         x = xpp;
3173                     }
3174                     else {
3175                         if (x == xp.left) {
3176                             root = rotateRight(root, x = xp);
3177                             xpp = (xp = x.parent) == null ? null : xp.parent;
3178                         }
3179                         if (xp != null) {
3180                             xp.red = false;
3181                             if (xpp != null) {
3182                                 xpp.red = true;
3183                                 root = rotateLeft(root, xpp);
3184                             }
3185                         }
3186                     }
3187                 }
3188             }
3189         }
3190 
3191         static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3192                                                    TreeNode<K,V> x) {
3193             for (TreeNode<K,V> xp, xpl, xpr;;) {
3194                 if (x == null || x == root)
3195                     return root;
3196                 else if ((xp = x.parent) == null) {
3197                     x.red = false;
3198                     return x;
3199                 }
3200                 else if (x.red) {
3201                     x.red = false;
3202                     return root;
3203                 }
3204                 else if ((xpl = xp.left) == x) {
3205                     if ((xpr = xp.right) != null && xpr.red) {
3206                         xpr.red = false;
3207                         xp.red = true;
3208                         root = rotateLeft(root, xp);
3209                         xpr = (xp = x.parent) == null ? null : xp.right;
3210                     }
3211                     if (xpr == null)
3212                         x = xp;
3213                     else {
3214                         TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3215                         if ((sr == null || !sr.red) &&
3216                             (sl == null || !sl.red)) {
3217                             xpr.red = true;
3218                             x = xp;
3219                         }
3220                         else {
3221                             if (sr == null || !sr.red) {
3222                                 if (sl != null)
3223                                     sl.red = false;
3224                                 xpr.red = true;
3225                                 root = rotateRight(root, xpr);
3226                                 xpr = (xp = x.parent) == null ?
3227                                     null : xp.right;
3228                             }
3229                             if (xpr != null) {
3230                                 xpr.red = (xp == null) ? false : xp.red;
3231                                 if ((sr = xpr.right) != null)
3232                                     sr.red = false;
3233                             }
3234                             if (xp != null) {
3235                                 xp.red = false;
3236                                 root = rotateLeft(root, xp);
3237                             }
3238                             x = root;
3239                         }
3240                     }
3241                 }
3242                 else { // symmetric
3243                     if (xpl != null && xpl.red) {
3244                         xpl.red = false;
3245                         xp.red = true;
3246                         root = rotateRight(root, xp);
3247                         xpl = (xp = x.parent) == null ? null : xp.left;
3248                     }
3249                     if (xpl == null)
3250                         x = xp;
3251                     else {
3252                         TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3253                         if ((sl == null || !sl.red) &&
3254                             (sr == null || !sr.red)) {
3255                             xpl.red = true;
3256                             x = xp;
3257                         }
3258                         else {
3259                             if (sl == null || !sl.red) {
3260                                 if (sr != null)
3261                                     sr.red = false;
3262                                 xpl.red = true;
3263                                 root = rotateLeft(root, xpl);
3264                                 xpl = (xp = x.parent) == null ?
3265                                     null : xp.left;
3266                             }
3267                             if (xpl != null) {
3268                                 xpl.red = (xp == null) ? false : xp.red;
3269                                 if ((sl = xpl.left) != null)
3270                                     sl.red = false;
3271                             }
3272                             if (xp != null) {
3273                                 xp.red = false;
3274                                 root = rotateRight(root, xp);
3275                             }
3276                             x = root;
3277                         }
3278                     }
3279                 }
3280             }
3281         }
3282 
3283         /**
3284          * Checks invariants recursively for the tree of Nodes rooted at t.
3285          */
3286         static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3287             TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3288                 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3289             if (tb != null && tb.next != t)
3290                 return false;
3291             if (tn != null && tn.prev != t)
3292                 return false;
3293             if (tp != null && t != tp.left && t != tp.right)
3294                 return false;
3295             if (tl != null && (tl.parent != t || tl.hash > t.hash))
3296                 return false;
3297             if (tr != null && (tr.parent != t || tr.hash < t.hash))
3298                 return false;
3299             if (t.red && tl != null && tl.red && tr != null && tr.red)
3300                 return false;
3301             if (tl != null && !checkInvariants(tl))
3302                 return false;
3303             if (tr != null && !checkInvariants(tr))
3304                 return false;
3305             return true;
3306         }
3307 
3308         private static final Unsafe U = Unsafe.getUnsafe();
3309         private static final long LOCKSTATE;
3310         static {
3311             try {
3312                 LOCKSTATE = U.objectFieldOffset
3313                     (TreeBin.class.getDeclaredField("lockState"));
3314             } catch (ReflectiveOperationException e) {
3315                 throw new Error(e);
3316             }
3317         }
3318     }
3319 
3320     /* ----------------Table Traversal -------------- */
3321 
3322     /**
3323      * Records the table, its length, and current traversal index for a
3324      * traverser that must process a region of a forwarded table before
3325      * proceeding with current table.
3326      */
3327     static final class TableStack<K,V> {
3328         int length;
3329         int index;
3330         Node<K,V>[] tab;
3331         TableStack<K,V> next;
3332     }
3333 
3334     /**
3335      * Encapsulates traversal for methods such as containsValue; also
3336      * serves as a base class for other iterators and spliterators.
3337      *
3338      * Method advance visits once each still-valid node that was
3339      * reachable upon iterator construction. It might miss some that
3340      * were added to a bin after the bin was visited, which is OK wrt
3341      * consistency guarantees. Maintaining this property in the face
3342      * of possible ongoing resizes requires a fair amount of
3343      * bookkeeping state that is difficult to optimize away amidst
3344      * volatile accesses.  Even so, traversal maintains reasonable
3345      * throughput.
3346      *
3347      * Normally, iteration proceeds bin-by-bin traversing lists.
3348      * However, if the table has been resized, then all future steps
3349      * must traverse both the bin at the current index as well as at
3350      * (index + baseSize); and so on for further resizings. To
3351      * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
3353      * for a table read.
3354      */
3355     static class Traverser<K,V> {
3356         Node<K,V>[] tab;        // current table; updated if resized
3357         Node<K,V> next;         // the next entry to use
3358         TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3359         int index;              // index of bin to use next
3360         int baseIndex;          // current index of initial table
3361         int baseLimit;          // index bound for initial table
3362         final int baseSize;     // initial table size
3363 
3364         Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3365             this.tab = tab;
3366             this.baseSize = size;
3367             this.baseIndex = this.index = index;
3368             this.baseLimit = limit;
3369             this.next = null;
3370         }
3371 
3372         /**
3373          * Advances if possible, returning next valid node, or null if none.
3374          */
3375         final Node<K,V> advance() {
3376             Node<K,V> e;
3377             if ((e = next) != null)
3378                 e = e.next;
3379             for (;;) {
3380                 Node<K,V>[] t; int i, n;  // must use locals in checks
3381                 if (e != null)
3382                     return next = e;
3383                 if (baseIndex >= baseLimit || (t = tab) == null ||
3384                     (n = t.length) <= (i = index) || i < 0)
3385                     return next = null;
3386                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
3387                     if (e instanceof ForwardingNode) {
3388                         tab = ((ForwardingNode<K,V>)e).nextTable;
3389                         e = null;
3390                         pushState(t, i, n);
3391                         continue;
3392                     }
3393                     else if (e instanceof TreeBin)
3394                         e = ((TreeBin<K,V>)e).first;
3395                     else
3396                         e = null;
3397                 }
3398                 if (stack != null)
3399                     recoverState(n);
3400                 else if ((index = i + baseSize) >= n)
3401                     index = ++baseIndex; // visit upper slots if present
3402             }
3403         }
3404 
3405         /**
3406          * Saves traversal state upon encountering a forwarding node.
3407          */
3408         private void pushState(Node<K,V>[] t, int i, int n) {
3409             TableStack<K,V> s = spare;  // reuse if possible
3410             if (s != null)
3411                 spare = s.next;
3412             else
3413                 s = new TableStack<K,V>();
3414             s.tab = t;
3415             s.length = n;
3416             s.index = i;
3417             s.next = stack;
3418             stack = s;
3419         }
3420 
3421         /**
3422          * Possibly pops traversal state.
3423          *
3424          * @param n length of current table
3425          */
3426         private void recoverState(int n) {
3427             TableStack<K,V> s; int len;
3428             while ((s = stack) != null && (index += (len = s.length)) >= n) {
3429                 n = len;
3430                 index = s.index;
3431                 tab = s.tab;
3432                 s.tab = null;
3433                 TableStack<K,V> next = s.next;
3434                 s.next = spare; // save for reuse
3435                 stack = next;
3436                 spare = s;
3437             }
3438             if (s == null && (index += baseSize) >= n)
3439                 index = ++baseIndex;
3440         }
3441     }
3442 
3443     /**
3444      * Base of key, value, and entry Iterators. Adds fields to
3445      * Traverser to support iterator.remove.
3446      */
3447     static class BaseIterator<K,V> extends Traverser<K,V> {
3448         final ConcurrentHashMap<K,V> map;
3449         Node<K,V> lastReturned;
3450         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3451                     ConcurrentHashMap<K,V> map) {
3452             super(tab, size, index, limit);
3453             this.map = map;
3454             advance();
3455         }
3456 
3457         public final boolean hasNext() { return next != null; }
3458         public final boolean hasMoreElements() { return next != null; }
3459 
3460         public final void remove() {
3461             Node<K,V> p;
3462             if ((p = lastReturned) == null)
3463                 throw new IllegalStateException();
3464             lastReturned = null;
3465             map.replaceNode(p.key, null, null);
3466         }
3467     }
3468 
3469     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3470         implements Iterator<K>, Enumeration<K> {
3471         KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3472                     ConcurrentHashMap<K,V> map) {
3473             super(tab, size, index, limit, map);
3474         }
3475 
3476         public final K next() {
3477             Node<K,V> p;
3478             if ((p = next) == null)
3479                 throw new NoSuchElementException();
3480             K k = p.key;
3481             lastReturned = p;
3482             advance();
3483             return k;
3484         }
3485 
3486         public final K nextElement() { return next(); }
3487     }
3488 
3489     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3490         implements Iterator<V>, Enumeration<V> {
3491         ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3492                       ConcurrentHashMap<K,V> map) {
3493             super(tab, size, index, limit, map);
3494         }
3495 
3496         public final V next() {
3497             Node<K,V> p;
3498             if ((p = next) == null)
3499                 throw new NoSuchElementException();
3500             V v = p.val;
3501             lastReturned = p;
3502             advance();
3503             return v;
3504         }
3505 
3506         public final V nextElement() { return next(); }
3507     }
3508 
3509     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3510         implements Iterator<Map.Entry<K,V>> {
3511         EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3512                       ConcurrentHashMap<K,V> map) {
3513             super(tab, size, index, limit, map);
3514         }
3515 
3516         public final Map.Entry<K,V> next() {
3517             Node<K,V> p;
3518             if ((p = next) == null)
3519                 throw new NoSuchElementException();
3520             K k = p.key;
3521             V v = p.val;
3522             lastReturned = p;
3523             advance();
3524             return new MapEntry<K,V>(k, v, map);
3525         }
3526     }
3527 
3528     /**
3529      * Exported Entry for EntryIterator.
3530      */
3531     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3532         final K key; // non-null
3533         V val;       // non-null
3534         final ConcurrentHashMap<K,V> map;
3535         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3536             this.key = key;
3537             this.val = val;
3538             this.map = map;
3539         }
3540         public K getKey()        { return key; }
3541         public V getValue()      { return val; }
3542         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3543         public String toString() {
3544             return Helpers.mapEntryToString(key, val);
3545         }
3546 
3547         public boolean equals(Object o) {
3548             Object k, v; Map.Entry<?,?> e;
3549             return ((o instanceof Map.Entry) &&
3550                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3551                     (v = e.getValue()) != null &&
3552                     (k == key || k.equals(key)) &&
3553                     (v == val || v.equals(val)));
3554         }
3555 
3556         /**
3557          * Sets our entry's value and writes through to the map. The
3558          * value to return is somewhat arbitrary here. Since we do not
3559          * necessarily track asynchronous changes, the most recent
3560          * "previous" value could be different from what we return (or
3561          * could even have been removed, in which case the put will
3562          * re-establish). We do not and cannot guarantee more.
3563          */
3564         public V setValue(V value) {
3565             if (value == null) throw new NullPointerException();
3566             V v = val;
3567             val = value;
3568             map.put(key, value);
3569             return v;
3570         }
3571     }
3572 
3573     static final class KeySpliterator<K,V> extends Traverser<K,V>
3574         implements Spliterator<K> {
3575         long est;               // size estimate
3576         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3577                        long est) {
3578             super(tab, size, index, limit);
3579             this.est = est;
3580         }
3581 
3582         public KeySpliterator<K,V> trySplit() {
3583             int i, f, h;
3584             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3585                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3586                                         f, est >>>= 1);
3587         }
3588 
3589         public void forEachRemaining(Consumer<? super K> action) {
3590             if (action == null) throw new NullPointerException();
3591             for (Node<K,V> p; (p = advance()) != null;)
3592                 action.accept(p.key);
3593         }
3594 
3595         public boolean tryAdvance(Consumer<? super K> action) {
3596             if (action == null) throw new NullPointerException();
3597             Node<K,V> p;
3598             if ((p = advance()) == null)
3599                 return false;
3600             action.accept(p.key);
3601             return true;
3602         }
3603 
3604         public long estimateSize() { return est; }
3605 
3606         public int characteristics() {
3607             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3608                 Spliterator.NONNULL;
3609         }
3610     }
3611 
3612     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3613         implements Spliterator<V> {
3614         long est;               // size estimate
3615         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3616                          long est) {
3617             super(tab, size, index, limit);
3618             this.est = est;
3619         }
3620 
3621         public ValueSpliterator<K,V> trySplit() {
3622             int i, f, h;
3623             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3624                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3625                                           f, est >>>= 1);
3626         }
3627 
3628         public void forEachRemaining(Consumer<? super V> action) {
3629             if (action == null) throw new NullPointerException();
3630             for (Node<K,V> p; (p = advance()) != null;)
3631                 action.accept(p.val);
3632         }
3633 
3634         public boolean tryAdvance(Consumer<? super V> action) {
3635             if (action == null) throw new NullPointerException();
3636             Node<K,V> p;
3637             if ((p = advance()) == null)
3638                 return false;
3639             action.accept(p.val);
3640             return true;
3641         }
3642 
3643         public long estimateSize() { return est; }
3644 
3645         public int characteristics() {
3646             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3647         }
3648     }
3649 
3650     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3651         implements Spliterator<Map.Entry<K,V>> {
3652         final ConcurrentHashMap<K,V> map; // To export MapEntry
3653         long est;               // size estimate
3654         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3655                          long est, ConcurrentHashMap<K,V> map) {
3656             super(tab, size, index, limit);
3657             this.map = map;
3658             this.est = est;
3659         }
3660 
3661         public EntrySpliterator<K,V> trySplit() {
3662             int i, f, h;
3663             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3664                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3665                                           f, est >>>= 1, map);
3666         }
3667 
3668         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3669             if (action == null) throw new NullPointerException();
3670             for (Node<K,V> p; (p = advance()) != null; )
3671                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3672         }
3673 
3674         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3675             if (action == null) throw new NullPointerException();
3676             Node<K,V> p;
3677             if ((p = advance()) == null)
3678                 return false;
3679             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3680             return true;
3681         }
3682 
3683         public long estimateSize() { return est; }
3684 
3685         public int characteristics() {
3686             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3687                 Spliterator.NONNULL;
3688         }
3689     }
3690 
3691     // Parallel bulk operations
3692 
3693     /**
3694      * Computes initial batch value for bulk tasks. The returned value
3695      * is approximately exp2 of the number of times (minus one) to
3696      * split task by two before executing leaf action. This value is
3697      * faster to compute and more convenient to use as a guide to
3698      * splitting than is the depth, since it is used while dividing by
3699      * two anyway.
3700      */
3701     final int batchFor(long b) {
3702         long n;
3703         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3704             return 0;
3705         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3706         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3707     }
3708 
3709     /**
3710      * Performs the given action for each (key, value).
3711      *
3712      * @param parallelismThreshold the (estimated) number of elements
3713      * needed for this operation to be executed in parallel
3714      * @param action the action
3715      * @since 1.8
3716      */
3717     public void forEach(long parallelismThreshold,
3718                         BiConsumer<? super K,? super V> action) {
3719         if (action == null) throw new NullPointerException();
3720         new ForEachMappingTask<K,V>
3721             (null, batchFor(parallelismThreshold), 0, 0, table,
3722              action).invoke();
3723     }
3724 
3725     /**
3726      * Performs the given action for each non-null transformation
3727      * of each (key, value).
3728      *
3729      * @param parallelismThreshold the (estimated) number of elements
3730      * needed for this operation to be executed in parallel
3731      * @param transformer a function returning the transformation
3732      * for an element, or null if there is no transformation (in
3733      * which case the action is not applied)
3734      * @param action the action
3735      * @param <U> the return type of the transformer
3736      * @since 1.8
3737      */
3738     public <U> void forEach(long parallelismThreshold,
3739                             BiFunction<? super K, ? super V, ? extends U> transformer,
3740                             Consumer<? super U> action) {
3741         if (transformer == null || action == null)
3742             throw new NullPointerException();
3743         new ForEachTransformedMappingTask<K,V,U>
3744             (null, batchFor(parallelismThreshold), 0, 0, table,
3745              transformer, action).invoke();
3746     }
3747 
3748     /**
3749      * Returns a non-null result from applying the given search
3750      * function on each (key, value), or null if none.  Upon
3751      * success, further element processing is suppressed and the
3752      * results of any other parallel invocations of the search
3753      * function are ignored.
3754      *
3755      * @param parallelismThreshold the (estimated) number of elements
3756      * needed for this operation to be executed in parallel
3757      * @param searchFunction a function returning a non-null
3758      * result on success, else null
3759      * @param <U> the return type of the search function
3760      * @return a non-null result from applying the given search
3761      * function on each (key, value), or null if none
3762      * @since 1.8
3763      */
3764     public <U> U search(long parallelismThreshold,
3765                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3766         if (searchFunction == null) throw new NullPointerException();
3767         return new SearchMappingsTask<K,V,U>
3768             (null, batchFor(parallelismThreshold), 0, 0, table,
3769              searchFunction, new AtomicReference<U>()).invoke();
3770     }
3771 
3772     /**
3773      * Returns the result of accumulating the given transformation
3774      * of all (key, value) pairs using the given reducer to
3775      * combine values, or null if none.
3776      *
3777      * @param parallelismThreshold the (estimated) number of elements
3778      * needed for this operation to be executed in parallel
3779      * @param transformer a function returning the transformation
3780      * for an element, or null if there is no transformation (in
3781      * which case it is not combined)
3782      * @param reducer a commutative associative combining function
3783      * @param <U> the return type of the transformer
3784      * @return the result of accumulating the given transformation
3785      * of all (key, value) pairs
3786      * @since 1.8
3787      */
3788     public <U> U reduce(long parallelismThreshold,
3789                         BiFunction<? super K, ? super V, ? extends U> transformer,
3790                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3791         if (transformer == null || reducer == null)
3792             throw new NullPointerException();
3793         return new MapReduceMappingsTask<K,V,U>
3794             (null, batchFor(parallelismThreshold), 0, 0, table,
3795              null, transformer, reducer).invoke();
3796     }
3797 
3798     /**
3799      * Returns the result of accumulating the given transformation
3800      * of all (key, value) pairs using the given reducer to
3801      * combine values, and the given basis as an identity value.
3802      *
3803      * @param parallelismThreshold the (estimated) number of elements
3804      * needed for this operation to be executed in parallel
3805      * @param transformer a function returning the transformation
3806      * for an element
3807      * @param basis the identity (initial default value) for the reduction
3808      * @param reducer a commutative associative combining function
3809      * @return the result of accumulating the given transformation
3810      * of all (key, value) pairs
3811      * @since 1.8
3812      */
3813     public double reduceToDouble(long parallelismThreshold,
3814                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3815                                  double basis,
3816                                  DoubleBinaryOperator reducer) {
3817         if (transformer == null || reducer == null)
3818             throw new NullPointerException();
3819         return new MapReduceMappingsToDoubleTask<K,V>
3820             (null, batchFor(parallelismThreshold), 0, 0, table,
3821              null, transformer, basis, reducer).invoke();
3822     }
3823 
3824     /**
3825      * Returns the result of accumulating the given transformation
3826      * of all (key, value) pairs using the given reducer to
3827      * combine values, and the given basis as an identity value.
3828      *
3829      * @param parallelismThreshold the (estimated) number of elements
3830      * needed for this operation to be executed in parallel
3831      * @param transformer a function returning the transformation
3832      * for an element
3833      * @param basis the identity (initial default value) for the reduction
3834      * @param reducer a commutative associative combining function
3835      * @return the result of accumulating the given transformation
3836      * of all (key, value) pairs
3837      * @since 1.8
3838      */
3839     public long reduceToLong(long parallelismThreshold,
3840                              ToLongBiFunction<? super K, ? super V> transformer,
3841                              long basis,
3842                              LongBinaryOperator reducer) {
3843         if (transformer == null || reducer == null)
3844             throw new NullPointerException();
3845         return new MapReduceMappingsToLongTask<K,V>
3846             (null, batchFor(parallelismThreshold), 0, 0, table,
3847              null, transformer, basis, reducer).invoke();
3848     }
3849 
3850     /**
3851      * Returns the result of accumulating the given transformation
3852      * of all (key, value) pairs using the given reducer to
3853      * combine values, and the given basis as an identity value.
3854      *
3855      * @param parallelismThreshold the (estimated) number of elements
3856      * needed for this operation to be executed in parallel
3857      * @param transformer a function returning the transformation
3858      * for an element
3859      * @param basis the identity (initial default value) for the reduction
3860      * @param reducer a commutative associative combining function
3861      * @return the result of accumulating the given transformation
3862      * of all (key, value) pairs
3863      * @since 1.8
3864      */
3865     public int reduceToInt(long parallelismThreshold,
3866                            ToIntBiFunction<? super K, ? super V> transformer,
3867                            int basis,
3868                            IntBinaryOperator reducer) {
3869         if (transformer == null || reducer == null)
3870             throw new NullPointerException();
3871         return new MapReduceMappingsToIntTask<K,V>
3872             (null, batchFor(parallelismThreshold), 0, 0, table,
3873              null, transformer, basis, reducer).invoke();
3874     }
3875 
3876     /**
3877      * Performs the given action for each key.
3878      *
3879      * @param parallelismThreshold the (estimated) number of elements
3880      * needed for this operation to be executed in parallel
3881      * @param action the action
3882      * @since 1.8
3883      */
3884     public void forEachKey(long parallelismThreshold,
3885                            Consumer<? super K> action) {
3886         if (action == null) throw new NullPointerException();
3887         new ForEachKeyTask<K,V>
3888             (null, batchFor(parallelismThreshold), 0, 0, table,
3889              action).invoke();
3890     }
3891 
3892     /**
3893      * Performs the given action for each non-null transformation
3894      * of each key.
3895      *
3896      * @param parallelismThreshold the (estimated) number of elements
3897      * needed for this operation to be executed in parallel
3898      * @param transformer a function returning the transformation
3899      * for an element, or null if there is no transformation (in
3900      * which case the action is not applied)
3901      * @param action the action
3902      * @param <U> the return type of the transformer
3903      * @since 1.8
3904      */
3905     public <U> void forEachKey(long parallelismThreshold,
3906                                Function<? super K, ? extends U> transformer,
3907                                Consumer<? super U> action) {
3908         if (transformer == null || action == null)
3909             throw new NullPointerException();
3910         new ForEachTransformedKeyTask<K,V,U>
3911             (null, batchFor(parallelismThreshold), 0, 0, table,
3912              transformer, action).invoke();
3913     }
3914 
3915     /**
3916      * Returns a non-null result from applying the given search
3917      * function on each key, or null if none. Upon success,
3918      * further element processing is suppressed and the results of
3919      * any other parallel invocations of the search function are
3920      * ignored.
3921      *
3922      * @param parallelismThreshold the (estimated) number of elements
3923      * needed for this operation to be executed in parallel
3924      * @param searchFunction a function returning a non-null
3925      * result on success, else null
3926      * @param <U> the return type of the search function
3927      * @return a non-null result from applying the given search
3928      * function on each key, or null if none
3929      * @since 1.8
3930      */
3931     public <U> U searchKeys(long parallelismThreshold,
3932                             Function<? super K, ? extends U> searchFunction) {
3933         if (searchFunction == null) throw new NullPointerException();
3934         return new SearchKeysTask<K,V,U>
3935             (null, batchFor(parallelismThreshold), 0, 0, table,
3936              searchFunction, new AtomicReference<U>()).invoke();
3937     }
3938 
3939     /**
3940      * Returns the result of accumulating all keys using the given
3941      * reducer to combine values, or null if none.
3942      *
3943      * @param parallelismThreshold the (estimated) number of elements
3944      * needed for this operation to be executed in parallel
3945      * @param reducer a commutative associative combining function
3946      * @return the result of accumulating all keys using the given
3947      * reducer to combine values, or null if none
3948      * @since 1.8
3949      */
3950     public K reduceKeys(long parallelismThreshold,
3951                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3952         if (reducer == null) throw new NullPointerException();
3953         return new ReduceKeysTask<K,V>
3954             (null, batchFor(parallelismThreshold), 0, 0, table,
3955              null, reducer).invoke();
3956     }
3957 
3958     /**
3959      * Returns the result of accumulating the given transformation
3960      * of all keys using the given reducer to combine values, or
3961      * null if none.
3962      *
3963      * @param parallelismThreshold the (estimated) number of elements
3964      * needed for this operation to be executed in parallel
3965      * @param transformer a function returning the transformation
3966      * for an element, or null if there is no transformation (in
3967      * which case it is not combined)
3968      * @param reducer a commutative associative combining function
3969      * @param <U> the return type of the transformer
3970      * @return the result of accumulating the given transformation
3971      * of all keys
3972      * @since 1.8
3973      */
3974     public <U> U reduceKeys(long parallelismThreshold,
3975                             Function<? super K, ? extends U> transformer,
3976          BiFunction<? super U, ? super U, ? extends U> reducer) {
3977         if (transformer == null || reducer == null)
3978             throw new NullPointerException();
3979         return new MapReduceKeysTask<K,V,U>
3980             (null, batchFor(parallelismThreshold), 0, 0, table,
3981              null, transformer, reducer).invoke();
3982     }
3983 
3984     /**
3985      * Returns the result of accumulating the given transformation
3986      * of all keys using the given reducer to combine values, and
3987      * the given basis as an identity value.
3988      *
3989      * @param parallelismThreshold the (estimated) number of elements
3990      * needed for this operation to be executed in parallel
3991      * @param transformer a function returning the transformation
3992      * for an element
3993      * @param basis the identity (initial default value) for the reduction
3994      * @param reducer a commutative associative combining function
3995      * @return the result of accumulating the given transformation
3996      * of all keys
3997      * @since 1.8
3998      */
3999     public double reduceKeysToDouble(long parallelismThreshold,
4000                                      ToDoubleFunction<? super K> transformer,
4001                                      double basis,
4002                                      DoubleBinaryOperator reducer) {
4003         if (transformer == null || reducer == null)
4004             throw new NullPointerException();
4005         return new MapReduceKeysToDoubleTask<K,V>
4006             (null, batchFor(parallelismThreshold), 0, 0, table,
4007              null, transformer, basis, reducer).invoke();
4008     }
4009 
4010     /**
4011      * Returns the result of accumulating the given transformation
4012      * of all keys using the given reducer to combine values, and
4013      * the given basis as an identity value.
4014      *
4015      * @param parallelismThreshold the (estimated) number of elements
4016      * needed for this operation to be executed in parallel
4017      * @param transformer a function returning the transformation
4018      * for an element
4019      * @param basis the identity (initial default value) for the reduction
4020      * @param reducer a commutative associative combining function
4021      * @return the result of accumulating the given transformation
4022      * of all keys
4023      * @since 1.8
4024      */
4025     public long reduceKeysToLong(long parallelismThreshold,
4026                                  ToLongFunction<? super K> transformer,
4027                                  long basis,
4028                                  LongBinaryOperator reducer) {
4029         if (transformer == null || reducer == null)
4030             throw new NullPointerException();
4031         return new MapReduceKeysToLongTask<K,V>
4032             (null, batchFor(parallelismThreshold), 0, 0, table,
4033              null, transformer, basis, reducer).invoke();
4034     }
4035 
4036     /**
4037      * Returns the result of accumulating the given transformation
4038      * of all keys using the given reducer to combine values, and
4039      * the given basis as an identity value.
4040      *
4041      * @param parallelismThreshold the (estimated) number of elements
4042      * needed for this operation to be executed in parallel
4043      * @param transformer a function returning the transformation
4044      * for an element
4045      * @param basis the identity (initial default value) for the reduction
4046      * @param reducer a commutative associative combining function
4047      * @return the result of accumulating the given transformation
4048      * of all keys
4049      * @since 1.8
4050      */
4051     public int reduceKeysToInt(long parallelismThreshold,
4052                                ToIntFunction<? super K> transformer,
4053                                int basis,
4054                                IntBinaryOperator reducer) {
4055         if (transformer == null || reducer == null)
4056             throw new NullPointerException();
4057         return new MapReduceKeysToIntTask<K,V>
4058             (null, batchFor(parallelismThreshold), 0, 0, table,
4059              null, transformer, basis, reducer).invoke();
4060     }
4061 
4062     /**
4063      * Performs the given action for each value.
4064      *
4065      * @param parallelismThreshold the (estimated) number of elements
4066      * needed for this operation to be executed in parallel
4067      * @param action the action
4068      * @since 1.8
4069      */
4070     public void forEachValue(long parallelismThreshold,
4071                              Consumer<? super V> action) {
4072         if (action == null)
4073             throw new NullPointerException();
4074         new ForEachValueTask<K,V>
4075             (null, batchFor(parallelismThreshold), 0, 0, table,
4076              action).invoke();
4077     }
4078 
4079     /**
4080      * Performs the given action for each non-null transformation
4081      * of each value.
4082      *
4083      * @param parallelismThreshold the (estimated) number of elements
4084      * needed for this operation to be executed in parallel
4085      * @param transformer a function returning the transformation
4086      * for an element, or null if there is no transformation (in
4087      * which case the action is not applied)
4088      * @param action the action
4089      * @param <U> the return type of the transformer
4090      * @since 1.8
4091      */
4092     public <U> void forEachValue(long parallelismThreshold,
4093                                  Function<? super V, ? extends U> transformer,
4094                                  Consumer<? super U> action) {
4095         if (transformer == null || action == null)
4096             throw new NullPointerException();
4097         new ForEachTransformedValueTask<K,V,U>
4098             (null, batchFor(parallelismThreshold), 0, 0, table,
4099              transformer, action).invoke();
4100     }
4101 
4102     /**
4103      * Returns a non-null result from applying the given search
4104      * function on each value, or null if none.  Upon success,
4105      * further element processing is suppressed and the results of
4106      * any other parallel invocations of the search function are
4107      * ignored.
4108      *
4109      * @param parallelismThreshold the (estimated) number of elements
4110      * needed for this operation to be executed in parallel
4111      * @param searchFunction a function returning a non-null
4112      * result on success, else null
4113      * @param <U> the return type of the search function
4114      * @return a non-null result from applying the given search
4115      * function on each value, or null if none
4116      * @since 1.8
4117      */
4118     public <U> U searchValues(long parallelismThreshold,
4119                               Function<? super V, ? extends U> searchFunction) {
4120         if (searchFunction == null) throw new NullPointerException();
4121         return new SearchValuesTask<K,V,U>
4122             (null, batchFor(parallelismThreshold), 0, 0, table,
4123              searchFunction, new AtomicReference<U>()).invoke();
4124     }
4125 
4126     /**
4127      * Returns the result of accumulating all values using the
4128      * given reducer to combine values, or null if none.
4129      *
4130      * @param parallelismThreshold the (estimated) number of elements
4131      * needed for this operation to be executed in parallel
4132      * @param reducer a commutative associative combining function
4133      * @return the result of accumulating all values
4134      * @since 1.8
4135      */
4136     public V reduceValues(long parallelismThreshold,
4137                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4138         if (reducer == null) throw new NullPointerException();
4139         return new ReduceValuesTask<K,V>
4140             (null, batchFor(parallelismThreshold), 0, 0, table,
4141              null, reducer).invoke();
4142     }
4143 
4144     /**
4145      * Returns the result of accumulating the given transformation
4146      * of all values using the given reducer to combine values, or
4147      * null if none.
4148      *
4149      * @param parallelismThreshold the (estimated) number of elements
4150      * needed for this operation to be executed in parallel
4151      * @param transformer a function returning the transformation
4152      * for an element, or null if there is no transformation (in
4153      * which case it is not combined)
4154      * @param reducer a commutative associative combining function
4155      * @param <U> the return type of the transformer
4156      * @return the result of accumulating the given transformation
4157      * of all values
4158      * @since 1.8
4159      */
4160     public <U> U reduceValues(long parallelismThreshold,
4161                               Function<? super V, ? extends U> transformer,
4162                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4163         if (transformer == null || reducer == null)
4164             throw new NullPointerException();
4165         return new MapReduceValuesTask<K,V,U>
4166             (null, batchFor(parallelismThreshold), 0, 0, table,
4167              null, transformer, reducer).invoke();
4168     }
4169 
4170     /**
4171      * Returns the result of accumulating the given transformation
4172      * of all values using the given reducer to combine values,
4173      * and the given basis as an identity value.
4174      *
4175      * @param parallelismThreshold the (estimated) number of elements
4176      * needed for this operation to be executed in parallel
4177      * @param transformer a function returning the transformation
4178      * for an element
4179      * @param basis the identity (initial default value) for the reduction
4180      * @param reducer a commutative associative combining function
4181      * @return the result of accumulating the given transformation
4182      * of all values
4183      * @since 1.8
4184      */
4185     public double reduceValuesToDouble(long parallelismThreshold,
4186                                        ToDoubleFunction<? super V> transformer,
4187                                        double basis,
4188                                        DoubleBinaryOperator reducer) {
4189         if (transformer == null || reducer == null)
4190             throw new NullPointerException();
4191         return new MapReduceValuesToDoubleTask<K,V>
4192             (null, batchFor(parallelismThreshold), 0, 0, table,
4193              null, transformer, basis, reducer).invoke();
4194     }
4195 
4196     /**
4197      * Returns the result of accumulating the given transformation
4198      * of all values using the given reducer to combine values,
4199      * and the given basis as an identity value.
4200      *
4201      * @param parallelismThreshold the (estimated) number of elements
4202      * needed for this operation to be executed in parallel
4203      * @param transformer a function returning the transformation
4204      * for an element
4205      * @param basis the identity (initial default value) for the reduction
4206      * @param reducer a commutative associative combining function
4207      * @return the result of accumulating the given transformation
4208      * of all values
4209      * @since 1.8
4210      */
4211     public long reduceValuesToLong(long parallelismThreshold,
4212                                    ToLongFunction<? super V> transformer,
4213                                    long basis,
4214                                    LongBinaryOperator reducer) {
4215         if (transformer == null || reducer == null)
4216             throw new NullPointerException();
4217         return new MapReduceValuesToLongTask<K,V>
4218             (null, batchFor(parallelismThreshold), 0, 0, table,
4219              null, transformer, basis, reducer).invoke();
4220     }
4221 
4222     /**
4223      * Returns the result of accumulating the given transformation
4224      * of all values using the given reducer to combine values,
4225      * and the given basis as an identity value.
4226      *
4227      * @param parallelismThreshold the (estimated) number of elements
4228      * needed for this operation to be executed in parallel
4229      * @param transformer a function returning the transformation
4230      * for an element
4231      * @param basis the identity (initial default value) for the reduction
4232      * @param reducer a commutative associative combining function
4233      * @return the result of accumulating the given transformation
4234      * of all values
4235      * @since 1.8
4236      */
4237     public int reduceValuesToInt(long parallelismThreshold,
4238                                  ToIntFunction<? super V> transformer,
4239                                  int basis,
4240                                  IntBinaryOperator reducer) {
4241         if (transformer == null || reducer == null)
4242             throw new NullPointerException();
4243         return new MapReduceValuesToIntTask<K,V>
4244             (null, batchFor(parallelismThreshold), 0, 0, table,
4245              null, transformer, basis, reducer).invoke();
4246     }
4247 
4248     /**
4249      * Performs the given action for each entry.
4250      *
4251      * @param parallelismThreshold the (estimated) number of elements
4252      * needed for this operation to be executed in parallel
4253      * @param action the action
4254      * @since 1.8
4255      */
4256     public void forEachEntry(long parallelismThreshold,
4257                              Consumer<? super Map.Entry<K,V>> action) {
4258         if (action == null) throw new NullPointerException();
4259         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4260                                   action).invoke();
4261     }
4262 
4263     /**
4264      * Performs the given action for each non-null transformation
4265      * of each entry.
4266      *
4267      * @param parallelismThreshold the (estimated) number of elements
4268      * needed for this operation to be executed in parallel
4269      * @param transformer a function returning the transformation
4270      * for an element, or null if there is no transformation (in
4271      * which case the action is not applied)
4272      * @param action the action
4273      * @param <U> the return type of the transformer
4274      * @since 1.8
4275      */
4276     public <U> void forEachEntry(long parallelismThreshold,
4277                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4278                                  Consumer<? super U> action) {
4279         if (transformer == null || action == null)
4280             throw new NullPointerException();
4281         new ForEachTransformedEntryTask<K,V,U>
4282             (null, batchFor(parallelismThreshold), 0, 0, table,
4283              transformer, action).invoke();
4284     }
4285 
4286     /**
4287      * Returns a non-null result from applying the given search
4288      * function on each entry, or null if none.  Upon success,
4289      * further element processing is suppressed and the results of
4290      * any other parallel invocations of the search function are
4291      * ignored.
4292      *
4293      * @param parallelismThreshold the (estimated) number of elements
4294      * needed for this operation to be executed in parallel
4295      * @param searchFunction a function returning a non-null
4296      * result on success, else null
4297      * @param <U> the return type of the search function
4298      * @return a non-null result from applying the given search
4299      * function on each entry, or null if none
4300      * @since 1.8
4301      */
4302     public <U> U searchEntries(long parallelismThreshold,
4303                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4304         if (searchFunction == null) throw new NullPointerException();
4305         return new SearchEntriesTask<K,V,U>
4306             (null, batchFor(parallelismThreshold), 0, 0, table,
4307              searchFunction, new AtomicReference<U>()).invoke();
4308     }
4309 
4310     /**
4311      * Returns the result of accumulating all entries using the
4312      * given reducer to combine values, or null if none.
4313      *
4314      * @param parallelismThreshold the (estimated) number of elements
4315      * needed for this operation to be executed in parallel
4316      * @param reducer a commutative associative combining function
4317      * @return the result of accumulating all entries
4318      * @since 1.8
4319      */
4320     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4321                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4322         if (reducer == null) throw new NullPointerException();
4323         return new ReduceEntriesTask<K,V>
4324             (null, batchFor(parallelismThreshold), 0, 0, table,
4325              null, reducer).invoke();
4326     }
4327 
4328     /**
4329      * Returns the result of accumulating the given transformation
4330      * of all entries using the given reducer to combine values,
4331      * or null if none.
4332      *
4333      * @param parallelismThreshold the (estimated) number of elements
4334      * needed for this operation to be executed in parallel
4335      * @param transformer a function returning the transformation
4336      * for an element, or null if there is no transformation (in
4337      * which case it is not combined)
4338      * @param reducer a commutative associative combining function
4339      * @param <U> the return type of the transformer
4340      * @return the result of accumulating the given transformation
4341      * of all entries
4342      * @since 1.8
4343      */
4344     public <U> U reduceEntries(long parallelismThreshold,
4345                                Function<Map.Entry<K,V>, ? extends U> transformer,
4346                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4347         if (transformer == null || reducer == null)
4348             throw new NullPointerException();
4349         return new MapReduceEntriesTask<K,V,U>
4350             (null, batchFor(parallelismThreshold), 0, 0, table,
4351              null, transformer, reducer).invoke();
4352     }
4353 
4354     /**
4355      * Returns the result of accumulating the given transformation
4356      * of all entries using the given reducer to combine values,
4357      * and the given basis as an identity value.
4358      *
4359      * @param parallelismThreshold the (estimated) number of elements
4360      * needed for this operation to be executed in parallel
4361      * @param transformer a function returning the transformation
4362      * for an element
4363      * @param basis the identity (initial default value) for the reduction
4364      * @param reducer a commutative associative combining function
4365      * @return the result of accumulating the given transformation
4366      * of all entries
4367      * @since 1.8
4368      */
4369     public double reduceEntriesToDouble(long parallelismThreshold,
4370                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4371                                         double basis,
4372                                         DoubleBinaryOperator reducer) {
4373         if (transformer == null || reducer == null)
4374             throw new NullPointerException();
4375         return new MapReduceEntriesToDoubleTask<K,V>
4376             (null, batchFor(parallelismThreshold), 0, 0, table,
4377              null, transformer, basis, reducer).invoke();
4378     }
4379 
4380     /**
4381      * Returns the result of accumulating the given transformation
4382      * of all entries using the given reducer to combine values,
4383      * and the given basis as an identity value.
4384      *
4385      * @param parallelismThreshold the (estimated) number of elements
4386      * needed for this operation to be executed in parallel
4387      * @param transformer a function returning the transformation
4388      * for an element
4389      * @param basis the identity (initial default value) for the reduction
4390      * @param reducer a commutative associative combining function
4391      * @return the result of accumulating the given transformation
4392      * of all entries
4393      * @since 1.8
4394      */
4395     public long reduceEntriesToLong(long parallelismThreshold,
4396                                     ToLongFunction<Map.Entry<K,V>> transformer,
4397                                     long basis,
4398                                     LongBinaryOperator reducer) {
4399         if (transformer == null || reducer == null)
4400             throw new NullPointerException();
4401         return new MapReduceEntriesToLongTask<K,V>
4402             (null, batchFor(parallelismThreshold), 0, 0, table,
4403              null, transformer, basis, reducer).invoke();
4404     }
4405 
4406     /**
4407      * Returns the result of accumulating the given transformation
4408      * of all entries using the given reducer to combine values,
4409      * and the given basis as an identity value.
4410      *
4411      * @param parallelismThreshold the (estimated) number of elements
4412      * needed for this operation to be executed in parallel
4413      * @param transformer a function returning the transformation
4414      * for an element
4415      * @param basis the identity (initial default value) for the reduction
4416      * @param reducer a commutative associative combining function
4417      * @return the result of accumulating the given transformation
4418      * of all entries
4419      * @since 1.8
4420      */
4421     public int reduceEntriesToInt(long parallelismThreshold,
4422                                   ToIntFunction<Map.Entry<K,V>> transformer,
4423                                   int basis,
4424                                   IntBinaryOperator reducer) {
4425         if (transformer == null || reducer == null)
4426             throw new NullPointerException();
4427         return new MapReduceEntriesToIntTask<K,V>
4428             (null, batchFor(parallelismThreshold), 0, 0, table,
4429              null, transformer, basis, reducer).invoke();
4430     }
4431 
4432 
4433     /* ----------------Views -------------- */
4434 
4435     /**
4436      * Base class for views.
4437      */
4438     abstract static class CollectionView<K,V,E>
4439         implements Collection<E>, java.io.Serializable {
4440         private static final long serialVersionUID = 7249069246763182397L;
4441         final ConcurrentHashMap<K,V> map;
4442         CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4443 
4444         /**
4445          * Returns the map backing this view.
4446          *
4447          * @return the map backing this view
4448          */
4449         public ConcurrentHashMap<K,V> getMap() { return map; }
4450 
4451         /**
4452          * Removes all of the elements from this view, by removing all
4453          * the mappings from the map backing this view.
4454          */
4455         public final void clear()      { map.clear(); }
4456         public final int size()        { return map.size(); }
4457         public final boolean isEmpty() { return map.isEmpty(); }
4458 
4459         // implementations below rely on concrete classes supplying these
4460         // abstract methods
4461         /**
4462          * Returns an iterator over the elements in this collection.
4463          *
4464          * <p>The returned iterator is
4465          * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4466          *
4467          * @return an iterator over the elements in this collection
4468          */
4469         public abstract Iterator<E> iterator();
4470         public abstract boolean contains(Object o);
4471         public abstract boolean remove(Object o);
4472 
4473         private static final String OOME_MSG = "Required array size too large";
4474 
4475         public final Object[] toArray() {
4476             long sz = map.mappingCount();
4477             if (sz > MAX_ARRAY_SIZE)
4478                 throw new OutOfMemoryError(OOME_MSG);
4479             int n = (int)sz;
4480             Object[] r = new Object[n];
4481             int i = 0;
4482             for (E e : this) {
4483                 if (i == n) {
4484                     if (n >= MAX_ARRAY_SIZE)
4485                         throw new OutOfMemoryError(OOME_MSG);
4486                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4487                         n = MAX_ARRAY_SIZE;
4488                     else
4489                         n += (n >>> 1) + 1;
4490                     r = Arrays.copyOf(r, n);
4491                 }
4492                 r[i++] = e;
4493             }
4494             return (i == n) ? r : Arrays.copyOf(r, i);
4495         }
4496 
4497         @SuppressWarnings("unchecked")
4498         public final <T> T[] toArray(T[] a) {
4499             long sz = map.mappingCount();
4500             if (sz > MAX_ARRAY_SIZE)
4501                 throw new OutOfMemoryError(OOME_MSG);
4502             int m = (int)sz;
4503             T[] r = (a.length >= m) ? a :
4504                 (T[])java.lang.reflect.Array
4505                 .newInstance(a.getClass().getComponentType(), m);
4506             int n = r.length;
4507             int i = 0;
4508             for (E e : this) {
4509                 if (i == n) {
4510                     if (n >= MAX_ARRAY_SIZE)
4511                         throw new OutOfMemoryError(OOME_MSG);
4512                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4513                         n = MAX_ARRAY_SIZE;
4514                     else
4515                         n += (n >>> 1) + 1;
4516                     r = Arrays.copyOf(r, n);
4517                 }
4518                 r[i++] = (T)e;
4519             }
4520             if (a == r && i < n) {
4521                 r[i] = null; // null-terminate
4522                 return r;
4523             }
4524             return (i == n) ? r : Arrays.copyOf(r, i);
4525         }
4526 
4527         /**
4528          * Returns a string representation of this collection.
4529          * The string representation consists of the string representations
4530          * of the collection's elements in the order they are returned by
4531          * its iterator, enclosed in square brackets ({@code "[]"}).
4532          * Adjacent elements are separated by the characters {@code ", "}
4533          * (comma and space).  Elements are converted to strings as by
4534          * {@link String#valueOf(Object)}.
4535          *
4536          * @return a string representation of this collection
4537          */
4538         public final String toString() {
4539             StringBuilder sb = new StringBuilder();
4540             sb.append('[');
4541             Iterator<E> it = iterator();
4542             if (it.hasNext()) {
4543                 for (;;) {
4544                     Object e = it.next();
4545                     sb.append(e == this ? "(this Collection)" : e);
4546                     if (!it.hasNext())
4547                         break;
4548                     sb.append(',').append(' ');
4549                 }
4550             }
4551             return sb.append(']').toString();
4552         }
4553 
4554         public final boolean containsAll(Collection<?> c) {
4555             if (c != this) {
4556                 for (Object e : c) {
4557                     if (e == null || !contains(e))
4558                         return false;
4559                 }
4560             }
4561             return true;
4562         }
4563 
4564         public boolean removeAll(Collection<?> c) {
4565             if (c == null) throw new NullPointerException();
4566             boolean modified = false;
4567             // Use (c instanceof Set) as a hint that lookup in c is as
4568             // efficient as this view
4569             Node<K,V>[] t;
4570             if ((t = map.table) == null) {
4571                 return false;
4572             } else if (c instanceof Set<?> && c.size() > t.length) {
4573                 for (Iterator<?> it = iterator(); it.hasNext(); ) {
4574                     if (c.contains(it.next())) {
4575                         it.remove();
4576                         modified = true;
4577                     }
4578                 }
4579             } else {
4580                 for (Object e : c)
4581                     modified |= remove(e);
4582             }
4583             return modified;
4584         }
4585 
4586         public final boolean retainAll(Collection<?> c) {
4587             if (c == null) throw new NullPointerException();
4588             boolean modified = false;
4589             for (Iterator<E> it = iterator(); it.hasNext();) {
4590                 if (!c.contains(it.next())) {
4591                     it.remove();
4592                     modified = true;
4593                 }
4594             }
4595             return modified;
4596         }
4597 
4598     }
4599 
4600     /**
4601      * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4602      * which additions may optionally be enabled by mapping to a
4603      * common value.  This class cannot be directly instantiated.
4604      * See {@link #keySet() keySet()},
4605      * {@link #keySet(Object) keySet(V)},
4606      * {@link #newKeySet() newKeySet()},
4607      * {@link #newKeySet(int) newKeySet(int)}.
4608      *
4609      * @since 1.8
4610      */
4611     public static class KeySetView<K,V> extends CollectionView<K,V,K>
4612         implements Set<K>, java.io.Serializable {
4613         private static final long serialVersionUID = 7249069246763182397L;
4614         private final V value;
4615         KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4616             super(map);
4617             this.value = value;
4618         }
4619 
4620         /**
4621          * Returns the default mapped value for additions,
4622          * or {@code null} if additions are not supported.
4623          *
4624          * @return the default mapped value for additions, or {@code null}
4625          * if not supported
4626          */
4627         public V getMappedValue() { return value; }
4628 
4629         /**
4630          * {@inheritDoc}
4631          * @throws NullPointerException if the specified key is null
4632          */
4633         public boolean contains(Object o) { return map.containsKey(o); }
4634 
4635         /**
4636          * Removes the key from this map view, by removing the key (and its
4637          * corresponding value) from the backing map.  This method does
4638          * nothing if the key is not in the map.
4639          *
4640          * @param  o the key to be removed from the backing map
4641          * @return {@code true} if the backing map contained the specified key
4642          * @throws NullPointerException if the specified key is null
4643          */
4644         public boolean remove(Object o) { return map.remove(o) != null; }
4645 
4646         /**
4647          * @return an iterator over the keys of the backing map
4648          */
4649         public Iterator<K> iterator() {
4650             Node<K,V>[] t;
4651             ConcurrentHashMap<K,V> m = map;
4652             int f = (t = m.table) == null ? 0 : t.length;
4653             return new KeyIterator<K,V>(t, f, 0, f, m);
4654         }
4655 
4656         /**
4657          * Adds the specified key to this set view by mapping the key to
4658          * the default mapped value in the backing map, if defined.
4659          *
4660          * @param e key to be added
4661          * @return {@code true} if this set changed as a result of the call
4662          * @throws NullPointerException if the specified key is null
4663          * @throws UnsupportedOperationException if no default mapped value
4664          * for additions was provided
4665          */
4666         public boolean add(K e) {
4667             V v;
4668             if ((v = value) == null)
4669                 throw new UnsupportedOperationException();
4670             return map.putVal(e, v, true) == null;
4671         }
4672 
4673         /**
4674          * Adds all of the elements in the specified collection to this set,
4675          * as if by calling {@link #add} on each one.
4676          *
4677          * @param c the elements to be inserted into this set
4678          * @return {@code true} if this set changed as a result of the call
4679          * @throws NullPointerException if the collection or any of its
4680          * elements are {@code null}
4681          * @throws UnsupportedOperationException if no default mapped value
4682          * for additions was provided
4683          */
4684         public boolean addAll(Collection<? extends K> c) {
4685             boolean added = false;
4686             V v;
4687             if ((v = value) == null)
4688                 throw new UnsupportedOperationException();
4689             for (K e : c) {
4690                 if (map.putVal(e, v, true) == null)
4691                     added = true;
4692             }
4693             return added;
4694         }
4695 
4696         public int hashCode() {
4697             int h = 0;
4698             for (K e : this)
4699                 h += e.hashCode();
4700             return h;
4701         }
4702 
4703         public boolean equals(Object o) {
4704             Set<?> c;
4705             return ((o instanceof Set) &&
4706                     ((c = (Set<?>)o) == this ||
4707                      (containsAll(c) && c.containsAll(this))));
4708         }
4709 
4710         public Spliterator<K> spliterator() {
4711             Node<K,V>[] t;
4712             ConcurrentHashMap<K,V> m = map;
4713             long n = m.sumCount();
4714             int f = (t = m.table) == null ? 0 : t.length;
4715             return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4716         }
4717 
4718         public void forEach(Consumer<? super K> action) {
4719             if (action == null) throw new NullPointerException();
4720             Node<K,V>[] t;
4721             if ((t = map.table) != null) {
4722                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4723                 for (Node<K,V> p; (p = it.advance()) != null; )
4724                     action.accept(p.key);
4725             }
4726         }
4727     }
4728 
4729     /**
4730      * A view of a ConcurrentHashMap as a {@link Collection} of
4731      * values, in which additions are disabled. This class cannot be
4732      * directly instantiated. See {@link #values()}.
4733      */
4734     static final class ValuesView<K,V> extends CollectionView<K,V,V>
4735         implements Collection<V>, java.io.Serializable {
4736         private static final long serialVersionUID = 2249069246763182397L;
4737         ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4738         public final boolean contains(Object o) {
4739             return map.containsValue(o);
4740         }
4741 
4742         public final boolean remove(Object o) {
4743             if (o != null) {
4744                 for (Iterator<V> it = iterator(); it.hasNext();) {
4745                     if (o.equals(it.next())) {
4746                         it.remove();
4747                         return true;
4748                     }
4749                 }
4750             }
4751             return false;
4752         }
4753 
4754         public final Iterator<V> iterator() {
4755             ConcurrentHashMap<K,V> m = map;
4756             Node<K,V>[] t;
4757             int f = (t = m.table) == null ? 0 : t.length;
4758             return new ValueIterator<K,V>(t, f, 0, f, m);
4759         }
4760 
4761         public final boolean add(V e) {
4762             throw new UnsupportedOperationException();
4763         }
4764         public final boolean addAll(Collection<? extends V> c) {
4765             throw new UnsupportedOperationException();
4766         }
4767 
4768         @Override public boolean removeAll(Collection<?> c) {
4769             if (c == null) throw new NullPointerException();
4770             boolean modified = false;
4771             for (Iterator<V> it = iterator(); it.hasNext();) {
4772                 if (c.contains(it.next())) {
4773                     it.remove();
4774                     modified = true;
4775                 }
4776             }
4777             return modified;
4778         }
4779 
4780         public boolean removeIf(Predicate<? super V> filter) {
4781             return map.removeValueIf(filter);
4782         }
4783 
4784         public Spliterator<V> spliterator() {
4785             Node<K,V>[] t;
4786             ConcurrentHashMap<K,V> m = map;
4787             long n = m.sumCount();
4788             int f = (t = m.table) == null ? 0 : t.length;
4789             return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4790         }
4791 
4792         public void forEach(Consumer<? super V> action) {
4793             if (action == null) throw new NullPointerException();
4794             Node<K,V>[] t;
4795             if ((t = map.table) != null) {
4796                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4797                 for (Node<K,V> p; (p = it.advance()) != null; )
4798                     action.accept(p.val);
4799             }
4800         }
4801     }
4802 
4803     /**
4804      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4805      * entries.  This class cannot be directly instantiated. See
4806      * {@link #entrySet()}.
4807      */
4808     static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4809         implements Set<Map.Entry<K,V>>, java.io.Serializable {
4810         private static final long serialVersionUID = 2249069246763182397L;
4811         EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4812 
4813         public boolean contains(Object o) {
4814             Object k, v, r; Map.Entry<?,?> e;
4815             return ((o instanceof Map.Entry) &&
4816                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4817                     (r = map.get(k)) != null &&
4818                     (v = e.getValue()) != null &&
4819                     (v == r || v.equals(r)));
4820         }
4821 
4822         public boolean remove(Object o) {
4823             Object k, v; Map.Entry<?,?> e;
4824             return ((o instanceof Map.Entry) &&
4825                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4826                     (v = e.getValue()) != null &&
4827                     map.remove(k, v));
4828         }
4829 
4830         /**
4831          * @return an iterator over the entries of the backing map
4832          */
4833         public Iterator<Map.Entry<K,V>> iterator() {
4834             ConcurrentHashMap<K,V> m = map;
4835             Node<K,V>[] t;
4836             int f = (t = m.table) == null ? 0 : t.length;
4837             return new EntryIterator<K,V>(t, f, 0, f, m);
4838         }
4839 
4840         public boolean add(Entry<K,V> e) {
4841             return map.putVal(e.getKey(), e.getValue(), false) == null;
4842         }
4843 
4844         public boolean addAll(Collection<? extends Entry<K,V>> c) {
4845             boolean added = false;
4846             for (Entry<K,V> e : c) {
4847                 if (add(e))
4848                     added = true;
4849             }
4850             return added;
4851         }
4852 
4853         public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4854             return map.removeEntryIf(filter);
4855         }
4856 
4857         public final int hashCode() {
4858             int h = 0;
4859             Node<K,V>[] t;
4860             if ((t = map.table) != null) {
4861                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4862                 for (Node<K,V> p; (p = it.advance()) != null; ) {
4863                     h += p.hashCode();
4864                 }
4865             }
4866             return h;
4867         }
4868 
4869         public final boolean equals(Object o) {
4870             Set<?> c;
4871             return ((o instanceof Set) &&
4872                     ((c = (Set<?>)o) == this ||
4873                      (containsAll(c) && c.containsAll(this))));
4874         }
4875 
4876         public Spliterator<Map.Entry<K,V>> spliterator() {
4877             Node<K,V>[] t;
4878             ConcurrentHashMap<K,V> m = map;
4879             long n = m.sumCount();
4880             int f = (t = m.table) == null ? 0 : t.length;
4881             return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4882         }
4883 
4884         public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4885             if (action == null) throw new NullPointerException();
4886             Node<K,V>[] t;
4887             if ((t = map.table) != null) {
4888                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4889                 for (Node<K,V> p; (p = it.advance()) != null; )
4890                     action.accept(new MapEntry<K,V>(p.key, p.val, map));
4891             }
4892         }
4893 
4894     }
4895 
4896     // -------------------------------------------------------
4897 
4898     /**
4899      * Base class for bulk tasks. Repeats some fields and code from
4900      * class Traverser, because we need to subclass CountedCompleter.
4901      */
    @SuppressWarnings("serial")
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // next node to use within the current bin
        TableStack<K,V> stack, spare; // saved/reusable traversal state across resizes
        int index;              // current table slot
        int baseIndex;          // next slot of this task's own range
        int baseLimit;          // exclusive upper bound of this task's range
        final int baseSize;     // length of the table this task started with
        int batch;              // split control

        /**
         * Creates a task covering table slots [i, f).  The root task
         * (par == null) covers the whole table; subtasks inherit the
         * parent's baseSize and use f as their limit.
         */
        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version: advances to the next node, or
         * returns null when this task's range is exhausted.  When a
         * ForwardingNode is hit (concurrent resize), traversal follows
         * the new table, saving position via pushState/recoverState.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;  // continue within the current bin's chain
            for (;;) {
                Node<K,V>[] t; int i, n;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;  // range exhausted
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: descend into the new table.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // traverse tree via its list form
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);      // pop back to outer table(s) when done here
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex;  // next slot of this task's range
            }
        }

        /**
         * Saves traversal state on entering a forwarded (resized) table,
         * reusing a spare node when available.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Pops saved state, possibly unwinding several levels, when the
         * current index has passed the end of a forwarded table's splits.
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
4989 
4990     /*
4991      * Task classes. Coded in a regular but ugly format/style to
4992      * simplify checks that each variant differs in the right way from
4993      * others. The null screenings exist because compilers cannot tell
4994      * that we've already null-checked task arguments, so we force
4995      * simplest hoisted bypass to help avoid convoluted traps.
4996      */
4997     @SuppressWarnings("serial")
4998     static final class ForEachKeyTask<K,V>
4999         extends BulkTask<K,V,Void> {
5000         final Consumer<? super K> action;
5001         ForEachKeyTask
5002             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5003              Consumer<? super K> action) {
5004             super(p, b, i, f, t);
5005             this.action = action;
5006         }
5007         public final void compute() {
5008             final Consumer<? super K> action;
5009             if ((action = this.action) != null) {
5010                 for (int i = baseIndex, f, h; batch > 0 &&
5011                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5012                     addToPendingCount(1);
5013                     new ForEachKeyTask<K,V>
5014                         (this, batch >>>= 1, baseLimit = h, f, tab,
5015                          action).fork();
5016                 }
5017                 for (Node<K,V> p; (p = advance()) != null;)
5018                     action.accept(p.key);
5019                 propagateCompletion();
5020             }
5021         }
5022     }
5023 
5024     @SuppressWarnings("serial")
5025     static final class ForEachValueTask<K,V>
5026         extends BulkTask<K,V,Void> {
5027         final Consumer<? super V> action;
5028         ForEachValueTask
5029             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5030              Consumer<? super V> action) {
5031             super(p, b, i, f, t);
5032             this.action = action;
5033         }
5034         public final void compute() {
5035             final Consumer<? super V> action;
5036             if ((action = this.action) != null) {
5037                 for (int i = baseIndex, f, h; batch > 0 &&
5038                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5039                     addToPendingCount(1);
5040                     new ForEachValueTask<K,V>
5041                         (this, batch >>>= 1, baseLimit = h, f, tab,
5042                          action).fork();
5043                 }
5044                 for (Node<K,V> p; (p = advance()) != null;)
5045                     action.accept(p.val);
5046                 propagateCompletion();
5047             }
5048         }
5049     }
5050 
    /** Applies an action to each entry, splitting the range in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Nodes are passed directly: a Node serves as a Map.Entry here.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
5077 
    /** Applies a BiConsumer to each (key, value) pair, in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5104 
    /**
     * Transforms each key and applies the action to each non-null result,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformer results are skipped.
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5137 
    /**
     * Transforms each value and applies the action to each non-null result,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformer results are skipped.
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5170 
    /**
     * Transforms each entry and applies the action to each non-null result,
     * in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformer results are skipped.
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5203 
    /**
     * Transforms each (key, value) pair and applies the action to each
     * non-null result, in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Null transformer results are skipped.
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5237 
    /**
     * Searches keys in parallel for the first non-null result of the
     * search function; once one task succeeds, all others stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a result
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's range, stopping if any task has a result.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First successful CAS wins; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5281 
    /**
     * Searches values in parallel for the first non-null result of the
     * search function; once one task succeeds, all others stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a result
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's range, stopping if any task has a result.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // First successful CAS wins; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5325 
    /**
     * Searches entries in parallel for the first non-null result of the
     * search function; once one task succeeds, all others stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a result
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's range, stopping if any task has a result.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        // First successful CAS wins; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return; // equivalent to break: nothing follows the loop
                    }
                }
            }
        }
    }
5369 
    /**
     * Searches (key, value) pairs in parallel for the first non-null result
     * of the search function; once one task succeeds, all others stop early.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts; this task keeps the lower half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return; // another task already found a result
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's range, stopping if any task has a result.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // First successful CAS wins; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5413 
    /**
     * Reduces all keys using the given reducer, in parallel; the result
     * is null for an empty range.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts, threading subtasks onto 'rights'.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce this task's own range.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // On completion, fold each completed task's forked subtask
                // results into it, walking up toward the root.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5461 
    /**
     * Reduces all values using the given reducer, in parallel; the result
     * is null for an empty range.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts, threading subtasks onto 'rights'.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce this task's own range.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // On completion, fold each completed task's forked subtask
                // results into it, walking up toward the root.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5509 
    /**
     * Reduces all entries using the given reducer, in parallel; the result
     * is null for an empty range.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork the upper half of the remaining range while the split
                // budget (batch) lasts, threading subtasks onto 'rights'.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce this task's own range (Nodes as entries).
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // On completion, fold each completed task's forked subtask
                // results into it, walking up toward the root.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5555 
    /**
     * ForkJoin task that maps each key through {@code transformer} and
     * reduces the non-null transformed values with {@code reducer}.
     * Keys whose transformed value is null are skipped; the result is
     * null when no key in the range produced a non-null value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                   // reduction of this task's subrange (null if none)
        MapReduceKeysTask<K,V,U> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Phase 3: fold finished subtasks' non-null results into their
                // parents as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5609 
    /**
     * ForkJoin task that maps each value through {@code transformer} and
     * reduces the non-null transformed values with {@code reducer}.
     * Values whose transformed result is null are skipped; the result is
     * null when no value in the range produced a non-null result.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                     // reduction of this task's subrange (null if none)
        MapReduceValuesTask<K,V,U> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Phase 3: fold finished subtasks' non-null results into their
                // parents as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5663 
    /**
     * ForkJoin task that maps each entry through {@code transformer} and
     * reduces the non-null transformed values with {@code reducer}.
     * Entries whose transformed value is null are skipped; the result is
     * null when no entry in the range produced a non-null value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                      // reduction of this task's subrange (null if none)
        MapReduceEntriesTask<K,V,U> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange
                // (each Node is itself a Map.Entry, passed directly).
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Phase 3: fold finished subtasks' non-null results into their
                // parents as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5717 
    /**
     * ForkJoin task that maps each (key, value) pair through
     * {@code transformer} and reduces the non-null transformed values
     * with {@code reducer}. Pairs whose transformed value is null are
     * skipped; the result is null when none produced a non-null value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                       // reduction of this task's subrange (null if none)
        MapReduceMappingsTask<K,V,U> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Phase 3: fold finished subtasks' non-null results into their
                // parents as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5771 
    /**
     * ForkJoin task that maps each key to a double via {@code transformer}
     * and reduces with {@code reducer}, starting from the base value
     * {@code basis}. Unlike the reference-typed tasks, the merge phase
     * combines child results unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                            // base value for the reduction
        double result;                                 // reduction of this task's subrange
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5821 
    /**
     * ForkJoin task that maps each value to a double via
     * {@code transformer} and reduces with {@code reducer}, starting from
     * the base value {@code basis}. The merge phase combines child
     * results unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                              // base value for the reduction
        double result;                                   // reduction of this task's subrange
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5871 
    /**
     * ForkJoin task that maps each entry to a double via
     * {@code transformer} and reduces with {@code reducer}, starting from
     * the base value {@code basis}. The merge phase combines child
     * results unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                               // base value for the reduction
        double result;                                    // reduction of this task's subrange
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange
                // (each Node is itself a Map.Entry, passed directly).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5921 
    /**
     * ForkJoin task that maps each (key, value) pair to a double via
     * {@code transformer} and reduces with {@code reducer}, starting from
     * the base value {@code basis}. The merge phase combines child
     * results unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                                // base value for the reduction
        double result;                                     // reduction of this task's subrange
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5971 
    /**
     * ForkJoin task that maps each key to a long via {@code transformer}
     * and reduces with {@code reducer}, starting from the base value
     * {@code basis}. The merge phase combines child results
     * unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;                               // base value for the reduction
        long result;                                    // reduction of this task's subrange
        MapReduceKeysToLongTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6021 
    /**
     * ForkJoin task that maps each value to a long via {@code transformer}
     * and reduces with {@code reducer}, starting from the base value
     * {@code basis}. The merge phase combines child results
     * unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;                                 // base value for the reduction
        long result;                                      // reduction of this task's subrange
        MapReduceValuesToLongTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6071 
    /**
     * ForkJoin task that maps each entry to a long via {@code transformer}
     * and reduces with {@code reducer}, starting from the base value
     * {@code basis}. The merge phase combines child results
     * unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;                                  // base value for the reduction
        long result;                                       // reduction of this task's subrange
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange
                // (each Node is itself a Map.Entry, passed directly).
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6121 
    /**
     * ForkJoin task that maps each (key, value) pair to a long via
     * {@code transformer} and reduces with {@code reducer}, starting from
     * the base value {@code basis}. The merge phase combines child
     * results unconditionally (primitives have no null).
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;                                   // base value for the reduction
        long result;                                        // reduction of this task's subrange
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // linked list of forked right-hand subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;  // each subtask also starts from basis
                // Phase 1: halve the range and fork a subtask for the upper
                // half until the batch budget is exhausted.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Phase 2: transform+reduce this task's remaining subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Phase 3: fold finished subtasks' results into their parents
                // as completions propagate up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6171 
6172     @SuppressWarnings("serial")
6173     static final class MapReduceKeysToIntTask<K,V>
6174         extends BulkTask<K,V,Integer> {
6175         final ToIntFunction<? super K> transformer;
6176         final IntBinaryOperator reducer;
6177         final int basis;
6178         int result;
6179         MapReduceKeysToIntTask<K,V> rights, nextRight;
6180         MapReduceKeysToIntTask
6181             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6182              MapReduceKeysToIntTask<K,V> nextRight,
6183              ToIntFunction<? super K> transformer,
6184              int basis,
6185              IntBinaryOperator reducer) {
6186             super(p, b, i, f, t); this.nextRight = nextRight;
6187             this.transformer = transformer;
6188             this.basis = basis; this.reducer = reducer;
6189         }
6190         public final Integer getRawResult() { return result; }
6191         public final void compute() {
6192             final ToIntFunction<? super K> transformer;
6193             final IntBinaryOperator reducer;
6194             if ((transformer = this.transformer) != null &&
6195                 (reducer = this.reducer) != null) {
6196                 int r = this.basis;
6197                 for (int i = baseIndex, f, h; batch > 0 &&
6198                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6199                     addToPendingCount(1);
6200                     (rights = new MapReduceKeysToIntTask<K,V>
6201                      (this, batch >>>= 1, baseLimit = h, f, tab,
6202                       rights, transformer, r, reducer)).fork();
6203                 }
6204                 for (Node<K,V> p; (p = advance()) != null; )
6205                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6206                 result = r;
6207                 CountedCompleter<?> c;
6208                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6209                     @SuppressWarnings("unchecked")
6210                     MapReduceKeysToIntTask<K,V>
6211                         t = (MapReduceKeysToIntTask<K,V>)c,
6212                         s = t.rights;
6213                     while (s != null) {
6214                         t.result = reducer.applyAsInt(t.result, s.result);
6215                         s = t.rights = s.nextRight;
6216                     }
6217                 }
6218             }
6219         }
6220     }
6221 
6222     @SuppressWarnings("serial")
6223     static final class MapReduceValuesToIntTask<K,V>
6224         extends BulkTask<K,V,Integer> {
6225         final ToIntFunction<? super V> transformer;
6226         final IntBinaryOperator reducer;
6227         final int basis;
6228         int result;
6229         MapReduceValuesToIntTask<K,V> rights, nextRight;
6230         MapReduceValuesToIntTask
6231             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6232              MapReduceValuesToIntTask<K,V> nextRight,
6233              ToIntFunction<? super V> transformer,
6234              int basis,
6235              IntBinaryOperator reducer) {
6236             super(p, b, i, f, t); this.nextRight = nextRight;
6237             this.transformer = transformer;
6238             this.basis = basis; this.reducer = reducer;
6239         }
6240         public final Integer getRawResult() { return result; }
6241         public final void compute() {
6242             final ToIntFunction<? super V> transformer;
6243             final IntBinaryOperator reducer;
6244             if ((transformer = this.transformer) != null &&
6245                 (reducer = this.reducer) != null) {
6246                 int r = this.basis;
6247                 for (int i = baseIndex, f, h; batch > 0 &&
6248                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6249                     addToPendingCount(1);
6250                     (rights = new MapReduceValuesToIntTask<K,V>
6251                      (this, batch >>>= 1, baseLimit = h, f, tab,
6252                       rights, transformer, r, reducer)).fork();
6253                 }
6254                 for (Node<K,V> p; (p = advance()) != null; )
6255                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6256                 result = r;
6257                 CountedCompleter<?> c;
6258                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6259                     @SuppressWarnings("unchecked")
6260                     MapReduceValuesToIntTask<K,V>
6261                         t = (MapReduceValuesToIntTask<K,V>)c,
6262                         s = t.rights;
6263                     while (s != null) {
6264                         t.result = reducer.applyAsInt(t.result, s.result);
6265                         s = t.rights = s.nextRight;
6266                     }
6267                 }
6268             }
6269         }
6270     }
6271 
6272     @SuppressWarnings("serial")
6273     static final class MapReduceEntriesToIntTask<K,V>
6274         extends BulkTask<K,V,Integer> {
6275         final ToIntFunction<Map.Entry<K,V>> transformer;
6276         final IntBinaryOperator reducer;
6277         final int basis;
6278         int result;
6279         MapReduceEntriesToIntTask<K,V> rights, nextRight;
6280         MapReduceEntriesToIntTask
6281             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6282              MapReduceEntriesToIntTask<K,V> nextRight,
6283              ToIntFunction<Map.Entry<K,V>> transformer,
6284              int basis,
6285              IntBinaryOperator reducer) {
6286             super(p, b, i, f, t); this.nextRight = nextRight;
6287             this.transformer = transformer;
6288             this.basis = basis; this.reducer = reducer;
6289         }
6290         public final Integer getRawResult() { return result; }
6291         public final void compute() {
6292             final ToIntFunction<Map.Entry<K,V>> transformer;
6293             final IntBinaryOperator reducer;
6294             if ((transformer = this.transformer) != null &&
6295                 (reducer = this.reducer) != null) {
6296                 int r = this.basis;
6297                 for (int i = baseIndex, f, h; batch > 0 &&
6298                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6299                     addToPendingCount(1);
6300                     (rights = new MapReduceEntriesToIntTask<K,V>
6301                      (this, batch >>>= 1, baseLimit = h, f, tab,
6302                       rights, transformer, r, reducer)).fork();
6303                 }
6304                 for (Node<K,V> p; (p = advance()) != null; )
6305                     r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6306                 result = r;
6307                 CountedCompleter<?> c;
6308                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6309                     @SuppressWarnings("unchecked")
6310                     MapReduceEntriesToIntTask<K,V>
6311                         t = (MapReduceEntriesToIntTask<K,V>)c,
6312                         s = t.rights;
6313                     while (s != null) {
6314                         t.result = reducer.applyAsInt(t.result, s.result);
6315                         s = t.rights = s.nextRight;
6316                     }
6317                 }
6318             }
6319         }
6320     }
6321 
6322     @SuppressWarnings("serial")
6323     static final class MapReduceMappingsToIntTask<K,V>
6324         extends BulkTask<K,V,Integer> {
6325         final ToIntBiFunction<? super K, ? super V> transformer;
6326         final IntBinaryOperator reducer;
6327         final int basis;
6328         int result;
6329         MapReduceMappingsToIntTask<K,V> rights, nextRight;
6330         MapReduceMappingsToIntTask
6331             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6332              MapReduceMappingsToIntTask<K,V> nextRight,
6333              ToIntBiFunction<? super K, ? super V> transformer,
6334              int basis,
6335              IntBinaryOperator reducer) {
6336             super(p, b, i, f, t); this.nextRight = nextRight;
6337             this.transformer = transformer;
6338             this.basis = basis; this.reducer = reducer;
6339         }
6340         public final Integer getRawResult() { return result; }
6341         public final void compute() {
6342             final ToIntBiFunction<? super K, ? super V> transformer;
6343             final IntBinaryOperator reducer;
6344             if ((transformer = this.transformer) != null &&
6345                 (reducer = this.reducer) != null) {
6346                 int r = this.basis;
6347                 for (int i = baseIndex, f, h; batch > 0 &&
6348                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6349                     addToPendingCount(1);
6350                     (rights = new MapReduceMappingsToIntTask<K,V>
6351                      (this, batch >>>= 1, baseLimit = h, f, tab,
6352                       rights, transformer, r, reducer)).fork();
6353                 }
6354                 for (Node<K,V> p; (p = advance()) != null; )
6355                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6356                 result = r;
6357                 CountedCompleter<?> c;
6358                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6359                     @SuppressWarnings("unchecked")
6360                     MapReduceMappingsToIntTask<K,V>
6361                         t = (MapReduceMappingsToIntTask<K,V>)c,
6362                         s = t.rights;
6363                     while (s != null) {
6364                         t.result = reducer.applyAsInt(t.result, s.result);
6365                         s = t.rights = s.nextRight;
6366                     }
6367                 }
6368             }
6369         }
6370     }
6371 
    // Unsafe mechanics
    // Raw field/array offsets resolved once at class initialization and used
    // elsewhere in this class for Unsafe-based CAS/volatile access.
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL;        // offset of ConcurrentHashMap.sizeCtl
    private static final long TRANSFERINDEX;  // offset of ConcurrentHashMap.transferIndex
    private static final long BASECOUNT;      // offset of ConcurrentHashMap.baseCount
    private static final long CELLSBUSY;      // offset of ConcurrentHashMap.cellsBusy
    private static final long CELLVALUE;      // offset of CounterCell.value
    private static final int ABASE;           // base offset of Node[] element storage
    private static final int ASHIFT;          // log2 of the Node[] element scale

    static {
        try {
            // Resolve per-field offsets via reflection; any failure is fatal
            // and surfaces as an Error from the static initializer below.
            SIZECTL = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));

            CELLVALUE = U.objectFieldOffset
                (CounterCell.class.getDeclaredField("value"));

            ABASE = U.arrayBaseOffset(Node[].class);
            int scale = U.arrayIndexScale(Node[].class);
            // Shift-based element addressing requires a power-of-two scale;
            // presumably elements are addressed as ABASE + (i << ASHIFT) --
            // the accessing code is elsewhere in this file.
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            // ASHIFT = log2(scale).
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (ReflectiveOperationException e) {
            throw new Error(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
6409 }