diff a/test/micro/org/openjdk/bench/valhalla/corelibs/mapprotos/XHashMap.java b/test/micro/org/openjdk/bench/valhalla/corelibs/mapprotos/XHashMap.java
--- /dev/null
+++ b/test/micro/org/openjdk/bench/valhalla/corelibs/mapprotos/XHashMap.java
@@ -0,0 +1,2368 @@
+/*
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.bench.valhalla.corelibs.mapprotos;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.AbstractCollection;
+//import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.ConcurrentModificationException;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+/**
+ * Hash map implementation that uses inline class entries in the initial table
+ * and maintains a link list of separate Node entries for key/value pairs
+ * that have the same hash value. The handling of the link list is the same
+ * as the original java.util.HashMap.
+ * The primary entry array is larger than in HashMap due to the inline storage
+ * of the entries but since it replaces the separate Node instance for the first
+ * Node, the overall memory usage is less for a reasonably full table.
+ * The TreeNode organization is not yet implemented.
+ *
+ * Hash table based implementation of the {@code Map} interface. This
+ * implementation provides all of the optional map operations, and permits
+ * {@code null} values and the {@code null} key. (The {@code HashMap}
+ * class is roughly equivalent to {@code Hashtable}, except that it is
+ * unsynchronized and permits nulls.) This class makes no guarantees as to
+ * the order of the map; in particular, it does not guarantee that the order
+ * will remain constant over time.
+ *
+ *
+ * <p>This implementation provides constant-time performance for the basic
+ * operations ({@code get} and {@code put}), assuming the hash function
+ * disperses the elements properly among the buckets. Iteration over
+ * collection views requires time proportional to the "capacity" of the
+ * {@code HashMap} instance (the number of buckets) plus its size (the number
+ * of key-value mappings). Thus, it's very important not to set the initial
+ * capacity too high (or the load factor too low) if iteration performance is
+ * important.
+ *
+ *
+ * <p>An instance of {@code HashMap} has two parameters that affect its
+ * performance: <i>initial capacity</i> and <i>load factor</i>. The
+ * <i>capacity</i> is the number of buckets in the hash table, and the initial
+ * capacity is simply the capacity at the time the hash table is created. The
+ * <i>load factor</i> is a measure of how full the hash table is allowed to
+ * get before its capacity is automatically increased. When the number of
+ * entries in the hash table exceeds the product of the load factor and the
+ * current capacity, the hash table is rehashed (that is, internal data
+ * structures are rebuilt) so that the hash table has approximately twice the
+ * number of buckets.
+ *
+ *
+ * <p>As a general rule, the default load factor (.75) offers a good
+ * tradeoff between time and space costs. Higher values decrease the
+ * space overhead but increase the lookup cost (reflected in most of
+ * the operations of the {@code HashMap} class, including
+ * {@code get} and {@code put}). The expected number of entries in
+ * the map and its load factor should be taken into account when
+ * setting its initial capacity, so as to minimize the number of
+ * rehash operations. If the initial capacity is greater than the
+ * maximum number of entries divided by the load factor, no rehash
+ * operations will ever occur.
+ *
+ *
+ * <p>If many mappings are to be stored in a {@code HashMap}
+ * instance, creating it with a sufficiently large capacity will allow
+ * the mappings to be stored more efficiently than letting it perform
+ * automatic rehashing as needed to grow the table. Note that using
+ * many keys with the same {@code hashCode()} is a sure way to slow
+ * down performance of any hash table. To ameliorate impact, when keys
+ * are {@link Comparable}, this class may use comparison order among
+ * keys to help break ties.
+ *
+ *
+ * <p><strong>Note that this implementation is not synchronized.</strong>
+ * If multiple threads access a hash map concurrently, and at least one of
+ * the threads modifies the map structurally, it <em>must</em> be
+ * synchronized externally. (A structural modification is any operation
+ * that adds or deletes one or more mappings; merely changing the value
+ * associated with a key that an instance already contains is not a
+ * structural modification.) This is typically accomplished by
+ * synchronizing on some object that naturally encapsulates the map.
+ *
+ * If no such object exists, the map should be "wrapped" using the
+ * {@link Collections#synchronizedMap Collections.synchronizedMap}
+ * method. This is best done at creation time, to prevent accidental
+ * unsynchronized access to the map:
+ * Map m = Collections.synchronizedMap(new HashMap(...));
+ *
+ * The iterators returned by all of this class's "collection view methods"
+ * are fail-fast: if the map is structurally modified at any time after
+ * the iterator is created, in any way except through the iterator's own
+ * {@code remove} method, the iterator will throw a
+ * {@link ConcurrentModificationException}. Thus, in the face of concurrent
+ * modification, the iterator fails quickly and cleanly, rather than risking
+ * arbitrary, non-deterministic behavior at an undetermined time in the
+ * future.
+ *
+ *
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification. Fail-fast iterators
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness: the fail-fast behavior of iterators
+ * should be used only to detect bugs.
+ *
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
+ * Java Collections Framework</a>.
+ *
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ *
+ * @author Doug Lea
+ * @author Josh Bloch
+ * @author Arthur van Hoff
+ * @author Neal Gafter
+ * @see Object#hashCode()
+ * @see Collection
+ * @see Map
+ * @see TreeMap
+ * @see Hashtable
+ * @since 1.2
+ */
+public class XHashMap extends XAbstractMap
+ implements Map, Cloneable, Serializable {
+
+ private static final long serialVersionUID = 362498820763181265L;
+
+ /*
+ * Implementation notes.
+ *
+ * This map usually acts as a binned (bucketed) hash table, but
+ * when bins get too large, they are transformed into bins of
+ * TreeNodes, each structured similarly to those in
+ * java.util.TreeMap. Most methods try to use normal bins, but
+ * relay to TreeNode methods when applicable (simply by checking
+ * instanceof a node). Bins of TreeNodes may be traversed and
+ * used like any others, but additionally support faster lookup
+ * when overpopulated. However, since the vast majority of bins in
+ * normal use are not overpopulated, checking for existence of
+ * tree bins may be delayed in the course of table methods.
+ *
+ * Tree bins (i.e., bins whose elements are all TreeNodes) are
+ * ordered primarily by hashCode, but in the case of ties, if two
+     * elements are of the same "class C implements Comparable<C>",
+ * type then their compareTo method is used for ordering. (We
+ * conservatively check generic types via reflection to validate
+ * this -- see method comparableClassFor). The added complexity
+ * of tree bins is worthwhile in providing worst-case O(log n)
+ * operations when keys either have distinct hashes or are
+ * orderable, Thus, performance degrades gracefully under
+ * accidental or malicious usages in which hashCode() methods
+ * return values that are poorly distributed, as well as those in
+ * which many keys share a hashCode, so long as they are also
+ * Comparable. (If neither of these apply, we may waste about a
+ * factor of two in time and space compared to taking no
+ * precautions. But the only known cases stem from poor user
+ * programming practices that are already so slow that this makes
+ * little difference.)
+ *
+ * Because TreeNodes are about twice the size of regular nodes, we
+ * use them only when bins contain enough nodes to warrant use
+ * (see TREEIFY_THRESHOLD). And when they become too small (due to
+ * removal or resizing) they are converted back to plain bins. In
+ * usages with well-distributed user hashCodes, tree bins are
+ * rarely used. Ideally, under random hashCodes, the frequency of
+ * nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average for the default resizing
+ * threshold of 0.75, although with a large variance because of
+ * resizing granularity. Ignoring variance, the expected
+ * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
+ * factorial(k)). The first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * The root of a tree bin is normally its first node. However,
+ * sometimes (currently only upon Iterator.remove), the root might
+ * be elsewhere, but can be recovered following parent links
+ * (method TreeNode.root()).
+ *
+ * All applicable internal methods accept a hash code as an
+ * argument (as normally supplied from a public method), allowing
+ * them to call each other without recomputing user hashCodes.
+ * Most internal methods also accept a "tab" argument, that is
+ * normally the current table, but may be a new or old one when
+ * resizing or converting.
+ *
+ * When bin lists are treeified, split, or untreeified, we keep
+ * them in the same relative access/traversal order (i.e., field
+ * Node.next) to better preserve locality, and to slightly
+ * simplify handling of splits and traversals that invoke
+ * iterator.remove. When using comparators on insertion, to keep a
+ * total ordering (or as close as is required here) across
+ * rebalancings, we compare classes and identityHashCodes as
+ * tie-breakers.
+ *
+ * The use and transitions among plain vs tree modes is
+ * complicated by the existence of subclass LinkedHashMap. See
+ * below for hook methods defined to be invoked upon insertion,
+ * removal and access that allow LinkedHashMap internals to
+ * otherwise remain independent of these mechanics. (This also
+ * requires that a map instance be passed to some utility methods
+ * that may create new nodes.)
+ *
+ * The concurrent-programming-like SSA-based coding style helps
+ * avoid aliasing errors amid all of the twisty pointer operations.
+ */
+
+ /**
+ * The default initial capacity - MUST be a power of two.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly specified
+ * by either of the constructors with arguments.
+ * MUST be a power of two <= 1<<30.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The load factor used when none specified in constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The bin count threshold for using a tree rather than list for a
+ * bin. Bins are converted to trees when adding an element to a
+ * bin with at least this many nodes. The value must be greater
+ * than 2 and should be at least 8 to mesh with assumptions in
+ * tree removal about conversion back to plain bins upon
+ * shrinkage.
+ */
+ static final int TREEIFY_THRESHOLD = 8;
+
+ /**
+ * The bin count threshold for untreeifying a (split) bin during a
+ * resize operation. Should be less than TREEIFY_THRESHOLD, and at
+ * most 6 to mesh with shrinkage detection under removal.
+ */
+ static final int UNTREEIFY_THRESHOLD = 6;
+
+ /**
+ * The smallest table capacity for which bins may be treeified.
+ * (Otherwise the table is resized if too many nodes in a bin.)
+ * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
+ * between resizing and treeification thresholds.
+ */
+ static final int MIN_TREEIFY_CAPACITY = 64;
+
+    /**
+     * Returns the all-default XNode that marks an unused table slot
+     * (hash == 0, key == null, value == null; see XNode.isEmpty).
+     */
+    private XNode<K,V> emptyXNode() {
+        return XNode.default;
+    }
+ /**
+ * Basic hash bin node, used for most entries. (See below for
+ * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
+ */
+    static inline class XNode<K,V> implements Map.Entry<K,V> {
+        final int hash;
+        final K key;
+        V value;
+        // Head of the overflow chain for this bucket; null when the inline
+        // entry is the only mapping with this table index.
+        Node<K,V> next;
+
+        XNode(int hash, K key, V value, Node<K,V> next) {
+            this.hash = hash;
+            this.key = key;
+            this.value = value;
+            this.next = next;
+        }
+
+        // True for the all-default instance used to mark an empty table slot.
+        boolean isEmpty() {
+            return hash == 0 && key == null && value == null;
+        }
+        public final K getKey()        { return key; }
+        public final V getValue()      { return value; }
+        public final String toString() { return key + "=" + value; }
+
+        public final int hashCode() {
+            return Objects.hashCode(key) ^ Objects.hashCode(value);
+        }
+
+        public final V setValue(V newValue) {
+            // Inline-class instances are immutable; a value is replaced by
+            // storing a new XNode into the table (see XNodeWrapper.setValue).
+            throw new IllegalStateException("XNode cannot set a value");
+        }
+
+        public final boolean equals(Object o) {
+            if (o instanceof Map.Entry) {
+                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+                if (Objects.equals(key, e.getKey()) &&
+                    Objects.equals(value, e.getValue()))
+                    return true;
+            }
+            return false;
+        }
+    }
+
+ /**
+ * Basic hash bin node, used for overflow entries. (See below for
+ * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
+ */
+    static class Node<K,V> implements Map.Entry<K,V> {
+        final int hash;
+        final K key;
+        V value;
+        Node<K,V> next;
+
+        Node(int hash, K key, V value, Node<K,V> next) {
+            this.hash = hash;
+            this.key = key;
+            this.value = value;
+            this.next = next;
+        }
+
+        public final K getKey()        { return key; }
+        public final V getValue()      { return value; }
+        public final String toString() { return key + "=" + value; }
+        public final int hashCode() {
+            return Objects.hashCode(key) ^ Objects.hashCode(value);
+        }
+
+        public final V setValue(V newValue) {
+            V oldValue = value;
+            value = newValue;
+            return oldValue;
+        }
+
+        public final boolean equals(Object o) {
+            if (o == this)
+                return true;
+            if (o instanceof Map.Entry) {
+                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+                if (Objects.equals(key, e.getKey()) &&
+                    Objects.equals(value, e.getValue()))
+                    return true;
+            }
+            return false;
+        }
+    }
+
+    /**
+     * Inline Map.Entry view over the primary-table entry at {@code index};
+     * reads and writes go through the enclosing map's table.
+     */
+    inline class XNodeWrapper implements Map.Entry<K,V> {
+        int index;
+
+        XNodeWrapper(int index) {
+            this.index = index;
+        }
+
+        public K getKey() {
+            XNode<K,V> e = table[index];
+            return e.isEmpty() ? null : e.key;
+        }
+
+        public V getValue() {
+            XNode<K,V> e = table[index];
+            return e.isEmpty() ? null : e.value;
+        }
+
+        /**
+         * Replaces the value corresponding to this entry with the specified
+         * value (optional operation). (Writes through to the map.) The
+         * behavior of this call is undefined if the mapping has already been
+         * removed from the map (by the iterator's {@code remove} operation).
+         *
+         * @param value new value to be stored in this entry
+         * @return old value corresponding to the entry
+         * @throws UnsupportedOperationException if the {@code put} operation
+         *         is not supported by the backing map
+         * @throws ClassCastException if the class of the specified value
+         *         prevents it from being stored in the backing map
+         * @throws NullPointerException if the backing map does not permit
+         *         null values, and the specified value is null
+         * @throws IllegalArgumentException if some property of this value
+         *         prevents it from being stored in the backing map
+         * @throws IllegalStateException implementations may, but are not
+         *         required to, throw this exception if the entry has been
+         *         removed from the backing map.
+         */
+        public V setValue(V value) {
+            XNode<K,V> e = table[index];
+            assert !e.isEmpty();
+            // XNode is immutable; replace the whole inline entry in place.
+            table[index] = new XNode<>(e.hash, e.key, value, e.next);
+            return e.value;
+        }
+    }
+ /* ---------------- Static utilities -------------- */
+
+ /**
+ * Computes key.hashCode() and spreads (XORs) higher bits of hash
+ * to lower. Because the table uses power-of-two masking, sets of
+ * hashes that vary only in bits above the current mask will
+ * always collide. (Among known examples are sets of Float keys
+ * holding consecutive whole numbers in small tables.) So we
+ * apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed (so don't benefit from
+ * spreading), and because we use trees to handle large sets of
+ * collisions in bins, we just XOR some shifted bits in the
+ * cheapest possible way to reduce systematic lossage, as well as
+ * to incorporate impact of the highest bits that would otherwise
+ * never be used in index calculations because of table bounds.
+ */
+ static final int hash(Object key) {
+ int h;
+ return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
+ }
+
+ /**
+     * Returns x's Class if it is of the form "class C implements
+     * Comparable<C>", else null.
+ */
+    static Class<?> comparableClassFor(Object x) {
+        if (x instanceof Comparable) {
+            Class<?> c; Type[] ts, as; ParameterizedType p;
+            if ((c = x.getClass()) == String.class) // bypass checks
+                return c;
+            if ((ts = c.getGenericInterfaces()) != null) {
+                for (Type t : ts) {
+                    if ((t instanceof ParameterizedType) &&
+                        ((p = (ParameterizedType) t).getRawType() ==
+                         Comparable.class) &&
+                        (as = p.getActualTypeArguments()) != null &&
+                        as.length == 1 && as[0] == c) // type arg is c
+                        return c;
+                }
+            }
+        }
+        return null;
+    }
+
+ /**
+ * Returns k.compareTo(x) if x matches kc (k's screened comparable
+ * class), else 0.
+ */
+    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+    static int compareComparables(Class<?> kc, Object k, Object x) {
+        // Only compare when x is exactly the screened comparable class.
+        return (x == null || x.getClass() != kc ? 0 :
+                ((Comparable)k).compareTo(x));
+    }
+
+ /**
+ * Returns a power of two size for the given target capacity.
+ */
+    static final int tableSizeFor(int cap) {
+        // Smear: fill every bit below the highest set bit of (cap - 1).
+        int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
+        if (n < 0) {
+            return 1;
+        }
+        return (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+    }
+
+ /* ---------------- Fields -------------- */
+
+    /**
+     * The table, initialized on first use, and resized as
+     * necessary. When allocated, length is always a power of two.
+     * (We also tolerate length zero in some operations to allow
+     * bootstrapping mechanics that are currently not needed.)
+     */
+    transient XNode<K,V>[] table;
+
+    /**
+     * Holds cached entrySet(). Note that AbstractMap fields are used
+     * for keySet() and values().
+     */
+    transient Set<Map.Entry<K,V>> entrySet;
+
+ /**
+ * The number of key-value mappings contained in this map.
+ */
+ transient int size;
+
+ /**
+ * The number of times this HashMap has been structurally modified
+ * Structural modifications are those that change the number of mappings in
+ * the HashMap or otherwise modify its internal structure (e.g.,
+ * rehash). This field is used to make iterators on Collection-views of
+ * the HashMap fail-fast. (See ConcurrentModificationException).
+ */
+ transient int modCount;
+
+ /**
+ * The next size value at which to resize (capacity * load factor).
+ *
+ * @serial
+ */
+ // (The javadoc description is true upon serialization.
+ // Additionally, if the table array has not been allocated, this
+ // field holds the initial array capacity, or zero signifying
+ // DEFAULT_INITIAL_CAPACITY.)
+ int threshold;
+
+ /**
+ * The load factor for the hash table.
+ *
+ * @serial
+ */
+ final float loadFactor;
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Constructs an empty {@code HashMap} with the specified initial
+ * capacity and load factor.
+ *
+ * @param initialCapacity the initial capacity
+ * @param loadFactor the load factor
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+    public XHashMap(int initialCapacity, float loadFactor) {
+        // Validate capacity first, then clamp, then validate load factor —
+        // order preserved so the same argument yields the same exception.
+        if (initialCapacity < 0) {
+            throw new IllegalArgumentException("Illegal initial capacity: " +
+                                               initialCapacity);
+        }
+        if (initialCapacity > MAXIMUM_CAPACITY) {
+            initialCapacity = MAXIMUM_CAPACITY;
+        }
+        if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
+            throw new IllegalArgumentException("Illegal load factor: " +
+                                               loadFactor);
+        }
+        this.loadFactor = loadFactor;
+        // Table allocation is deferred; threshold temporarily holds the
+        // initial table size until the first resize().
+        this.threshold = tableSizeFor(initialCapacity);
+    }
+
+ /**
+ * Constructs an empty {@code HashMap} with the specified initial
+ * capacity and the default load factor (0.75).
+ *
+ * @param initialCapacity the initial capacity.
+ * @throws IllegalArgumentException if the initial capacity is negative.
+ */
+    public XHashMap(int initialCapacity) {
+        // Delegate with the default load factor (0.75).
+        this(initialCapacity, DEFAULT_LOAD_FACTOR);
+    }
+
+ /**
+ * Constructs an empty {@code HashMap} with the default initial capacity
+ * (16) and the default load factor (0.75).
+ */
+    public XHashMap() {
+        // Table allocation is deferred until first insertion; threshold == 0
+        // signifies DEFAULT_INITIAL_CAPACITY (see resize()).
+        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
+    }
+
+ /**
+ * Constructs a new {@code HashMap} with the same mappings as the
+ * specified {@code Map}. The {@code HashMap} is created with
+ * default load factor (0.75) and an initial capacity sufficient to
+ * hold the mappings in the specified {@code Map}.
+ *
+ * @param m the map whose mappings are to be placed in this map
+ * @throws NullPointerException if the specified map is null
+ */
+    public XHashMap(Map<? extends K, ? extends V> m) {
+        this.loadFactor = DEFAULT_LOAD_FACTOR;
+        // evict == false: table is in creation mode.
+        putMapEntries(m, false);
+    }
+
+ /**
+ * Implements Map.putAll and Map constructor.
+ *
+ * @param m the map
+ * @param evict false when initially constructing this map, else true.
+ */
+ final void putMapEntries(Map extends K, ? extends V> m, boolean evict) {
+ int s = m.size();
+ if (s > 0) {
+ if (table == null) { // pre-size
+ float ft = ((float)s / loadFactor) + 1.0F;
+ int t = ((ft < (float)MAXIMUM_CAPACITY) ?
+ (int)ft : MAXIMUM_CAPACITY);
+ if (t > threshold)
+ threshold = tableSizeFor(t);
+ } else {
+ // Because of linked-list bucket constraints, we cannot
+ // expand all at once, but can reduce total resize
+ // effort by repeated doubling now vs later
+ while (s > threshold && table.length < MAXIMUM_CAPACITY)
+ resize();
+ }
+
+ for (Map.Entry extends K, ? extends V> e : m.entrySet()) {
+ K key = e.getKey();
+ V value = e.getValue();
+ putVal(hash(key), key, value, false, evict);
+ }
+ }
+ }
+
+    /**
+     * Returns the number of key-value mappings in this map.
+     *
+     * @return the number of key-value mappings in this map
+     */
+    public int size() {
+        // 'size' is maintained incrementally by the mutating methods
+        // (e.g. putVal increments it on insertion).
+        return size;
+    }
+
+    /**
+     * Returns {@code true} if this map contains no key-value mappings.
+     *
+     * @return {@code true} if this map contains no key-value mappings
+     */
+    public boolean isEmpty() {
+        return size == 0;
+    }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
+ * key.equals(k))}, then this method returns {@code v}; otherwise
+ * it returns {@code null}. (There can be at most one such mapping.)
+ *
+ *
+ * <p>A return value of {@code null} does not necessarily
+ * indicate that the map contains no mapping for the key; it's also
+ * possible that the map explicitly maps the key to {@code null}.
+ * The {@link #containsKey containsKey} operation may be used to
+ * distinguish these two cases.
+ *
+ * @see #put(Object, Object)
+ */
+    public V get(Object key) {
+        int hash = hash(key);
+        Node<K,V> e;
+        // Fast path: the inline entry stored directly in the table slot;
+        // fall back to the overflow chain only on a miss.
+        XNode<K,V> n = getXNode(hash, key);
+        return (!n.isEmpty()) ? n.value
+                : (e = getNode(hash, key)) == null ? null : e.value;
+    }
+
+ /**
+ * Implements Map.get and related methods.
+ *
+ * @param hash hash for key
+ * @param key the key
+ * @return the node, or emptyXNode() if not at top level.
+ */
+    final XNode<K,V> getXNode(int hash, Object key) {
+        XNode<K,V>[] tab;
+        XNode<K,V> first;
+        int n;
+        K k;
+        if ((tab = table) != null && (n = tab.length) > 0 &&
+            !(first = tab[(n - 1) & hash]).isEmpty()) {
+            if (first.hash == hash && // always check first node
+                ((k = first.key) == key || (key != null && key.equals(k))))
+                return first;
+        }
+        return emptyXNode();
+    }
+
+ /**
+ * Implements Map.get and related methods when the key is not found in the primary entry.
+ *
+ * @param hash hash for key
+ * @param key the key
+ * @return the node, or null if none
+ */
+    final Node<K,V> getNode(int hash, Object key) {
+        XNode<K,V>[] tab; XNode<K,V> first; Node<K,V> e; int n; K k;
+        if ((tab = table) != null && (n = tab.length) > 0 &&
+            !(first = tab[(n - 1) & hash]).isEmpty()) {
+            // Only the overflow chain hanging off the inline entry is searched.
+            if ((e = first.next) != null) {
+                if (e instanceof TreeNode)
+                    return ((TreeNode<K,V>)e).getTreeNode(hash, key);
+                do {
+                    if (e.hash == hash &&
+                        ((k = e.key) == key || (key != null && key.equals(k))))
+                        return e;
+                } while ((e = e.next) != null);
+            }
+        }
+        return null;
+    }
+
+ /**
+ * Returns {@code true} if this map contains a mapping for the
+ * specified key.
+ *
+ * @param key The key whose presence in this map is to be tested
+ * @return {@code true} if this map contains a mapping for the specified
+ * key.
+ */
+    public boolean containsKey(Object key) {
+        int hash = hash(key);
+        // Inline entry first, then the overflow chain.
+        XNode<K,V> n = getXNode(hash, key);
+        return !n.isEmpty() || getNode(hash, key) != null;
+    }
+
+ /**
+ * Associates the specified value with the specified key in this map.
+ * If the map previously contained a mapping for the key, the old
+ * value is replaced.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}.
+ * (A {@code null} return can also indicate that the map
+ * previously associated {@code null} with {@code key}.)
+ */
+    public V put(K key, V value) {
+        // Spread the key's hashCode and delegate to the internal insert.
+        return putVal(hash(key), key, value, false, true);
+    }
+
+ /**
+ * Implements Map.put and related methods.
+ *
+ * @param hash hash for key
+ * @param key the key
+ * @param value the value to put
+ * @param onlyIfAbsent if true, don't change existing value
+ * @param evict if false, the table is in creation mode.
+ * @return previous value, or null if none
+ */
+    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
+                   boolean evict) {
+        XNode<K,V>[] tab; XNode<K,V> tp; int n, i;
+        if ((tab = table) == null || (n = tab.length) == 0)
+            n = (tab = resize()).length;
+        if ((tp = tab[i = (n - 1) & hash]).isEmpty()) {
+            // Empty slot: store the mapping inline in the table.
+            tab[i] = new XNode<>(hash, key, value, null);
+        } else {
+            Node<K,V> e; K k;
+            if (tp.hash == hash &&
+                ((k = tp.key) == key || (key != null && key.equals(k)))) {
+                // Key matches the inline entry; XNode is immutable, so
+                // replace the whole entry to change the value.
+                if (!onlyIfAbsent || tp.value == null) {
+                    tab[i] = new XNode<>(hash, k, value, tp.next);
+                }
+                return tp.value;
+            } else if ((e = tp.next) == null) {
+                // First collision for this slot: start the overflow chain.
+                Node<K,V> x = newNode(hash, key, value, null);
+                tab[i] = new XNode<>(tp.hash, tp.key, tp.value, x);
+            } else if (e instanceof TreeNode) {
+                e = ((TreeNode<K,V>) e).putTreeVal(this, tab, hash, key, value);
+            } else {
+                for (int binCount = 0; ; ++binCount) {
+                    if (e.hash == hash &&
+                        ((k = e.key) == key || (key != null && key.equals(k))))
+                        break;
+                    Node<K,V> p = e;
+                    if ((e = p.next) == null) {
+                        p.next = newNode(hash, key, value, null);
+                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
+                            treeifyBin(tab, hash);
+                        break;
+                    }
+                }
+            }
+            if (e != null) { // existing mapping for key
+                V oldValue = e.value;
+                if (!onlyIfAbsent || oldValue == null)
+                    e.value = value;
+                return oldValue;
+            }
+        }
+
+        ++modCount;
+        if (++size > threshold)
+            resize();
+        return null;
+    }
+
+ /**
+ * Initializes or doubles table size. If null, allocates in
+ * accord with initial capacity target held in field threshold.
+ * Otherwise, because we are using power-of-two expansion, the
+ * elements from each bin must either stay at same index, or move
+ * with a power of two offset in the new table.
+ *
+ * @return the table
+ */
+    final XNode<K,V>[] resize() {
+        XNode<K,V>[] oldTab = table;
+        int oldCap = (oldTab == null) ? 0 : oldTab.length;
+        int oldThr = threshold;
+        int newCap, newThr = 0;
+        if (oldCap > 0) {
+            if (oldCap >= MAXIMUM_CAPACITY) {
+                threshold = Integer.MAX_VALUE;
+                return oldTab;
+            }
+            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
+                     oldCap >= DEFAULT_INITIAL_CAPACITY)
+                newThr = oldThr << 1; // double threshold
+        }
+        else if (oldThr > 0) // initial capacity was placed in threshold
+            newCap = oldThr;
+        else {               // zero initial threshold signifies using defaults
+            newCap = DEFAULT_INITIAL_CAPACITY;
+            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
+        }
+        if (newThr == 0) {
+            float ft = (float)newCap * loadFactor;
+            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
+                      (int)ft : Integer.MAX_VALUE);
+        }
+        threshold = newThr;
+        @SuppressWarnings({"rawtypes","unchecked"})
+        XNode<K,V>[] newTab = (XNode<K,V>[])new XNode[newCap];
+        table = newTab;
+        if (oldTab != null) {
+            for (int j = 0; j < oldCap; ++j) {
+                XNode<K,V> x;
+                Node<K,V> e;
+                if (!(x = oldTab[j]).isEmpty()) {
+                    oldTab[j] = emptyXNode();
+                    if ((e = x.next) == null)
+                        newTab[x.hash & (newCap - 1)] = new XNode<>(x.hash, x.key, x.value, null);
+                    else if (e instanceof TreeNode)
+                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
+                    else { // preserve order
+                        Node<K,V> loHead = null, loTail = null;
+                        Node<K,V> hiHead = null, hiTail = null;
+                        Node<K,V> next;
+                        do {
+                            next = e.next;
+                            if ((e.hash & oldCap) == 0) {
+                                if (loTail == null)
+                                    loHead = e;
+                                else
+                                    loTail.next = e;
+                                loTail = e;
+                            }
+                            else {
+                                if (hiTail == null)
+                                    hiHead = e;
+                                else
+                                    hiTail.next = e;
+                                hiTail = e;
+                            }
+                        } while ((e = next) != null);
+                        if (loTail != null)
+                            loTail.next = null;
+                        if (hiTail != null)
+                            hiTail.next = null;
+
+                        // Re-seat each half: keep the old inline entry inline
+                        // where it belongs, otherwise promote the chain head
+                        // into the inline slot (or mark the slot empty).
+                        newTab[j] = (j == (x.hash & (newCap - 1)))
+                                ? new XNode<>(x.hash, x.key, x.value, loHead)
+                                : ((loHead != null)
+                                   ? new XNode<>(loHead.hash, loHead.key, loHead.value, loHead.next) :
+                                   emptyXNode());
+
+                        newTab[j + oldCap] = ((j + oldCap) == (x.hash & (newCap - 1)))
+                                ? new XNode<>(x.hash, x.key, x.value, hiHead)
+                                : ((hiHead != null)
+                                   ? new XNode<>(hiHead.hash, hiHead.key, hiHead.value, hiHead.next) :
+                                   emptyXNode());
+                    }
+                }
+            }
+        }
+        return newTab;
+    }
+
+    /**
+     * Replaces all linked nodes in bin at index for given hash unless
+     * table is too small, in which case resizes instead.
+     *
+     * <p>NOTE(review): tree bins are disabled in this prototype — the body
+     * is intentionally commented out and the method is a no-op. Call sites
+     * are kept so the structure stays aligned with java.util.HashMap.
+     */
+    final void treeifyBin(XNode<K,V>[] tab, int hash) {
+//        int n, index; Node<K,V> e;
+//        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
+//            resize();
+//        else if ((e = tab[index = (n - 1) & hash]) != null) {
+//            TreeNode<K,V> hd = null, tl = null;
+//            do {
+//                TreeNode<K,V> p = replacementTreeNode(e, null);
+//                if (tl == null)
+//                    hd = p;
+//                else {
+//                    p.prev = tl;
+//                    tl.next = p;
+//                }
+//                tl = p;
+//            } while ((e = e.next) != null);
+//            if ((tab[index] = hd) != null)
+//                hd.treeify(tab);
+//        }
+    }
+
+    /**
+     * Copies all of the mappings from the specified map to this map.
+     * These mappings will replace any mappings that this map had for
+     * any of the keys currently in the specified map.
+     *
+     * @param m mappings to be stored in this map
+     * @throws NullPointerException if the specified map is null
+     */
+    public void putAll(Map<? extends K, ? extends V> m) {
+        putMapEntries(m, true);
+    }
+
+    /**
+     * Removes the mapping for the specified key from this map if present.
+     *
+     * @param key key whose mapping is to be removed from the map
+     * @return the previous value associated with {@code key}, or
+     *         {@code null} if there was no mapping for {@code key}.
+     *         (A {@code null} return can also indicate that the map
+     *         previously associated {@code null} with {@code key}.)
+     */
+    public V remove(Object key) {
+        Optional<V> o = removeNode(hash(key), key, null, false, true);
+        return o.orElse(null);
+    }
+
+    /**
+     * Implements Map.remove and related methods.
+     *
+     * @param hash hash for key
+     * @param key the key
+     * @param value the value to match if matchValue, else ignored
+     * @param matchValue if true only remove if value is equal
+     * @param movable if false do not move other nodes while removing
+     * @return an Optional describing the removed value, or an empty
+     *         Optional if nothing was removed (also empty when the removed
+     *         value itself was null)
+     */
+    final Optional<V> removeNode(int hash, Object key, Object value,
+                                 boolean matchValue, boolean movable) {
+        XNode<K,V>[] tab; XNode<K,V> te; int n, index;
+        if ((tab = table) != null && (n = tab.length) > 0 &&
+            !(te = tab[index = (n - 1) & hash]).isEmpty()) {
+            Node<K,V> node = null, e; K k; V v;
+            if (te.hash == hash &&
+                ((k = te.key) == key || (key != null && key.equals(k)))) {
+                // Capture the head's value unconditionally: the old code only
+                // assigned it when matchValue was set, so plain remove(key)
+                // always reported null for head removals.
+                v = te.value;
+                if (!matchValue || v == value ||
+                    (value != null && value.equals(v))) {
+                    // Removing the inlined head: promote the first chained
+                    // node into the slot, or mark the slot empty.
+                    tab[index] = ((e = te.next) == null)
+                            ? emptyXNode()
+                            : new XNode<>(hash, e.key, e.value, e.next);
+                    ++modCount;
+                    --size;
+                    return Optional.ofNullable(v);
+                }
+            } else if ((e = te.next) != null) {
+                Node<K,V> p = null;
+                if (e instanceof TreeNode)
+                    node = ((TreeNode<K,V>)e).getTreeNode(hash, key);
+                else {
+                    do {
+                        if (e.hash == hash &&
+                            ((k = e.key) == key ||
+                             (key != null && key.equals(k)))) {
+                            node = e;
+                            break;
+                        }
+                        p = e;
+                    } while ((e = e.next) != null);
+                }
+                if (node != null) {
+                    v = node.value;
+                    if (!matchValue || v == value ||
+                        (value != null && value.equals(v))) {
+                        if (node instanceof TreeNode)
+                            ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
+                        else if (p == null)
+                            // node is the first chained element: rebuild the
+                            // head with ITS OWN key/value and skip over node.
+                            // (The old code wrote the removed node's key/value
+                            // into the slot, deleting the head instead.)
+                            tab[index] = new XNode<>(te.hash, te.key, te.value, node.next);
+                        else
+                            p.next = node.next;
+                        ++modCount;
+                        --size;
+                        return Optional.ofNullable(v);
+                    }
+                }
+            }
+        }
+        return Optional.empty();
+    }
+
+    /**
+     * Removes all of the mappings from this map.
+     * The map will be empty after this call returns.
+     */
+    public void clear() {
+        modCount++;
+        if (size > 0 && table != null) {
+            // Drop the whole table; it is lazily re-allocated on next insert.
+            table = null;
+            threshold = 0;
+            size = 0;
+        }
+    }
+
+    /**
+     * Returns {@code true} if this map maps one or more keys to the
+     * specified value.
+     *
+     * @param value value whose presence in this map is to be tested
+     * @return {@code true} if this map maps one or more keys to the
+     *         specified value
+     */
+    public boolean containsValue(Object value) {
+        XNode<K,V>[] tab; V v;
+        if ((tab = table) != null && size > 0) {
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Check the inlined head, then its overflow chain.
+                    if ((v = te.value) == value ||
+                        (value != null && value.equals(v)))
+                        return true;
+                    for (Node<K,V> e = te.next; e != null; e = e.next) {
+                        if ((v = e.value) == value ||
+                            (value != null && value.equals(v)))
+                            return true;
+                    }
+                }
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Returns a {@link Set} view of the keys contained in this map.
+     * The set is backed by the map, so changes to the map are
+     * reflected in the set, and vice-versa.  If the map is modified
+     * while an iteration over the set is in progress (except through
+     * the iterator's own {@code remove} operation), the results of
+     * the iteration are undefined.  The set supports element removal,
+     * which removes the corresponding mapping from the map, via the
+     * {@code Iterator.remove}, {@code Set.remove},
+     * {@code removeAll}, {@code retainAll}, and {@code clear}
+     * operations.  It does not support the {@code add} or {@code addAll}
+     * operations.
+     *
+     * @return a set view of the keys contained in this map
+     */
+    public Set<K> keySet() {
+        Set<K> ks = keySet;   // lazily created, cached view
+        if (ks == null) {
+            ks = new KeySet();
+            keySet = ks;
+        }
+        return ks;
+    }
+
+    /**
+     * Prepares the array for {@link Collection#toArray(Object[])} implementation.
+     * If supplied array is smaller than this map size, a new array is allocated.
+     * If supplied array is bigger than this map size, a null is written at size index.
+     *
+     * @param a an original array passed to {@code toArray()} method
+     * @param <T> type of array elements
+     * @return an array ready to be filled and returned from {@code toArray()} method.
+     */
+    @SuppressWarnings("unchecked")
+    final <T> T[] prepareArray(T[] a) {
+        int size = this.size;
+        if (a.length < size) {
+            return (T[]) java.lang.reflect.Array
+                    .newInstance(a.getClass().getComponentType(), size);
+        }
+        if (a.length > size) {
+            a[size] = null;   // null-terminate per the toArray(T[]) contract
+        }
+        return a;
+    }
+
+    /**
+     * Fills an array with this map keys and returns it. This method assumes
+     * that input array is big enough to fit all the keys. Use
+     * {@link #prepareArray(Object[])} to ensure this.
+     *
+     * @param a an array to fill
+     * @param <T> type of array elements
+     * @return supplied array
+     */
+    <T> T[] keysToArray(T[] a) {
+        Object[] r = a;
+        XNode<K,V>[] tab;
+        int idx = 0;
+        if (size > 0 && (tab = table) != null) {
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Head key first, then the overflow chain's keys.
+                    r[idx++] = te.key;
+                    for (Node<K,V> e = te.next; e != null; e = e.next) {
+                        r[idx++] = e.key;
+                    }
+                }
+            }
+        }
+        return a;
+    }
+
+    /**
+     * Fills an array with this map values and returns it. This method assumes
+     * that input array is big enough to fit all the values. Use
+     * {@link #prepareArray(Object[])} to ensure this.
+     *
+     * @param a an array to fill
+     * @param <T> type of array elements
+     * @return supplied array
+     */
+    <T> T[] valuesToArray(T[] a) {
+        Object[] r = a;
+        XNode<K,V>[] tab;
+        int idx = 0;
+        if (size > 0 && (tab = table) != null) {
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Head value first, then the overflow chain's values.
+                    r[idx++] = te.value;
+                    for (Node<K,V> e = te.next; e != null; e = e.next) {
+                        r[idx++] = e.value;
+                    }
+                }
+            }
+        }
+        return a;
+    }
+
+    /** Backing view returned by {@link #keySet()}. */
+    final class KeySet extends AbstractSet<K> {
+        public final int size()                 { return size; }
+        public final void clear()               { XHashMap.this.clear(); }
+        public final Iterator<K> iterator()     { return new KeyIterator(); }
+        public final boolean contains(Object o) { return containsKey(o); }
+        public final boolean remove(Object key) {
+            return removeNode(hash(key), key, null, false, true).isPresent();
+        }
+
+        public Object[] toArray() {
+            return keysToArray(new Object[size]);
+        }
+
+        public <T> T[] toArray(T[] a) {
+            return keysToArray(prepareArray(a));
+        }
+
+        public final void forEach(Consumer<? super K> action) {
+            XNode<K,V>[] tab;
+            if (action == null)
+                throw new NullPointerException();
+            if (size > 0 && (tab = table) != null) {
+                int mc = modCount;   // detect concurrent structural changes
+                for (XNode<K,V> te : tab) {
+                    if (!te.isEmpty()) {
+                        action.accept(te.key);
+                        for (Node<K,V> e = te.next; e != null; e = e.next)
+                            action.accept(e.key);
+                    }
+                }
+                if (modCount != mc)
+                    throw new ConcurrentModificationException();
+            }
+        }
+    }
+
+    /**
+     * Returns a {@link Collection} view of the values contained in this map.
+     * The collection is backed by the map, so changes to the map are
+     * reflected in the collection, and vice-versa.  If the map is
+     * modified while an iteration over the collection is in progress
+     * (except through the iterator's own {@code remove} operation),
+     * the results of the iteration are undefined.  The collection
+     * supports element removal, which removes the corresponding
+     * mapping from the map, via the {@code Iterator.remove},
+     * {@code Collection.remove}, {@code removeAll},
+     * {@code retainAll} and {@code clear} operations.  It does not
+     * support the {@code add} or {@code addAll} operations.
+     *
+     * @return a view of the values contained in this map
+     */
+    public Collection<V> values() {
+        Collection<V> vs = values;   // lazily created, cached view
+        if (vs == null) {
+            vs = new Values();
+            values = vs;
+        }
+        return vs;
+    }
+
+    /** Backing view returned by {@link #values()}. */
+    final class Values extends AbstractCollection<V> {
+        public final int size()                 { return size; }
+        public final void clear()               { XHashMap.this.clear(); }
+        public final Iterator<V> iterator()     { return new ValueIterator(); }
+        public final boolean contains(Object o) { return containsValue(o); }
+
+        public Object[] toArray() {
+            return valuesToArray(new Object[size]);
+        }
+
+        public <T> T[] toArray(T[] a) {
+            return valuesToArray(prepareArray(a));
+        }
+
+        public final void forEach(Consumer<? super V> action) {
+            XNode<K,V>[] tab;
+            if (action == null)
+                throw new NullPointerException();
+            if (size > 0 && (tab = table) != null) {
+                int mc = modCount;   // detect concurrent structural changes
+                for (XNode<K,V> te : tab) {
+                    if (!te.isEmpty()) {
+                        action.accept(te.value);
+                        for (Node<K,V> e = te.next; e != null; e = e.next)
+                            action.accept(e.value);
+                    }
+                }
+                if (modCount != mc)
+                    throw new ConcurrentModificationException();
+            }
+        }
+    }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own {@code remove} operation, or through the
+ * {@code setValue} operation on a map entry returned by the
+ * iterator) the results of the iteration are undefined. The set
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the {@code Iterator.remove},
+ * {@code Set.remove}, {@code removeAll}, {@code retainAll} and
+ * {@code clear} operations. It does not support the
+ * {@code add} or {@code addAll} operations.
+ *
+ * @return a set view of the mappings contained in this map
+ */
+ public Set> entrySet() {
+ Set> es;
+ return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
+ }
+
+    /** Backing view returned by {@link #entrySet()}. */
+    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
+        public final int size()   { return size; }
+        public final void clear() { XHashMap.this.clear(); }
+        public final Iterator<Map.Entry<K,V>> iterator() {
+            return new EntryIterator();
+        }
+        public final boolean contains(Object o) {
+            if (!(o instanceof Map.Entry))
+                return false;
+            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
+            Object key = e.getKey();
+            Node<K,V> candidate = getNode(hash(key), key);
+            return candidate != null && candidate.equals(e);
+        }
+        public final boolean remove(Object o) {
+            if (o instanceof Map.Entry) {
+                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
+                Object key = e.getKey();
+                Object value = e.getValue();
+                // matchValue=true: remove only when mapped to an equal value.
+                return removeNode(hash(key), key, value, true, true).isPresent();
+            }
+            return false;
+        }
+        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
+            XNode<K,V>[] tab;
+            if (action == null)
+                throw new NullPointerException();
+            if (size > 0 && (tab = table) != null) {
+                int mc = modCount;   // detect concurrent structural changes
+                for (XNode<K,V> te : tab) {
+                    if (!te.isEmpty()) {
+                        // Inlined heads are exposed through a wrapper that is
+                        // keyed by the bucket index.
+                        action.accept(new XNodeWrapper(te.hash & (tab.length - 1)));
+                        for (Node<K,V> e = te.next; e != null; e = e.next)
+                            action.accept(e);
+                    }
+                }
+                if (modCount != mc)
+                    throw new ConcurrentModificationException();
+            }
+        }
+    }
+
+ // Overrides of JDK8 Map extension methods
+
+    @Override
+    public V getOrDefault(Object key, V defaultValue) {
+        // Distinguish "absent" from "mapped to null" via the node lookup.
+        Node<K,V> e;
+        return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
+    }
+
+ // Inserts only when the key is absent — delegates to putVal (the first
+ // boolean is presumably an onlyIfAbsent flag; putVal is outside this
+ // hunk, TODO confirm against its definition).
+ @Override
+ public V putIfAbsent(K key, V value) {
+ return putVal(hash(key), key, value, true, true);
+ }
+
+ // Removes the entry only when it currently maps to an equal value
+ // (removeNode with matchValue=true).  NOTE(review): removeNode reports an
+ // empty Optional when the removed value was null, so this would return
+ // false for a successful removal of a null mapping — relevant only if
+ // this map can store null values; confirm.
+ @Override
+ public boolean remove(Object key, Object value) {
+ return removeNode(hash(key), key, value, true, true).isPresent();
+ }
+
+    @Override
+    public boolean replace(K key, V oldValue, V newValue) {
+        // NOTE(review): relies on getNode returning a node whose value field
+        // writes through to the table (including inlined heads) — confirm
+        // against getNode/XNodeWrapper, which are outside this hunk.
+        Node<K,V> e; V v;
+        if ((e = getNode(hash(key), key)) != null &&
+            ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
+            e.value = newValue;
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public V replace(K key, V value) {
+        // NOTE(review): relies on getNode returning a write-through node —
+        // confirm against getNode/XNodeWrapper, which are outside this hunk.
+        Node<K,V> e;
+        if ((e = getNode(hash(key), key)) != null) {
+            V oldValue = e.value;
+            e.value = value;
+            return oldValue;
+        }
+        return null;
+    }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * mapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * mapping function modified this map
+ */
+ @Override
+ public V computeIfAbsent(K key,
+ Function super K, ? extends V> mappingFunction) {
+ if (mappingFunction == null)
+ throw new NullPointerException();
+ int hash = hash(key);
+ XNode[] tab; XNode first; int n, i;
+ int binCount = 0;
+ TreeNode t = null;
+ Node old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if (!(first = tab[i = (n - 1) & hash]).isEmpty()) {
+ K k;
+ if (first.hash == hash &&
+ ((k = first.key) == key || (key != null && key.equals(k)))) {
+ return first.value;
+ }
+ Node e = first.next;
+ if (e instanceof TreeNode)
+ old = (t = (TreeNode)e).getTreeNode(hash, key);
+ else {
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+
+ V oldValue;
+ if (old != null && (oldValue = old.value) != null) {
+ return oldValue;
+ }
+ }
+ int mc = modCount;
+ V v = mappingFunction.apply(key);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v == null) {
+ return null;
+ } else if (old != null) {
+ old.value = v;
+ return v;
+ }
+ else if (t != null)
+ t.putTreeVal(this, tab, hash, key, v);
+ else {
+ Node x = (tab[i].isEmpty()) ? null : newNode(hash, key, v, null);
+ tab[i] = new XNode(hash, key, v, x);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ modCount = mc + 1;
+ ++size;
+ return v;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V computeIfPresent(K key,
+ BiFunction super K, ? super V, ? extends V> remappingFunction) {
+ if (remappingFunction == null)
+ throw new NullPointerException();
+ Node e; V oldValue;
+ int hash = hash(key);
+ if ((e = getNode(hash, key)) != null &&
+ (oldValue = e.value) != null) {
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v != null) {
+ e.value = v;
+ return v;
+ }
+ else
+ removeNode(hash, key, null, false, true);
+ }
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V compute(K key,
+ BiFunction super K, ? super V, ? extends V> remappingFunction) {
+ if (remappingFunction == null)
+ throw new NullPointerException();
+ int hash = hash(key);
+ XNode[] tab; XNode first; int n, i;
+ int binCount = 0;
+ TreeNode t = null;
+ Node old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if (!(first = tab[i = (n - 1) & hash]).isEmpty()) {
+ Node e = first.next;K k;
+ if (first.hash == hash &&
+ ((k = first.key) == key || (key != null && key.equals(k)))) {
+ V v = remappingFunction.apply(k, first.value);
+ tab[i] = new XNode(hash, k, v, e);
+ return v;
+ }
+ if (e instanceof TreeNode)
+ old = (t = (TreeNode)e).getTreeNode(hash, key);
+ else {
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+ }
+ V oldValue = (old == null) ? null : old.value;
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (old != null) {
+ if (v != null) {
+ old.value = v;
+ }
+ else
+ removeNode(hash, key, null, false, true);
+ }
+ else if (v != null) {
+ if (t != null)
+ t.putTreeVal(this, tab, hash, key, v);
+ else {
+ Node x = (tab[i].isEmpty()) ? null : newNode(hash, key, v, null);
+ tab[i] = new XNode(hash, key, v, x);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ modCount = mc + 1;
+ ++size;
+ }
+ return v;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V merge(K key, V value,
+ BiFunction super V, ? super V, ? extends V> remappingFunction) {
+ if (value == null || remappingFunction == null)
+ throw new NullPointerException();
+ int hash = hash(key);
+ XNode[] tab; XNode first; int n, i;
+ int binCount = 0;
+ TreeNode t = null;
+ Node old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if (!(first = tab[i = (n - 1) & hash]).isEmpty()) {
+ Node e = first.next;K k;
+ if (first.hash == hash &&
+ ((k = first.key) == key || (key != null && key.equals(k)))) {
+ V v = remappingFunction.apply(first.value, value);
+ tab[i] = new XNode(hash, k, v, e);
+ return v;
+ }
+ if (e instanceof TreeNode)
+ old = (t = (TreeNode)e).getTreeNode(hash, key);
+ else {
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+ }
+ if (old != null) {
+ V v;
+ if (old.value != null) {
+ int mc = modCount;
+ v = remappingFunction.apply(old.value, value);
+ if (mc != modCount) {
+ throw new ConcurrentModificationException();
+ }
+ } else {
+ v = value;
+ }
+ if (v != null) {
+ old.value = v;
+ }
+ else
+ removeNode(hash, key, null, false, true);
+ return v;
+ } else {
+ if (t != null)
+ t.putTreeVal(this, tab, hash, key, value);
+ else {
+ Node x = (tab[i].isEmpty()) ? null
+ : newNode(hash, tab[i].key, tab[i].value, null);
+ tab[i] = new XNode(hash, key, value, x);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ ++modCount;
+ ++size;
+ return value;
+ }
+ }
+
+    @Override
+    public void forEach(BiConsumer<? super K, ? super V> action) {
+        XNode<K,V>[] tab;
+        if (action == null)
+            throw new NullPointerException();
+        if (size > 0 && (tab = table) != null) {
+            int mc = modCount;   // detect concurrent structural changes
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Visit the inlined head, then its overflow chain.
+                    action.accept(te.key, te.value);
+                    for (Node<K,V> e = te.next; e != null; e = e.next)
+                        action.accept(e.key, e.value);
+                }
+            }
+            if (modCount != mc)
+                throw new ConcurrentModificationException();
+        }
+    }
+
+    @Override
+    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
+        XNode<K,V>[] tab;
+        if (function == null)
+            throw new NullPointerException();
+        if (size > 0 && (tab = table) != null) {
+            int mc = modCount;   // detect concurrent structural changes
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Rebuild the inlined head in place with the new value,
+                    // then mutate the chained nodes directly.
+                    V v = function.apply(te.key, te.value);
+                    tab[te.hash & (tab.length - 1)] = new XNode<>(te.hash, te.key, v, te.next);
+                    for (Node<K,V> e = te.next; e != null; e = e.next)
+                        e.value = function.apply(e.key, e.value);
+                }
+            }
+            if (modCount != mc)
+                throw new ConcurrentModificationException();
+        }
+    }
+
+ /* ------------------------------------------------------------ */
+ // Cloning and serialization
+
+    /**
+     * Returns a shallow copy of this {@code HashMap} instance: the keys and
+     * values themselves are not cloned.
+     *
+     * @return a shallow copy of this map
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public Object clone() {
+        XHashMap<K,V> result;
+        try {
+            result = (XHashMap<K,V>)super.clone();
+        } catch (CloneNotSupportedException e) {
+            // this shouldn't happen, since we are Cloneable
+            throw new InternalError(e);
+        }
+        // The field-copied clone shares our table; reset it and re-insert.
+        result.reinitialize();
+        result.putMapEntries(this, false);
+        return result;
+    }
+
+ // These methods are also used when serializing HashSets
+ final float loadFactor() { return loadFactor; }
+ // Serialized capacity: the live table length if allocated, else the
+ // deferred initial capacity parked in threshold, else the default.
+ final int capacity() {
+ return (table != null) ? table.length :
+ (threshold > 0) ? threshold :
+ DEFAULT_INITIAL_CAPACITY;
+ }
+
+ /**
+ * Saves this map to a stream (that is, serializes it).
+ *
+ * @param s the stream
+ * @throws IOException if an I/O error occurs
+ * @serialData The capacity of the HashMap (the length of the
+ * bucket array) is emitted (int), followed by the
+ * size (an int, the number of key-value
+ * mappings), followed by the key (Object) and value (Object)
+ * for each key-value mapping. The key-value mappings are
+ * emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws IOException {
+ int buckets = capacity();
+ // Write out the threshold, loadfactor, and any hidden stuff
+ s.defaultWriteObject();
+ // Bucket count, then entry count, then the entries themselves.
+ s.writeInt(buckets);
+ s.writeInt(size);
+ internalWriteEntries(s);
+ }
+
+    /**
+     * Reconstitutes this map from a stream (that is, deserializes it).
+     * @param s the stream
+     * @throws ClassNotFoundException if the class of a serialized object
+     *         could not be found
+     * @throws IOException if an I/O error occurs
+     */
+    private void readObject(java.io.ObjectInputStream s)
+        throws IOException, ClassNotFoundException {
+        // Read in the threshold (ignored), loadfactor, and any hidden stuff
+        s.defaultReadObject();
+        reinitialize();
+        if (loadFactor <= 0 || Float.isNaN(loadFactor))
+            throw new InvalidObjectException("Illegal load factor: " +
+                                             loadFactor);
+        s.readInt();                // Read and ignore number of buckets
+        int mappings = s.readInt(); // Read number of mappings (size)
+        if (mappings < 0)
+            throw new InvalidObjectException("Illegal mappings count: " +
+                                             mappings);
+        else if (mappings > 0) { // (if zero, use defaults)
+            // Size the table using given load factor only if within
+            // range of 0.25...4.0
+            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
+            float fc = (float)mappings / lf + 1.0f;
+            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
+                       DEFAULT_INITIAL_CAPACITY :
+                       (fc >= MAXIMUM_CAPACITY) ?
+                       MAXIMUM_CAPACITY :
+                       tableSizeFor((int)fc));
+            float ft = (float)cap * lf;
+            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
+                         (int)ft : Integer.MAX_VALUE);
+
+            // Check Map.Entry[].class since it's the nearest public type to
+            // what we're actually creating.
+            @SuppressWarnings({"rawtypes","unchecked"})
+            XNode<K,V>[] tab = (XNode<K,V>[])new XNode[cap];
+            table = tab;
+
+            // Read the keys and values, and put the mappings in the HashMap
+            for (int i = 0; i < mappings; i++) {
+                @SuppressWarnings("unchecked")
+                K key = (K) s.readObject();
+                @SuppressWarnings("unchecked")
+                V value = (V) s.readObject();
+                putVal(hash(key), key, value, false, false);
+            }
+        }
+    }
+
+ /* ------------------------------------------------------------ */
+ // iterators
+
+ static final Node,?> START_INDEX = new Node(0, null, null, null);
+
+    /**
+     * Shared traversal machinery for the key/value/entry iterators.
+     * {@code START_INDEX} marks that the inlined head at {@code index}
+     * is the next entry to surface (as an XNodeWrapper).
+     */
+    abstract class HashIterator {
+        Node<K,V> next;        // next entry to return
+        Node<K,V> current;     // current entry
+        int expectedModCount;  // for fast-fail
+        int index;             // current slot
+
+        @SuppressWarnings("unchecked")
+        HashIterator() {
+            expectedModCount = modCount;
+            XNode<K,V>[] t = table;
+            current = next = null;
+            index = 0;
+            if (t != null && size > 0) { // advance to first entry
+                while (index < t.length && t[index].isEmpty())
+                    index++;
+                next = (Node<K,V>) START_INDEX;
+            }
+        }
+
+        public final boolean hasNext() {
+            return next != null;
+        }
+
+        @SuppressWarnings("unchecked")
+        final Map.Entry<K,V> nextNode() {
+            XNode<K,V>[] t;
+            Node<K,V> e = next;
+            if (modCount != expectedModCount)
+                throw new ConcurrentModificationException();
+            if (e == null)
+                throw new NoSuchElementException();
+            if ((next = (current = e).next) == null && (t = table) != null) {
+                // Chain exhausted: surface the head sentinel (if that is what
+                // we hold) and advance to the next non-empty slot.
+                var ret = (e == START_INDEX) ? new XNodeWrapper(index++) : e;
+                while (index < t.length && t[index].isEmpty())
+                    index++;
+                next = (index < t.length) ? (Node<K,V>) START_INDEX : null;
+                return ret;
+            }
+            return e;
+        }
+
+        public final void remove() {
+            Node<K,V> p = current;
+            if (p == null)
+                throw new IllegalStateException();
+            if (modCount != expectedModCount)
+                throw new ConcurrentModificationException();
+            current = null;
+            removeNode(p.hash, p.key, null, false, false);
+            expectedModCount = modCount;   // our own removal is not a CME
+        }
+    }
+
+ final class KeyIterator extends HashIterator
+ implements Iterator {
+ public final K next() { return nextNode().getKey(); }
+ }
+
+ final class ValueIterator extends HashIterator
+ implements Iterator {
+ public final V next() { return nextNode().getValue(); }
+ }
+
+ final class EntryIterator extends HashIterator
+ implements Iterator> {
+ public final Map.Entry next() { return nextNode(); }
+ }
+
+ /*
+ * The following package-protected methods are designed to be
+ * overridden by LinkedHashMap, but not by any other subclass.
+ * Nearly all other internal methods are also package-protected
+ * but are declared final, so can be used by LinkedHashMap, view
+ * classes, and HashSet.
+ */
+
+    // Create a regular (non-tree) node
+    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
+        return new Node<>(hash, key, value, next);
+    }
+
+    // For conversion from TreeNodes to plain nodes
+    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
+        return new Node<>(p.hash, p.key, p.value, next);
+    }
+
+    // Create a tree bin node
+    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
+        return new TreeNode<>(hash, key, value, next);
+    }
+
+    // For treeifyBin
+    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
+        return new TreeNode<>(p.hash, p.key, p.value, next);
+    }
+
+    /**
+     * Reset to initial default state.  Called by clone and readObject.
+     */
+    void reinitialize() {
+        // Drop the table and every cached view, then zero the bookkeeping
+        // counters, leaving the map exactly as a fresh instance.
+        table = null;
+        keySet = null;
+        values = null;
+        entrySet = null;
+        size = 0;
+        threshold = 0;
+        modCount = 0;
+    }
+
+    // Called only from writeObject, to ensure compatible ordering.
+    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
+        XNode<K,V>[] tab;
+        if (size > 0 && (tab = table) != null) {
+            for (XNode<K,V> te : tab) {
+                if (!te.isEmpty()) {
+                    // Key/value of the inlined head, then the overflow chain.
+                    s.writeObject(te.key);
+                    s.writeObject(te.value);
+
+                    for (Node<K,V> e = te.next; e != null; e = e.next) {
+                        s.writeObject(e.key);
+                        s.writeObject(e.value);
+                    }
+                }
+            }
+        }
+    }
+
+ /* ------------------------------------------------------------ */
+ // Tree bins
+
+    /**
+     * Entry for Tree bins.  Extends Node so it can be used as an extension
+     * of either a regular or a linked node.  (NOTE(review): the inherited
+     * comment mentioned LinkedHashMap.Entry, but in this prototype TreeNode
+     * extends Node directly.)
+     */
+    static final class TreeNode<K,V> extends Node<K,V> {
+        TreeNode<K,V> parent;  // red-black tree links
+        TreeNode<K,V> left;
+        TreeNode<K,V> right;
+        TreeNode<K,V> prev;    // needed to unlink next upon deletion
+        boolean red;
+        TreeNode(int hash, K key, V val, Node<K,V> next) {
+            super(hash, key, val, next);
+        }
+
+        /**
+         * Returns root of tree containing this node.
+         */
+        final TreeNode<K,V> root() {
+            for (TreeNode<K,V> r = this, p;;) {
+                if ((p = r.parent) == null)
+                    return r;
+                r = p;
+            }
+        }
+
+        /**
+         * Ensures that the given root is the first node of its bin.
+         */
+        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
+            int n;
+            if (root != null && tab != null && (n = tab.length) > 0) {
+                int index = (n - 1) & root.hash;
+                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
+                if (root != first) {
+                    // Unlink root from its current list position and splice
+                    // it in ahead of the old first node.
+                    Node<K,V> rn;
+                    tab[index] = root;
+                    TreeNode<K,V> rp = root.prev;
+                    if ((rn = root.next) != null)
+                        ((TreeNode<K,V>)rn).prev = rp;
+                    if (rp != null)
+                        rp.next = rn;
+                    if (first != null)
+                        first.prev = root;
+                    root.next = first;
+                    root.prev = null;
+                }
+                assert checkInvariants(root);
+            }
+        }
+
+        /**
+         * Finds the node starting at root p with the given hash and key.
+         * The kc argument caches comparableClassFor(key) upon first use
+         * comparing keys.
+         */
+        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
+            TreeNode<K,V> p = this;
+            do {
+                int ph, dir; K pk;
+                TreeNode<K,V> pl = p.left, pr = p.right, q;
+                if ((ph = p.hash) > h)
+                    p = pl;
+                else if (ph < h)
+                    p = pr;
+                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
+                    return p;
+                else if (pl == null)
+                    p = pr;
+                else if (pr == null)
+                    p = pl;
+                else if ((kc != null ||
+                          (kc = comparableClassFor(k)) != null) &&
+                         (dir = compareComparables(kc, k, pk)) != 0)
+                    p = (dir < 0) ? pl : pr;
+                else if ((q = pr.find(h, k, kc)) != null)
+                    return q;   // equal hash, not comparable: search right,
+                else
+                    p = pl;     // then fall back to the left subtree
+            } while (p != null);
+            return null;
+        }
+
+        /**
+         * Calls find for root node.
+         */
+        final TreeNode<K,V> getTreeNode(int h, Object k) {
+            return ((parent != null) ? root() : this).find(h, k, null);
+        }
+
+ /**
+ * Tie-breaking utility for ordering insertions when equal
+ * hashCodes and non-comparable. We don't require a total
+ * order, just a consistent insertion rule to maintain
+ * equivalence across rebalancings. Tie-breaking further than
+ * necessary simplifies testing a bit.
+ */
+ static int tieBreakOrder(Object a, Object b) {
+ int d;
+ if (a == null || b == null ||
+ (d = a.getClass().getName().
+ compareTo(b.getClass().getName())) == 0)
+ // Fall back to identity hash codes: consistent for a given pair
+ // of objects within a run, though not a true total order.
+ d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
+ -1 : 1);
+ return d;
+ }
+
+        /**
+         * Forms tree of the nodes linked from this node.
+         *
+         * <p>NOTE(review): tree bins are disabled in this prototype — the
+         * body is intentionally commented out and the method is a no-op.
+         */
+        final void treeify(XNode<K,V>[] tab) {
+//            TreeNode<K,V> root = null;
+//            for (TreeNode<K,V> x = this, next; x != null; x = next) {
+//                next = (TreeNode<K,V>)x.next;
+//                x.left = x.right = null;
+//                if (root == null) {
+//                    x.parent = null;
+//                    x.red = false;
+//                    root = x;
+//                }
+//                else {
+//                    K k = x.key;
+//                    int h = x.hash;
+//                    Class<?> kc = null;
+//                    for (TreeNode<K,V> p = root;;) {
+//                        int dir, ph;
+//                        K pk = p.key;
+//                        if ((ph = p.hash) > h)
+//                            dir = -1;
+//                        else if (ph < h)
+//                            dir = 1;
+//                        else if ((kc == null &&
+//                                  (kc = comparableClassFor(k)) == null) ||
+//                                 (dir = compareComparables(kc, k, pk)) == 0)
+//                            dir = tieBreakOrder(k, pk);
+//
+//                        TreeNode<K,V> xp = p;
+//                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
+//                            x.parent = xp;
+//                            if (dir <= 0)
+//                                xp.left = x;
+//                            else
+//                                xp.right = x;
+//                            root = balanceInsertion(root, x);
+//                            break;
+//                        }
+//                    }
+//                }
+//            }
+//            moveRootToFront(tab, root);
+        }
+
+        /**
+         * Returns a list of non-TreeNodes replacing those linked from
+         * this node.
+         */
+        final Node<K,V> untreeify(XHashMap<K,V> map) {
+            Node<K,V> hd = null, tl = null;
+            for (Node<K,V> q = this; q != null; q = q.next) {
+                // Copy each tree node into a plain node, preserving order.
+                Node<K,V> p = map.replacementNode(q, null);
+                if (tl == null)
+                    hd = p;
+                else
+                    tl.next = p;
+                tl = p;
+            }
+            return hd;
+        }
+
+        /**
+         * Tree version of putVal.
+         *
+         * <p>NOTE(review): tree bins are disabled in this prototype — the
+         * body is intentionally commented out and this always returns null
+         * (i.e. behaves as "inserted, no existing node").
+         */
+        final TreeNode<K,V> putTreeVal(XHashMap<K,V> map, XNode<K,V>[] tab,
+                                       int h, K k, V v) {
+//            Class<?> kc = null;
+//            boolean searched = false;
+//            TreeNode<K,V> root = (parent != null) ? root() : this;
+//            for (TreeNode<K,V> p = root;;) {
+//                int dir, ph; K pk;
+//                if ((ph = p.hash) > h)
+//                    dir = -1;
+//                else if (ph < h)
+//                    dir = 1;
+//                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
+//                    return p;
+//                else if ((kc == null &&
+//                          (kc = comparableClassFor(k)) == null) ||
+//                         (dir = compareComparables(kc, k, pk)) == 0) {
+//                    if (!searched) {
+//                        TreeNode<K,V> q, ch;
+//                        searched = true;
+//                        if (((ch = p.left) != null &&
+//                             (q = ch.find(h, k, kc)) != null) ||
+//                            ((ch = p.right) != null &&
+//                             (q = ch.find(h, k, kc)) != null))
+//                            return q;
+//                    }
+//                    dir = tieBreakOrder(k, pk);
+//                }
+//
+//                TreeNode<K,V> xp = p;
+//                if ((p = (dir <= 0) ? p.left : p.right) == null) {
+//                    Node<K,V> xpn = xp.next;
+//                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
+//                    if (dir <= 0)
+//                        xp.left = x;
+//                    else
+//                        xp.right = x;
+//                    xp.next = x;
+//                    x.parent = x.prev = xp;
+//                    if (xpn != null)
+//                        ((TreeNode<K,V>)xpn).prev = x;
+//                    moveRootToFront(tab, balanceInsertion(root, x));
+//                    return null;
+//                }
+//            }
+            return null;
+        }
+
+ /**
+ * Removes the given node, that must be present before this call.
+ * This is messier than typical red-black deletion code because we
+ * cannot swap the contents of an interior node with a leaf
+ * successor that is pinned by "next" pointers that are accessible
+ * independently during traversal. So instead we swap the tree
+ * linkages. If the current tree appears to have too few nodes,
+ * the bin is converted back to a plain bin. (The test triggers
+ * somewhere between 2 and 6 nodes, depending on tree structure).
+ */
+        final void removeTreeNode(XHashMap map, XNode[] tab,
+                                  boolean movable) {
+            // NOTE(review): the entire red-black removal implementation below
+            // is commented out; this method is currently a no-op stub and
+            // removes nothing from the tree or the bin.
+// int n;
+// if (tab == null || (n = tab.length) == 0)
+// return;
+// int index = (n - 1) & hash;
+// TreeNode first = (TreeNode)tab[index], root = first, rl;
+// TreeNode succ = (TreeNode)next, pred = prev;
+// if (pred == null)
+// tab[index] = first = succ;
+// else
+// pred.next = succ;
+// if (succ != null)
+// succ.prev = pred;
+// if (first == null)
+// return;
+// if (root.parent != null)
+// root = root.root();
+// if (root == null
+// || (movable
+// && (root.right == null
+// || (rl = root.left) == null
+// || rl.left == null))) {
+// tab[index] = first.untreeify(map); // too small
+// return;
+// }
+// TreeNode p = this, pl = left, pr = right, replacement;
+// if (pl != null && pr != null) {
+// TreeNode s = pr, sl;
+// while ((sl = s.left) != null) // find successor
+// s = sl;
+// boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+// TreeNode sr = s.right;
+// TreeNode pp = p.parent;
+// if (s == pr) { // p was s's direct parent
+// p.parent = s;
+// s.right = p;
+// }
+// else {
+// TreeNode sp = s.parent;
+// if ((p.parent = sp) != null) {
+// if (s == sp.left)
+// sp.left = p;
+// else
+// sp.right = p;
+// }
+// if ((s.right = pr) != null)
+// pr.parent = s;
+// }
+// p.left = null;
+// if ((p.right = sr) != null)
+// sr.parent = p;
+// if ((s.left = pl) != null)
+// pl.parent = s;
+// if ((s.parent = pp) == null)
+// root = s;
+// else if (p == pp.left)
+// pp.left = s;
+// else
+// pp.right = s;
+// if (sr != null)
+// replacement = sr;
+// else
+// replacement = p;
+// }
+// else if (pl != null)
+// replacement = pl;
+// else if (pr != null)
+// replacement = pr;
+// else
+// replacement = p;
+// if (replacement != p) {
+// TreeNode pp = replacement.parent = p.parent;
+// if (pp == null)
+// (root = replacement).red = false;
+// else if (p == pp.left)
+// pp.left = replacement;
+// else
+// pp.right = replacement;
+// p.left = p.right = p.parent = null;
+// }
+//
+// TreeNode r = p.red ? root : balanceDeletion(root, replacement);
+//
+// if (replacement == p) { // detach
+// TreeNode pp = p.parent;
+// p.parent = null;
+// if (pp != null) {
+// if (p == pp.left)
+// pp.left = null;
+// else if (p == pp.right)
+// pp.right = null;
+// }
+// }
+// if (movable)
+// moveRootToFront(tab, r);
+        }
+
+ /**
+ * Splits nodes in a tree bin into lower and upper tree bins,
+ * or untreeifies if now too small. Called only from resize;
+ * see above discussion about split bits and indices.
+ *
+ * @param map the map
+ * @param tab the table for recording bin heads
+ * @param index the index of the table being split
+ * @param bit the bit of hash to split on
+ */
+        final void split(XHashMap map, XNode[] tab, int index, int bit) {
+            // NOTE(review): the lo/hi relinking implementation below is
+            // commented out; this method is currently a no-op stub, so the
+            // parameters map/tab/index/bit are all unused.
+// TreeNode b = this;
+// // Relink into lo and hi lists, preserving order
+// TreeNode loHead = null, loTail = null;
+// TreeNode hiHead = null, hiTail = null;
+// int lc = 0, hc = 0;
+// for (TreeNode e = b, next; e != null; e = next) {
+// next = (TreeNode)e.next;
+// e.next = null;
+// if ((e.hash & bit) == 0) {
+// if ((e.prev = loTail) == null)
+// loHead = e;
+// else
+// loTail.next = e;
+// loTail = e;
+// ++lc;
+// }
+// else {
+// if ((e.prev = hiTail) == null)
+// hiHead = e;
+// else
+// hiTail.next = e;
+// hiTail = e;
+// ++hc;
+// }
+// }
+//
+// if (loHead != null) {
+// if (lc <= UNTREEIFY_THRESHOLD)
+// tab[index] = loHead.untreeify(map);
+// else {
+// tab[index] = loHead;
+// if (hiHead != null) // (else is already treeified)
+// loHead.treeify(tab);
+// }
+// }
+// if (hiHead != null) {
+// if (hc <= UNTREEIFY_THRESHOLD)
+// tab[index + bit] = hiHead.untreeify(map);
+// else {
+// tab[index + bit] = hiHead;
+// if (loHead != null)
+// hiHead.treeify(tab);
+// }
+// }
+        }
+
+ /**
+ * Recursive invariant check
+ */
+        static boolean checkInvariants(TreeNode t) {
+            TreeNode parent = t.parent;
+            TreeNode left = t.left;
+            TreeNode right = t.right;
+            TreeNode prev = t.prev;
+            TreeNode next = (TreeNode)t.next;
+            // Doubly-linked list pointers must be mutually consistent.
+            if (prev != null && prev.next != t) {
+                return false;
+            }
+            if (next != null && next.prev != t) {
+                return false;
+            }
+            // This node must be one of its parent's children.
+            if (parent != null && t != parent.left && t != parent.right) {
+                return false;
+            }
+            // Children must point back here and respect hash ordering.
+            if (left != null && (left.parent != t || left.hash > t.hash)) {
+                return false;
+            }
+            if (right != null && (right.parent != t || right.hash < t.hash)) {
+                return false;
+            }
+            // A red node must not have two red children.
+            if (t.red && left != null && left.red && right != null && right.red) {
+                return false;
+            }
+            // Recurse into both subtrees.
+            if (left != null && !checkInvariants(left)) {
+                return false;
+            }
+            if (right != null && !checkInvariants(right)) {
+                return false;
+            }
+            return true;
+        }
+ }
+
+
+    /**
+     * Prints sizing and collision statistics for this map to {@code out}:
+     * entry count, (best-effort) heap footprint, bucket-type counts, and a
+     * histogram of chain lengths per occupied bucket.
+     *
+     * @param out the stream to print to
+     */
+    public void dumpStats(PrintStream out) {
+        out.printf("%s instance: size: %d%n", this.getClass().getName(), this.size());
+        long size = heapSize();
+        // Fix: guard the division — the original threw ArithmeticException
+        // (divide by zero) when the map was empty.
+        long bytesPer = (this.size() == 0) ? 0 : size / this.size();
+        out.printf(" heap size: %d(bytes), avg bytes per entry: %d, table len: %d%n",
+                size, bytesPer, table.length);
+        long[] types = entryTypes();
+        out.printf(" values: %d, empty: %d%n", types[0], types[1]);
+        int[] rehashes = entryRehashes();
+        out.printf(" hash collision histogram: max: %d, %s%n",
+                rehashes.length - 1, Arrays.toString(rehashes));
+    }
+
+    // Counts bucket kinds: [0] = buckets holding a value, [1] = empty buckets.
+    // (Index 2 is allocated but never written, matching the original layout.)
+    private long[] entryTypes() {
+        long[] counts = new long[3];
+        for (XNode te : table) {
+            if (te.isEmpty()) {
+                counts[1]++;
+            } else {
+                counts[0]++;
+            }
+        }
+        return counts;
+    }
+
+ // Returns a histogram array of the number of rehashs needed to find each key.
+    // Returns a histogram array of the number of rehashs needed to find each key.
+    private int[] entryRehashes() {
+        int[] histogram = new int[table.length + 1];
+        for (XNode slot : table) {
+            if (slot.isEmpty()) {
+                continue;
+            }
+            // Chain length past the bucket head = extra probes for that bucket.
+            int chainLength = 0;
+            for (Node e = slot.next; e != null; e = e.next) {
+                chainLength++;
+            }
+            histogram[chainLength]++;
+        }
+        // Trim trailing zero buckets so the printed histogram stays compact.
+        int last = histogram.length - 1;
+        while (last >= 0 && histogram[last] == 0) {
+            last--;
+        }
+        return Arrays.copyOf(histogram, last + 1);
+    }
+
+    // Best-effort heap footprint: the map object, its table array, and every
+    // chained node (each sized reflectively; 0 if sizing is unavailable).
+    private long heapSize() {
+        long total = objectSizeMaybe(this) + objectSizeMaybe(table);
+        for (XNode slot : table) {
+            if (!slot.isEmpty()) {
+                for (Node e = slot.next; e != null; e = e.next) {
+                    total += objectSizeMaybe(e);
+                }
+            }
+        }
+        return total;
+    }
+
+    // Sizes an object via the reflective hook, or returns 0 when the hook is
+    // missing or the call fails (best-effort, never throws).
+    private long objectSizeMaybe(Object o) {
+        if (mObjectSize == null) {
+            return 0L;
+        }
+        try {
+            return (long) mObjectSize.invoke(null, o);
+        } catch (IllegalAccessException | InvocationTargetException ignored) {
+            // Best effort: a failed reflective call counts as size 0.
+            return 0L;
+        }
+    }
+
+    // True once the reflective sizing method has been resolved.
+    private static boolean hasObjectSize = false;
+    // Handle to Objects.getObjectSize(Object), or null when unavailable.
+    private static Method mObjectSize = getObjectSizeMethod();
+
+    // Probes for Objects.getObjectSize(Object) — presumably a prototype-JDK
+    // extension (not in stock java.util.Objects); TODO confirm. Returns null
+    // when the method does not exist, which disables heap sizing.
+    private static Method getObjectSizeMethod() {
+        try {
+            Method sizeMethod =
+                Objects.class.getDeclaredMethod("getObjectSize", Object.class);
+            hasObjectSize = true;
+            return sizeMethod;
+        } catch (NoSuchMethodException ignored) {
+            return null;
+        }
+    }
+
+}