# HG changeset patch # User briangoetz # Date 1366139761 14400 # Node ID 5148e3e669c5a535a58deb9dd1a3cce59acbaf94 # Parent e4e9f6455f3ceb679ae518d91226c87efa201ee1 imported patch JDK-8010096 diff --git a/src/share/classes/com/sun/tools/jdi/EventSetImpl.java b/src/share/classes/com/sun/tools/jdi/EventSetImpl.java --- a/src/share/classes/com/sun/tools/jdi/EventSetImpl.java +++ b/src/share/classes/com/sun/tools/jdi/EventSetImpl.java @@ -851,6 +851,11 @@ } } + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.DISTINCT); + } + /* below make this unmodifiable */ public boolean add(Event o){ diff --git a/src/share/classes/java/lang/Iterable.java b/src/share/classes/java/lang/Iterable.java --- a/src/share/classes/java/lang/Iterable.java +++ b/src/share/classes/java/lang/Iterable.java @@ -22,26 +22,55 @@ * or visit www.oracle.com if you need additional information or have any * questions. */ - package java.lang; import java.util.Iterator; +import java.util.Objects; +import java.util.function.Consumer; /** * Implementing this interface allows an object to be the target of - * the "foreach" statement. + * the "for-each loop" statement. See + * + * For-each Loop + * * * @param the type of elements returned by the iterator * * @since 1.5 + * @jls 14.14.2 The enhanced for statement */ @FunctionalInterface public interface Iterable { - /** - * Returns an iterator over a set of elements of type T. + * Returns an iterator over elements of type {@code T}. * * @return an Iterator. */ Iterator iterator(); + + /** + * Performs the given action on the contents of the {@code Iterable}, in the + * order elements occur when iterating, until all elements have been + * processed or the action throws an exception. Errors or runtime + * exceptions thrown by the action are relayed to the caller. + * + * @implSpec + *

+     * <p>The default implementation behaves as if:
+     * <pre>{@code
+     *     for (T t : this)
+     *         action.accept(t);
+     * }</pre>
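Editorial illustration (not part of the patch): a minimal sketch of how the new forEach default might be called from client code; the class name and list contents below are invented for the example.

    import java.util.Arrays;
    import java.util.List;

    class ForEachDemo {
        public static void main(String[] args) {
            List<String> names = Arrays.asList("alpha", "beta", "gamma");
            // Equivalent to the enhanced-for loop given in the @implSpec above.
            names.forEach(name -> System.out.println(name));
        }
    }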
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + * @since 1.8 + */ + default void forEach(Consumer action) { + Objects.requireNonNull(action); + for (T t : this) { + action.accept(t); + } + } } + diff --git a/src/share/classes/java/util/Arrays.java b/src/share/classes/java/util/Arrays.java --- a/src/share/classes/java/util/Arrays.java +++ b/src/share/classes/java/util/Arrays.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3518,6 +3518,11 @@ public boolean contains(Object o) { return indexOf(o) != -1; } + + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(a, Spliterator.ORDERED); + } } /** @@ -4300,4 +4305,167 @@ buf.append(']'); dejaVu.remove(a); } + + /** + * Creates a {@link Spliterator} covering all of the specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param Type of elements + * @param array The array, assumed to be unmodified during use + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @since 1.8 + */ + public static Spliterator spliterator(T[] array) { + return Spliterators.spliterator(array, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator} covering the specified range of the + * specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param Type of elements + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @since 1.8 + */ + public static Spliterator spliterator(T[] array, int fromIndex, int toIndex) { + return Spliterators.spliterator(array, fromIndex, toIndex, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfInt} covering all of the specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @since 1.8 + */ + public static Spliterator.OfInt spliterator(int[] array) { + return Spliterators.spliterator(array, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfInt} covering the specified range of the + * specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @since 1.8 + */ + public static Spliterator.OfInt spliterator(int[] array, int fromIndex, int toIndex) { + return Spliterators.spliterator(array, fromIndex, toIndex, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfLong} covering all of the specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @since 1.8 + */ + public static Spliterator.OfLong spliterator(long[] array) { + return Spliterators.spliterator(array, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfLong} covering the specified range of the + * specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @since 1.8 + */ + public static Spliterator.OfLong spliterator(long[] array, int fromIndex, int toIndex) { + return Spliterators.spliterator(array, fromIndex, toIndex, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfDouble} covering all of the specified + * array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @since 1.8 + */ + public static Spliterator.OfDouble spliterator(double[] array) { + return Spliterators.spliterator(array, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } + + /** + * Creates a {@link Spliterator.OfDouble} covering the specified range of + * the specified array. + * + *

The spliterator reports {@link Spliterator#SIZED}, + * {@link Spliterator#SUBSIZED}, {@link Spliterator#ORDERED}, and + * {@link Spliterator#IMMUTABLE}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @return A spliterator from the array + * @throws NullPointerException if the specified array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @since 1.8 + */ + public static Spliterator.OfDouble spliterator(double[] array, int fromIndex, int toIndex) { + return Spliterators.spliterator(array, fromIndex, toIndex, + Spliterator.ORDERED | Spliterator.IMMUTABLE); + } } diff --git a/src/share/classes/java/util/Collection.java b/src/share/classes/java/util/Collection.java --- a/src/share/classes/java/util/Collection.java +++ b/src/share/classes/java/util/Collection.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,6 +104,12 @@ * * Java Collections Framework. * + * @implSpec + * The default method implementations (inherited or otherwise) do not apply any + * synchronization protocol. If a {@code Collection} implementation has a + * specific synchronization protocol, then it must override default + * implementations to apply that protocol. + * * @param the type of elements in this collection * * @author Josh Bloch @@ -453,4 +459,28 @@ * @see Object#equals(Object) */ int hashCode(); + + /** + * Creates a {@link Spliterator} over the elements in this collection. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED}. + * Implementations should document the reporting of additional + * characteristic values. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the collections's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the collection's iterator. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this collection + * @since 1.8 + */ + default Spliterator spliterator() { + return Spliterators.spliterator(this, 0); + } } diff --git a/src/share/classes/java/util/Iterator.java b/src/share/classes/java/util/Iterator.java --- a/src/share/classes/java/util/Iterator.java +++ b/src/share/classes/java/util/Iterator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ package java.util; +import java.util.function.Consumer; + /** * An iterator over a collection. {@code Iterator} takes the place of * {@link Enumeration} in the Java Collections Framework. Iterators @@ -75,6 +77,10 @@ * iteration is in progress in any way other than by calling this * method. * + * @implSpec + * The default implementation throws an instance of + * {@link UnsupportedOperationException} and performs no other action. + * * @throws UnsupportedOperationException if the {@code remove} * operation is not supported by this iterator * @@ -83,5 +89,30 @@ * been called after the last call to the {@code next} * method */ - void remove(); + default void remove() { + throw new UnsupportedOperationException("remove"); + } + + /** + * Performs the given action for each remaining element, in the order + * elements occur when iterating, until all elements have been processed or + * the action throws an exception. Errors or runtime exceptions thrown by + * the action are relayed to the caller. + * + * @implSpec + *

+     * <p>The default implementation behaves as if:
+     * <pre>{@code
+     *     while (hasNext())
+     *         action.accept(next());
+     * }</pre>
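A usage sketch for illustration only (not part of the patch): one element is consumed by hand, and the rest are drained with the new forEachRemaining default; the list is an arbitrary example.

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    class ForEachRemainingDemo {
        public static void main(String[] args) {
            Iterator<Integer> it = Arrays.asList(1, 2, 3, 4).iterator();
            Integer first = it.next();                                 // advance past the first element
            it.forEachRemaining(n -> System.out.println(first + n));   // prints 3, 4, 5
        }
    }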
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + * @since 1.8 + */ + default void forEachRemaining(Consumer action) { + Objects.requireNonNull(action); + while (hasNext()) + action.accept(next()); + } } diff --git a/src/share/classes/java/util/List.java b/src/share/classes/java/util/List.java --- a/src/share/classes/java/util/List.java +++ b/src/share/classes/java/util/List.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -597,4 +597,30 @@ * fromIndex > toIndex) */ List subList(int fromIndex, int toIndex); + + /** + * Creates a {@link Spliterator} over the elements in this list. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED} and + * {@link Spliterator#ORDERED}. Implementations should document the + * reporting of additional characteristic values. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the list's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the collection's iterator. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this list + * @since 1.8 + */ + @Override + default Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.ORDERED); + } } + diff --git a/src/share/classes/java/util/PrimitiveIterator.java b/src/share/classes/java/util/PrimitiveIterator.java new file mode 100644 --- /dev/null +++ b/src/share/classes/java/util/PrimitiveIterator.java @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * A base type for primitive specializations of {@code Iterator}. Specialized + * subtypes are provided for {@link OfInt int}, {@link OfLong long}, and + * {@link OfDouble double} values. + * + *

The specialized subtype default implementations of {@link Iterator#next} + * and {@link Iterator#forEachRemaining(java.util.function.Consumer)} box + * primitive values to instances of their corresponding wrapper class. Such + * boxing may offset any advantages gained when using the primitive + * specializations. To avoid boxing, the corresponding primitive-based methods + * should be used. For example, {@link PrimitiveIterator.OfInt#nextInt()} and + * {@link PrimitiveIterator.OfInt#forEachRemaining(java.util.function.IntConsumer)} + * should be used in preference to {@link PrimitiveIterator.OfInt#next()} and + * {@link PrimitiveIterator.OfInt#forEachRemaining(java.util.function.Consumer)}. + * + *

Iteration of primitive values using boxing-based methods + * {@link Iterator#next next()} and + * {@link Iterator#forEachRemaining(java.util.function.Consumer) forEachRemaining()}, + * does not affect the order in which the values, transformed to boxed values, + * are encountered. + * + * @implNote + * If the boolean system property {@code org.openjdk.java.util.stream.tripwire} + * is set to {@code true} then diagnostic warnings are reported if boxing of + * primitive values occur when operating on primitive subtype specializations. + * + * @param the boxed type of the primitive type + * @since 1.8 + */ +public interface PrimitiveIterator extends Iterator { + + /** + * An Iterator specialized for {@code int} values. + * @since 1.8 + */ + public static interface OfInt extends PrimitiveIterator { + + /** + * Returns the next {@code int} element in the iteration. + * + * @return the next {@code int} element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ + int nextInt(); + + /** + * Performs the given action for each remaining element, in the order + * elements occur when iterating, until all elements have been processed + * or the action throws an exception. Errors or runtime exceptions + * thrown by the action are relayed to the caller. + * + * @implSpec + *

+         * <p>The default implementation behaves as if:
+         * <pre>{@code
+         *     while (hasNext())
+         *         action.accept(nextInt());
+         * }</pre>
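Illustrative sketch (not part of the patch): draining a PrimitiveIterator.OfInt through the IntConsumer overload so no boxing occurs; IntStream is assumed to be available from the companion java.util.stream work.

    import java.util.PrimitiveIterator;
    import java.util.function.IntConsumer;
    import java.util.stream.IntStream;

    class OfIntDemo {
        public static void main(String[] args) {
            PrimitiveIterator.OfInt it = IntStream.range(0, 5).iterator();
            IntConsumer printSquare = i -> System.out.println(i * i);
            it.forEachRemaining(printSquare);   // resolves to the int specialization, no boxing
        }
    }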
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(IntConsumer action) { + while (hasNext()) + action.accept(nextInt()); + } + + /** + * {@inheritDoc} + * @implSpec + * The default implementation boxes the result of calling + * {@link #nextInt()}, and returns that boxed result. + */ + @Override + default Integer next() { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.nextInt()"); + return nextInt(); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code IntConsumer} then it is cast + * to {@code IntConsumer} and passed to {@link #forEachRemaining}; + * otherwise the action is adapted to an instance of + * {@code IntConsumer}, by boxing the argument of {@code IntConsumer}, + * and then passed to {@link #forEachRemaining}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof IntConsumer) { + forEachRemaining((IntConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.forEachRemainingInt(action::accept)"); + forEachRemaining((IntConsumer) action::accept); + } + } + + } + + /** + * An Iterator specialized for {@code long} values. + * @since 1.8 + */ + public static interface OfLong extends PrimitiveIterator { + + /** + * Returns the next {@code long} element in the iteration. + * + * @return the next {@code long} element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ + long nextLong(); + + /** + * Performs the given action for each remaining element, in the order + * elements occur when iterating, until all elements have been processed + * or the action throws an exception. Errors or runtime exceptions + * thrown by the action are relayed to the caller. + * + * @implSpec + *

+         * <p>The default implementation behaves as if:
+         * <pre>{@code
+         *     while (hasNext())
+         *         action.accept(nextLong());
+         * }</pre>
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(LongConsumer action) { + while (hasNext()) + action.accept(nextLong()); + } + + /** + * {@inheritDoc} + * @implSpec + * The default implementation boxes the result of calling + * {@link #nextLong()}, and returns that boxed result. + */ + @Override + default Long next() { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfLong.nextLong()"); + return nextLong(); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code LongConsumer} then it is cast + * to {@code LongConsumer} and passed to {@link #forEachRemaining}; + * otherwise the action is adapted to an instance of + * {@code LongConsumer}, by boxing the argument of {@code LongConsumer}, + * and then passed to {@link #forEachRemaining}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof LongConsumer) { + forEachRemaining((LongConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfLong.forEachRemainingLong(action::accept)"); + forEachRemaining((LongConsumer) action::accept); + } + } + } + + /** + * An Iterator specialized for {@code double} values. + * @since 1.8 + */ + public static interface OfDouble extends PrimitiveIterator { + + /** + * Returns the next {@code double} element in the iteration. + * + * @return the next {@code double} element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ + double nextDouble(); + + /** + * Performs the given action for each remaining element, in the order + * elements occur when iterating, until all elements have been processed + * or the action throws an exception. Errors or runtime exceptions + * thrown by the action are relayed to the caller. + * + * @implSpec + *

+         * <p>The default implementation behaves as if:
+         * <pre>{@code
+         *     while (hasNext())
+         *         action.accept(nextDouble());
+         * }</pre>
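A minimal implementation sketch, for illustration only (not part of the patch): a PrimitiveIterator.OfDouble over a double[]; only hasNext() and nextDouble() are supplied, while next(), remove(), and both forEachRemaining overloads come from the defaults described above.

    import java.util.NoSuchElementException;
    import java.util.PrimitiveIterator;

    class DoubleArrayIterator implements PrimitiveIterator.OfDouble {
        private final double[] values;
        private int cursor = 0;

        DoubleArrayIterator(double[] values) { this.values = values; }

        @Override
        public boolean hasNext() { return cursor < values.length; }

        @Override
        public double nextDouble() {
            if (!hasNext()) throw new NoSuchElementException();
            return values[cursor++];   // unboxed primitive access
        }
    }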
+ * + * @param action The action to be performed for each element + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(DoubleConsumer action) { + while (hasNext()) + action.accept(nextDouble()); + } + + /** + * {@inheritDoc} + * @implSpec + * The default implementation boxes the result of calling + * {@link #nextDouble()}, and returns that boxed result. + */ + @Override + default Double next() { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfDouble.nextLong()"); + return nextDouble(); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code DoubleConsumer} then it is + * cast to {@code DoubleConsumer} and passed to + * {@link #forEachRemaining}; otherwise the action is adapted to + * an instance of {@code DoubleConsumer}, by boxing the argument of + * {@code DoubleConsumer}, and then passed to + * {@link #forEachRemaining}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof DoubleConsumer) { + forEachRemaining((DoubleConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfDouble.forEachRemainingDouble(action::accept)"); + forEachRemaining((DoubleConsumer) action::accept); + } + } + } +} diff --git a/src/share/classes/java/util/Set.java b/src/share/classes/java/util/Set.java --- a/src/share/classes/java/util/Set.java +++ b/src/share/classes/java/util/Set.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -382,4 +382,29 @@ * @see Set#equals(Object) */ int hashCode(); + + /** + * Creates a {@code Spliterator} over the elements in this set. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED} and + * {@link Spliterator#DISTINCT}. Implementations should document the + * reporting of additional characteristic values. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the set's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the collection's iterator. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this set + * @since 1.8 + */ + @Override + default Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.DISTINCT); + } } diff --git a/src/share/classes/java/util/SortedSet.java b/src/share/classes/java/util/SortedSet.java --- a/src/share/classes/java/util/SortedSet.java +++ b/src/share/classes/java/util/SortedSet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -219,4 +219,43 @@ * @throws NoSuchElementException if this set is empty */ E last(); + + /** + * Creates a {@code Spliterator} over the elements in this sorted set. + * + *

The {@code Spliterator} reports {@link Spliterator#SIZED}, + * {@link Spliterator#DISTINCT}, {@link Spliterator#SORTED} and + * {@link Spliterator#ORDERED}. Implementations should document the + * reporting of additional characteristic values. + * + *

The spliterator's comparator (see + * {@link java.util.Spliterator#getComparator()}) must be {@code null} if + * the sorted set's comparator (see {@link #comparator()}) is {@code null}. + * Otherwise, the spliterator's comparator must be the same as or impose the + * same total ordering as the sorted set's comparator. + * + * @implSpec + * The default implementation creates a + * late-binding spliterator + * from the sorted set's {@code Iterator}. The spliterator inherits the + * fail-fast properties of the collection's iterator. The + * spliterator's comparator is the same as the sorted set's comparator. + * + * @implNote + * The created {@code Spliterator} additionally reports + * {@link Spliterator#SUBSIZED}. + * + * @return a {@code Spliterator} over the elements in this sorted set + * @since 1.8 + */ + @Override + default Spliterator spliterator() { + return new Spliterators.IteratorSpliterator( + this, Spliterator.DISTINCT | Spliterator.SORTED | Spliterator.ORDERED) { + @Override + public Comparator getComparator() { + return SortedSet.this.comparator(); + } + }; + } } diff --git a/src/share/classes/java/util/Spliterator.java b/src/share/classes/java/util/Spliterator.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/Spliterator.java @@ -0,0 +1,835 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * An object for traversing and partitioning elements of a source. The source + * of elements covered by a Spliterator could be, for example, an array, a + * {@link Collection}, an IO channel, or a generator function. + * + *

A Spliterator may traverse elements individually ({@link + * #tryAdvance tryAdvance()}) or sequentially in bulk + * ({@link #forEachRemaining forEachRemaining()}). + * + *

A Spliterator may also partition off some of its elements (using + * {@link #trySplit}) as another Spliterator, to be used in + * possibly-parallel operations. Operations using a Spliterator that + * cannot split, or does so in a highly imbalanced or inefficient + * manner, are unlikely to benefit from parallelism. Traversal + * and splitting exhaust elements; each Spliterator is useful for only a single + * bulk computation. + * + *

A Spliterator also reports a set of {@link #characteristics()} of its + * structure, source, and elements from among {@link #ORDERED}, + * {@link #DISTINCT}, {@link #SORTED}, {@link #SIZED}, {@link #NONNULL}, + * {@link #IMMUTABLE}, {@link #CONCURRENT}, and {@link #SUBSIZED}. These may + * be employed by Spliterator clients to control, specialize or simplify + * computation. For example, a Spliterator for a {@link Collection} would + * report {@code SIZED}, a Spliterator for a {@link Set} would report + * {@code DISTINCT}, and a Spliterator for a {@link SortedSet} would also + * report {@code SORTED}. Characteristics are reported as a simple unioned bit + * set. + * + * Some characteristics additionally constrain method behavior; for example if + * {@code ORDERED}, traversal methods must conform to their documented ordering. + * New characteristics may be defined in the future, so implementors should not + * assign meanings to unlisted values. + * + *

A Spliterator that does not report {@code IMMUTABLE} or + * {@code CONCURRENT} is expected to have a documented policy concerning: + * when the spliterator binds to the element source; and detection of + * structural interference of the element source detected after binding. A + * late-binding Spliterator binds to the source of elements at the + * point of first traversal, first split, or first query for estimated size, + * rather than at the time the Spliterator is created. A Spliterator that is + * not late-binding binds to the source of elements at the point of + * construction or first invocation of any method. Modifications made to the + * source prior to binding are reflected when the Spliterator is traversed. + * After binding a Spliterator should, on a best-effort basis, throw + * {@link ConcurrentModificationException} if structural interference is + * detected. Spliterators that do this are called fail-fast. + * + *

Spliterators can provide an estimate of the number of remaining elements + * via the {@link #estimateSize} method. Ideally, as reflected in characteristic + * {@link #SIZED}, this value corresponds exactly to the number of elements + * that would be encountered in a successful traversal. However, even when not + * exactly known, an estimated value may still be useful to operations + * being performed on the source, such as helping to determine whether it is + * preferable to split further or traverse the remaining elements sequentially. + * + *

Despite their obvious utility in parallel algorithms, spliterators are not + * expected to be thread-safe; instead, implementations of parallel algorithms + * using spliterators should ensure that the spliterator is only used by one + * thread at a time. This is generally easy to attain via serial + * thread-confinement, which often is a natural consequence of typical + * parallel algorithms that work by recursive decomposition. A thread calling + * {@link #trySplit()} may hand over the returned Spliterator to another thread, + * which in turn may traverse or further split that Spliterator. The behaviour + * of splitting and traversal is undefined if two or more threads operate + * concurrently on the same spliterator. If the original thread hands a + * spliterator off to another thread for processing, it is best if that handoff + * occurs before any elements are consumed with {@link #tryAdvance(Consumer) + * tryAdvance()}, as certain guarantees (such as the accuracy of + * {@link #estimateSize()} for {@code SIZED} spliterators) are only valid before + * traversal has begun. + * + *

Primitive subtype specializations of {@code Spliterator} are provided for + * {@link OfInt int}, {@link OfLong long}, and {@link OfDouble double} values. + * The subtype default implementations of + * {@link Spliterator#tryAdvance(java.util.function.Consumer)} + * and {@link Spliterator#forEachRemaining(java.util.function.Consumer)} box + * primitive values to instances of their corresponding wrapper class. Such + * boxing may undermine any performance advantages gained by using the primitive + * specializations. To avoid boxing, the corresponding primitive-based methods + * should be used. For example, + * {@link Spliterator.OfInt#tryAdvance(java.util.function.IntConsumer)} + * and {@link Spliterator.OfInt#forEachRemaining(java.util.function.IntConsumer)} + * should be used in preference to + * {@link Spliterator.OfInt#tryAdvance(java.util.function.Consumer)} and + * {@link Spliterator.OfInt#forEachRemaining(java.util.function.Consumer)}. + * Traversal of primitive values using boxing-based methods + * {@link #tryAdvance tryAdvance()} and + * {@link #forEachRemaining(java.util.function.Consumer) forEachRemaining()} + * does not affect the order in which the values, transformed to boxed values, + * are encountered. + * + * @apiNote + *

Spliterators, like {@code Iterator}s, are for traversing the elements of + * a source. The {@code Spliterator} API was designed to support efficient + * parallel traversal in addition to sequential traversal, by supporting + * decomposition as well as single-element iteration. In addition, the + * protocol for accessing elements via a Spliterator is designed to impose + * smaller per-element overhead than {@code Iterator}, and to avoid the inherent + * race involved in having separate methods for {@code hasNext()} and + * {@code next()}. + * + *

For mutable sources, arbitrary and non-deterministic behavior may occur if + * the source is structurally interfered with (elements added, replaced, or + * removed) between the time that the Spliterator binds to its data source and + * the end of traversal. For example, such interference will produce arbitrary, + * non-deterministic results when using the {@code java.util.stream} framework. + * + *

Structural interference of a source can be managed in the following ways + * (in approximate order of decreasing desirability): + *

+ * + *

Example. Here is a class (not a very useful one, except + * for illustration) that maintains an array in which the actual data + * are held in even locations, and unrelated tag data are held in odd + * locations. Its Spliterator ignores the tags. + * + *

+ * <pre> {@code
+ * class TaggedArray<T> {
+ *   private final Object[] elements; // immutable after construction
+ *   TaggedArray(T[] data, Object[] tags) {
+ *     int size = data.length;
+ *     if (tags.length != size) throw new IllegalArgumentException();
+ *     this.elements = new Object[2 * size];
+ *     for (int i = 0, j = 0; i < size; ++i) {
+ *       elements[j++] = data[i];
+ *       elements[j++] = tags[i];
+ *     }
+ *   }
+ *
+ *   public Spliterator<T> spliterator() {
+ *     return new TaggedArraySpliterator<>(elements, 0, elements.length);
+ *   }
+ *
+ *   static class TaggedArraySpliterator<T> implements Spliterator<T> {
+ *     private final Object[] array;
+ *     private int origin; // current index, advanced on split or traversal
+ *     private final int fence; // one past the greatest index
+ *
+ *     TaggedArraySpliterator(Object[] array, int origin, int fence) {
+ *       this.array = array; this.origin = origin; this.fence = fence;
+ *     }
+ *
+ *     public void forEachRemaining(Consumer<? super T> action) {
+ *       for (; origin < fence; origin += 2)
+ *         action.accept((T) array[origin]);
+ *     }
+ *
+ *     public boolean tryAdvance(Consumer<? super T> action) {
+ *       if (origin < fence) {
+ *         action.accept((T) array[origin]);
+ *         origin += 2;
+ *         return true;
+ *       }
+ *       else // cannot advance
+ *         return false;
+ *     }
+ *
+ *     public Spliterator<T> trySplit() {
+ *       int lo = origin; // divide range in half
+ *       int mid = ((lo + fence) >>> 1) & ~1; // force midpoint to be even
+ *       if (lo < mid) { // split out left half
+ *         origin = mid; // reset this Spliterator's origin
+ *         return new TaggedArraySpliterator<>(array, lo, mid);
+ *       }
+ *       else       // too small to split
+ *         return null;
+ *     }
+ *
+ *     public long estimateSize() {
+ *       return (long)((fence - origin) / 2);
+ *     }
+ *
+ *     public int characteristics() {
+ *       return ORDERED | SIZED | IMMUTABLE | SUBSIZED;
+ *     }
+ *   }
+ * }}</pre>
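Illustrative sketch (not part of the patch): traversing the TaggedArray example above through its spliterator, taking one element with tryAdvance and the remainder in bulk; TaggedArray is the hypothetical class from the Javadoc example.

    import java.util.Spliterator;

    class TaggedArrayTraversal {
        static void dump(TaggedArray<String> tagged) {
            Spliterator<String> sp = tagged.spliterator();
            sp.tryAdvance(s -> System.out.println("first: " + s));      // single-element step
            sp.forEachRemaining(s -> System.out.println("rest: " + s)); // bulk traversal of what remains
        }
    }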
+ * + *

As an example how a parallel computation framework, such as the + * {@code java.util.stream} package, would use Spliterator in a parallel + * computation, here is one way to implement an associated parallel forEach, + * that illustrates the primary usage idiom of splitting off subtasks until + * the estimated amount of work is small enough to perform + * sequentially. Here we assume that the order of processing across + * subtasks doesn't matter; different (forked) tasks may further split + * and process elements concurrently in undetermined order. This + * example uses a {@link java.util.concurrent.CountedCompleter}; + * similar usages apply to other parallel task constructions. + * + *

+ * <pre>{@code
+ * static <T> void parEach(TaggedArray<T> a, Consumer<T> action) {
+ *   Spliterator<T> s = a.spliterator();
+ *   long targetBatchSize = s.estimateSize() / (ForkJoinPool.getCommonPoolParallelism() * 8);
+ *   new ParEach(null, s, action, targetBatchSize).invoke();
+ * }
+ *
+ * static class ParEach<T> extends CountedCompleter<Void> {
+ *   final Spliterator<T> spliterator;
+ *   final Consumer<T> action;
+ *   final long targetBatchSize;
+ *
+ *   ParEach(ParEach<T> parent, Spliterator<T> spliterator,
+ *           Consumer<T> action, long targetBatchSize) {
+ *     super(parent);
+ *     this.spliterator = spliterator; this.action = action;
+ *     this.targetBatchSize = targetBatchSize;
+ *   }
+ *
+ *   public void compute() {
+ *     Spliterator<T> sub;
+ *     while (spliterator.estimateSize() > targetBatchSize &&
+ *            (sub = spliterator.trySplit()) != null) {
+ *       addToPendingCount(1);
+ *       new ParEach<>(this, sub, action, targetBatchSize).fork();
+ *     }
+ *     spliterator.forEachRemaining(action);
+ *     propagateCompletion();
+ *   }
+ * }}
+ * + * @implNote + * If the boolean system property {@code org.openjdk.java.util.stream.tripwire} + * is set to {@code true} then diagnostic warnings are reported if boxing of + * primitive values occur when operating on primitive subtype specializations. + * + * @see Collection + * @since 1.8 + */ +public interface Spliterator { + /** + * If a remaining element exists, performs the given action on it, + * returning {@code true}; else returns {@code false}. If this + * Spliterator is {@link #ORDERED} the action is performed on the + * next element in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @param action The action + * @return {@code false} if no remaining elements existed + * upon entry to this method, else {@code true}. + * @throws NullPointerException if the specified action is null + */ + boolean tryAdvance(Consumer action); + + /** + * Performs the given action for each remaining element, sequentially in + * the current thread, until all elements have been processed or the action + * throws an exception. If this Spliterator is {@link #ORDERED}, actions + * are performed in encounter order. Exceptions thrown by the action + * are relayed to the caller. + * + * @implSpec + * The default implementation repeatedly invokes {@link #tryAdvance} until + * it returns {@code false}. It should be overridden whenever possible. + * + * @param action The action + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(Consumer action) { + do { } while (tryAdvance(action)); + } + + /** + * If this spliterator can be partitioned, returns a Spliterator + * covering elements, that will, upon return from this method, not + * be covered by this Spliterator. + * + *

If this Spliterator is {@link #ORDERED}, the returned Spliterator + * must cover a strict prefix of the elements. + * + *

Unless this Spliterator covers an infinite number of elements, + * repeated calls to {@code trySplit()} must eventually return {@code null}. + * Upon non-null return: + *

    + *
+ * <ul>
+ * <li>the value reported for {@code estimateSize()} before splitting,
+ * if not already zero or {@code Long.MAX_VALUE}, must, after splitting, be
+ * greater than {@code estimateSize()} for this and the returned
+ * Spliterator; and</li>
+ * <li>if this Spliterator is {@code SUBSIZED}, then {@code estimateSize()}
+ * for this spliterator before splitting must be equal to the sum of
+ * {@code estimateSize()} for this and the returned Spliterator after
+ * splitting.</li>
+ * </ul>
+ * + *
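Illustrative sketch (not part of the patch): splitting a list spliterator once and walking both pieces; because list spliterators are ORDERED, the returned spliterator covers a strict prefix, and trySplit may return null if no split is possible.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Spliterator;

    class TrySplitDemo {
        public static void main(String[] args) {
            List<Integer> list = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5, 6));
            Spliterator<Integer> rest = list.spliterator();
            Spliterator<Integer> prefix = rest.trySplit();   // may be null
            if (prefix != null) {
                prefix.forEachRemaining(n -> System.out.println("prefix: " + n));
            }
            rest.forEachRemaining(n -> System.out.println("suffix: " + n));
        }
    }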

This method may return {@code null} for any reason, + * including emptiness, inability to split after traversal has + * commenced, data structure constraints, and efficiency + * considerations. + * + * @apiNote + * An ideal {@code trySplit} method efficiently (without + * traversal) divides its elements exactly in half, allowing + * balanced parallel computation. Many departures from this ideal + * remain highly effective; for example, only approximately + * splitting an approximately balanced tree, or for a tree in + * which leaf nodes may contain either one or two elements, + * failing to further split these nodes. However, large + * deviations in balance and/or overly inefficient {@code + * trySplit} mechanics typically result in poor parallel + * performance. + * + * @return a {@code Spliterator} covering some portion of the + * elements, or {@code null} if this spliterator cannot be split + */ + Spliterator trySplit(); + + /** + * Returns an estimate of the number of elements that would be + * encountered by a {@link #forEachRemaining} traversal, or returns {@link + * Long#MAX_VALUE} if infinite, unknown, or too expensive to compute. + * + *

If this Spliterator is {@link #SIZED} and has not yet been partially + * traversed or split, or this Spliterator is {@link #SUBSIZED} and has + * not yet been partially traversed, this estimate must be an accurate + * count of elements that would be encountered by a complete traversal. + * Otherwise, this estimate may be arbitrarily inaccurate, but must decrease + * as specified across invocations of {@link #trySplit}. + * + * @apiNote + * Even an inexact estimate is often useful and inexpensive to compute. + * For example, a sub-spliterator of an approximately balanced binary tree + * may return a value that estimates the number of elements to be half of + * that of its parent; if the root Spliterator does not maintain an + * accurate count, it could estimate size to be the power of two + * corresponding to its maximum depth. + * + * @return the estimated size, or {@code Long.MAX_VALUE} if infinite, + * unknown, or too expensive to compute. + */ + long estimateSize(); + + /** + * Convenience method that returns {@link #estimateSize()} if this + * Spliterator is {@link #SIZED}, else {@code -1}. + * @implSpec + * The default returns the result of {@code estimateSize()} if the + * Spliterator reports a characteristic of {@code SIZED}, and {@code -1} + * otherwise. + * + * @return the exact size, if known, else {@code -1}. + */ + default long getExactSizeIfKnown() { + return (characteristics() & SIZED) == 0 ? -1L : estimateSize(); + } + + /** + * Returns a set of characteristics of this Spliterator and its + * elements. The result is represented as ORed values from {@link + * #ORDERED}, {@link #DISTINCT}, {@link #SORTED}, {@link #SIZED}, + * {@link #NONNULL}, {@link #IMMUTABLE}, {@link #CONCURRENT}, + * {@link #SUBSIZED}. Repeated calls to {@code characteristics()} on + * a given spliterator should always return the same result. + * + *

If a Spliterator reports an inconsistent set of + * characteristics (either those returned from a single invocation + * or across multiple invocations), no guarantees can be made + * about any computation using this Spliterator. + * + * @return a representation of characteristics + */ + int characteristics(); + + /** + * Returns {@code true} if this Spliterator's {@link + * #characteristics} contain all of the given characteristics. + * + * @implSpec + * The default implementation returns true if the corresponding bits + * of the given characteristics are set. + * + * @return {@code true} if all the specified characteristics are present, + * else {@code false} + */ + default boolean hasCharacteristics(int characteristics) { + return (characteristics() & characteristics) == characteristics; + } + + /** + * If this Spliterator's source is {@link #SORTED} by a {@link Comparator}, + * returns that {@code Comparator}. If the source is {@code SORTED} in + * {@linkplain Comparable natural order, returns {@code null}. Otherwise, + * if the source is not {@code SORTED}, throws {@link IllegalStateException}. + * + * @implSpec + * The default implementation always throws {@link IllegalStateException}. + * + * @return a Comparator, or {@code null} if the elements are sorted in the + * natural order. + * @throws IllegalStateException if the spliterator does not report + * a characteristic of {@code SORTED}. + */ + default Comparator getComparator() { + throw new IllegalStateException(); + } + + /** + * Characteristic value signifying that an encounter order is defined for + * elements. If so, this Spliterator guarantees that method + * {@link #trySplit} splits a strict prefix of elements, that method + * {@link #tryAdvance} steps by one element in prefix order, and that + * {@link #forEachRemaining} performs actions in encounter order. + * + *

A {@link Collection} has an encounter order if the corresponding + * {@link Collection#iterator} documents an order. If so, the encounter + * order is the same as the documented order. Otherwise, a collection does + * not have an encounter order. + * + * @apiNote Encounter order is guaranteed to be ascending index order for + * any {@link List}. But no order is guaranteed for hash-based collections + * such as {@link HashSet}. Clients of a Spliterator that reports + * {@code ORDERED} are expected to preserve ordering constraints in + * non-commutative parallel computations. + */ + public static final int ORDERED = 0x00000010; + + /** + * Characteristic value signifying that, for each pair of + * encountered elements {@code x, y}, {@code !x.equals(y)}. This + * applies for example, to a Spliterator based on a {@link Set}. + */ + public static final int DISTINCT = 0x00000001; + + /** + * Characteristic value signifying that encounter order follows a defined + * sort order. If so, method {@link #getComparator()} returns the associated + * Comparator, or {@code null} if all elements are {@link Comparable} and + * are sorted by their natural ordering. + * + *

A Spliterator that reports {@code SORTED} must also report + * {@code ORDERED}. + * + * @apiNote The spliterators for {@code Collection} classes in the JDK that + * implement {@link NavigableSet} or {@link SortedSet} report {@code SORTED}. + */ + public static final int SORTED = 0x00000004; + + /** + * Characteristic value signifying that the value returned from + * {@code estimateSize()} prior to traversal or splitting represents a + * finite size that, in the absence of structural source modification, + * represents an exact count of the number of elements that would be + * encountered by a complete traversal. + * + * @apiNote Most Spliterators for Collections, that cover all elements of a + * {@code Collection} report this characteristic. Sub-spliterators, such as + * those for {@link HashSet}, that cover a sub-set of elements and + * approximate their reported size do not. + */ + public static final int SIZED = 0x00000040; + + /** + * Characteristic value signifying that the source guarantees that + * encountered elements will not be {@code null}. (This applies, + * for example, to most concurrent collections, queues, and maps.) + */ + public static final int NONNULL = 0x00000100; + + /** + * Characteristic value signifying that the element source cannot be + * structurally modified; that is, elements cannot be added, replaced, or + * removed, so such changes cannot occur during traversal. A Spliterator + * that does not report {@code IMMUTABLE} or {@code CONCURRENT} is expected + * to have a documented policy (for example throwing + * {@link ConcurrentModificationException}) concerning structural + * interference detected during traversal. + */ + public static final int IMMUTABLE = 0x00000400; + + /** + * Characteristic value signifying that the element source may be safely + * concurrently modified (allowing additions, replacements, and/or removals) + * by multiple threads without external synchronization. If so, the + * Spliterator is expected to have a documented policy concerning the impact + * of modifications during traversal. + * + *

A top-level Spliterator should not report {@code CONCURRENT} and + * {@code SIZED}, since the finite size, if known, may change if the source + * is concurrently modified during traversal. Such a Spliterator is + * inconsistent and no guarantees can be made about any computation using + * that Spliterator. Sub-spliterators may report {@code SIZED} if the + * sub-split size is known and additions or removals to the source are not + * reflected when traversing. + * + * @apiNote Most concurrent collections maintain a consistency policy + * guaranteeing accuracy with respect to elements present at the point of + * Spliterator construction, but possibly not reflecting subsequent + * additions or removals. + */ + public static final int CONCURRENT = 0x00001000; + + /** + * Characteristic value signifying that all Spliterators resulting from + * {@code trySplit()} will be both {@link #SIZED} and {@link #SUBSIZED}. + * (This means that all child Spliterators, whether direct or indirect, will + * be {@code SIZED}.) + * + *

A Spliterator that does not report {@code SIZED} as required by + * {@code SUBSIZED} is inconsistent and no guarantees can be made about any + * computation using that Spliterator. + * + * @apiNote Some spliterators, such as the top-level spliterator for an + * approximately balanced binary tree, will report {@code SIZED} but not + * {@code SUBSIZED}, since it is common to know the size of the entire tree + * but not the exact sizes of subtrees. + */ + public static final int SUBSIZED = 0x00004000; + + /** + * A Spliterator specialized for {@code int} values. + * @since 1.8 + */ + public interface OfInt extends Spliterator { + + @Override + OfInt trySplit(); + + /** + * If a remaining element exists, performs the given action on it, + * returning {@code true}; else returns {@code false}. If this + * Spliterator is {@link #ORDERED} the action is performed on the + * next element in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @param action The action + * @return {@code false} if no remaining elements existed + * upon entry to this method, else {@code true}. + * @throws NullPointerException if the specified action is null + */ + boolean tryAdvance(IntConsumer action); + + /** + * Performs the given action for each remaining element, sequentially in + * the current thread, until all elements have been processed or the + * action throws an exception. If this Spliterator is {@link #ORDERED}, + * actions are performed in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @implSpec + * The default implementation repeatedly invokes {@link #tryAdvance} + * until it returns {@code false}. It should be overridden whenever + * possible. + * + * @param action The action + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(IntConsumer action) { + do { } while (tryAdvance(action)); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code IntConsumer} then it is cast + * to {@code IntConsumer} and passed to + * {@link #tryAdvance(java.util.function.IntConsumer)}; otherwise + * the action is adapted to an instance of {@code IntConsumer}, by + * boxing the argument of {@code IntConsumer}, and then passed to + * {@link #tryAdvance(java.util.function.IntConsumer)}. + */ + @Override + default boolean tryAdvance(Consumer action) { + if (action instanceof IntConsumer) { + return tryAdvance((IntConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfInt.tryAdvance((IntConsumer) action::accept)"); + return tryAdvance((IntConsumer) action::accept); + } + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code IntConsumer} then it is cast + * to {@code IntConsumer} and passed to + * {@link #forEachRemaining(java.util.function.IntConsumer)}; otherwise + * the action is adapted to an instance of {@code IntConsumer}, by + * boxing the argument of {@code IntConsumer}, and then passed to + * {@link #forEachRemaining(java.util.function.IntConsumer)}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof IntConsumer) { + forEachRemaining((IntConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfInt.forEachRemaining((IntConsumer) action::accept)"); + forEachRemaining((IntConsumer) action::accept); + } + } + } + + /** + * A Spliterator specialized for {@code long} values. 
+ * @since 1.8 + */ + public interface OfLong extends Spliterator { + + @Override + OfLong trySplit(); + + /** + * If a remaining element exists, performs the given action on it, + * returning {@code true}; else returns {@code false}. If this + * Spliterator is {@link #ORDERED} the action is performed on the + * next element in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @param action The action + * @return {@code false} if no remaining elements existed + * upon entry to this method, else {@code true}. + * @throws NullPointerException if the specified action is null + */ + boolean tryAdvance(LongConsumer action); + + /** + * Performs the given action for each remaining element, sequentially in + * the current thread, until all elements have been processed or the + * action throws an exception. If this Spliterator is {@link #ORDERED}, + * actions are performed in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @implSpec + * The default implementation repeatedly invokes {@link #tryAdvance} + * until it returns {@code false}. It should be overridden whenever + * possible. + * + * @param action The action + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(LongConsumer action) { + do { } while (tryAdvance(action)); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code LongConsumer} then it is cast + * to {@code LongConsumer} and passed to + * {@link #tryAdvance(java.util.function.LongConsumer)}; otherwise + * the action is adapted to an instance of {@code LongConsumer}, by + * boxing the argument of {@code LongConsumer}, and then passed to + * {@link #tryAdvance(java.util.function.LongConsumer)}. + */ + @Override + default boolean tryAdvance(Consumer action) { + if (action instanceof LongConsumer) { + return tryAdvance((LongConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfLong.tryAdvance((LongConsumer) action::accept)"); + return tryAdvance((LongConsumer) action::accept); + } + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code LongConsumer} then it is cast + * to {@code LongConsumer} and passed to + * {@link #forEachRemaining(java.util.function.LongConsumer)}; otherwise + * the action is adapted to an instance of {@code LongConsumer}, by + * boxing the argument of {@code LongConsumer}, and then passed to + * {@link #forEachRemaining(java.util.function.LongConsumer)}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof LongConsumer) { + forEachRemaining((LongConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfLong.forEachRemaining((LongConsumer) action::accept)"); + forEachRemaining((LongConsumer) action::accept); + } + } + } + + /** + * A Spliterator specialized for {@code double} values. + * @since 1.8 + */ + public interface OfDouble extends Spliterator { + + @Override + OfDouble trySplit(); + + /** + * If a remaining element exists, performs the given action on it, + * returning {@code true}; else returns {@code false}. If this + * Spliterator is {@link #ORDERED} the action is performed on the + * next element in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @param action The action + * @return {@code false} if no remaining elements existed + * upon entry to this method, else {@code true}. 
+ * @throws NullPointerException if the specified action is null + */ + boolean tryAdvance(DoubleConsumer action); + + /** + * Performs the given action for each remaining element, sequentially in + * the current thread, until all elements have been processed or the + * action throws an exception. If this Spliterator is {@link #ORDERED}, + * actions are performed in encounter order. Exceptions thrown by the + * action are relayed to the caller. + * + * @implSpec + * The default implementation repeatedly invokes {@link #tryAdvance} + * until it returns {@code false}. It should be overridden whenever + * possible. + * + * @param action The action + * @throws NullPointerException if the specified action is null + */ + default void forEachRemaining(DoubleConsumer action) { + do { } while (tryAdvance(action)); + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code DoubleConsumer} then it is + * cast to {@code DoubleConsumer} and passed to + * {@link #tryAdvance(java.util.function.DoubleConsumer)}; otherwise + * the action is adapted to an instance of {@code DoubleConsumer}, by + * boxing the argument of {@code DoubleConsumer}, and then passed to + * {@link #tryAdvance(java.util.function.DoubleConsumer)}. + */ + @Override + default boolean tryAdvance(Consumer action) { + if (action instanceof DoubleConsumer) { + return tryAdvance((DoubleConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfDouble.tryAdvance((DoubleConsumer) action::accept)"); + return tryAdvance((DoubleConsumer) action::accept); + } + } + + /** + * {@inheritDoc} + * @implSpec + * If the action is an instance of {@code DoubleConsumer} then it is + * cast to {@code DoubleConsumer} and passed to + * {@link #forEachRemaining(java.util.function.DoubleConsumer)}; + * otherwise the action is adapted to an instance of + * {@code DoubleConsumer}, by boxing the argument of + * {@code DoubleConsumer}, and then passed to + * {@link #forEachRemaining(java.util.function.DoubleConsumer)}. + */ + @Override + default void forEachRemaining(Consumer action) { + if (action instanceof DoubleConsumer) { + forEachRemaining((DoubleConsumer) action); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), + "{0} calling Spliterator.OfDouble.forEachRemaining((DoubleConsumer) action::accept)"); + forEachRemaining((DoubleConsumer) action::accept); + } + } + } +} diff --git a/src/share/classes/java/util/Spliterators.java b/src/share/classes/java/util/Spliterators.java new file mode 100644 --- /dev/null +++ b/src/share/classes/java/util/Spliterators.java @@ -0,0 +1,2154 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * Static classes and methods for operating on or creating instances of + * {@link Spliterator} and its primitive specializations + * {@link Spliterator.OfInt}, {@link Spliterator.OfLong}, and + * {@link Spliterator.OfDouble}. + * + * @see Spliterator + * @since 1.8 + */ +public final class Spliterators { + + // Suppresses default constructor, ensuring non-instantiability. + private Spliterators() {} + + // Empty spliterators + + /** + * Creates an empty {@code Spliterator} + * + *

The empty spliterator reports {@link Spliterator#SIZED} and + * {@link Spliterator#SUBSIZED}. Calls to + * {@link java.util.Spliterator#trySplit()} always return {@code null}. + * + * @param Type of elements + * @return An empty spliterator + */ + @SuppressWarnings("unchecked") + public static Spliterator emptySpliterator() { + return (Spliterator) EMPTY_SPLITERATOR; + } + + private static final Spliterator EMPTY_SPLITERATOR = + new EmptySpliterator.OfRef<>(); + + /** + * Creates an empty {@code Spliterator.OfInt} + * + *

The empty spliterator reports {@link Spliterator#SIZED} and + * {@link Spliterator#SUBSIZED}. Calls to + * {@link java.util.Spliterator#trySplit()} always return {@code null}. + * + * @return An empty spliterator + */ + public static Spliterator.OfInt emptyIntSpliterator() { + return EMPTY_INT_SPLITERATOR; + } + + private static final Spliterator.OfInt EMPTY_INT_SPLITERATOR = + new EmptySpliterator.OfInt(); + + /** + * Creates an empty {@code Spliterator.OfLong} + * + *

The empty spliterator reports {@link Spliterator#SIZED} and + * {@link Spliterator#SUBSIZED}. Calls to + * {@link java.util.Spliterator#trySplit()} always return {@code null}. + * + * @return An empty spliterator + */ + public static Spliterator.OfLong emptyLongSpliterator() { + return EMPTY_LONG_SPLITERATOR; + } + + private static final Spliterator.OfLong EMPTY_LONG_SPLITERATOR = + new EmptySpliterator.OfLong(); + + /** + * Creates an empty {@code Spliterator.OfDouble} + * + *

The empty spliterator reports {@link Spliterator#SIZED} and + * {@link Spliterator#SUBSIZED}. Calls to + * {@link java.util.Spliterator#trySplit()} always return {@code null}. + * + * @return An empty spliterator + */ + public static Spliterator.OfDouble emptyDoubleSpliterator() { + return EMPTY_DOUBLE_SPLITERATOR; + } + + private static final Spliterator.OfDouble EMPTY_DOUBLE_SPLITERATOR = + new EmptySpliterator.OfDouble(); + + // Array-based spliterators + + /** + * Creates a {@code Spliterator} covering the elements of a given array, + * using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(Object[])}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param Type of elements + * @param array The array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @see Arrays#spliterator(Object[]) + */ + public static Spliterator spliterator(Object[] array, + int additionalCharacteristics) { + return new ArraySpliterator<>(Objects.requireNonNull(array), + additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator} covering a range of elements of a given + * array, using a customized set of spliterator characteristics. + * + *
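// Editorial sketch, not part of the patch: a typical call to the full-array
// factory shown above, adding IMMUTABLE and ORDERED to the implied
// SIZED/SUBSIZED characteristics. Names below are illustrative only.
import java.util.Spliterator;
import java.util.Spliterators;

class ArrayFactoryExample {
    public static void main(String[] args) {
        String[] words = {"alpha", "beta", "gamma"};
        Spliterator<String> s = Spliterators.spliterator(
                words, Spliterator.IMMUTABLE | Spliterator.ORDERED);
        s.forEachRemaining(System.out::println);   // traverses every remaining element
    }
}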

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(Object[])}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param Type of elements + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @see Arrays#spliterator(Object[], int, int) + */ + public static Spliterator spliterator(Object[] array, int fromIndex, int toIndex, + int additionalCharacteristics) { + checkFromToBounds(Objects.requireNonNull(array).length, fromIndex, toIndex); + return new ArraySpliterator<>(array, fromIndex, toIndex, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfInt} covering the elements of a given array, + * using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(int[])}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @see Arrays#spliterator(int[]) + */ + public static Spliterator.OfInt spliterator(int[] array, + int additionalCharacteristics) { + return new IntArraySpliterator(Objects.requireNonNull(array), additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfInt} covering a range of elements of a + * given array, using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(int[], int, int)}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @see Arrays#spliterator(int[], int, int) + */ + public static Spliterator.OfInt spliterator(int[] array, int fromIndex, int toIndex, + int additionalCharacteristics) { + checkFromToBounds(Objects.requireNonNull(array).length, fromIndex, toIndex); + return new IntArraySpliterator(array, fromIndex, toIndex, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfLong} covering the elements of a given array, + * using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(long[])}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @see Arrays#spliterator(long[]) + */ + public static Spliterator.OfLong spliterator(long[] array, + int additionalCharacteristics) { + return new LongArraySpliterator(Objects.requireNonNull(array), additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfLong} covering a range of elements of a + * given array, using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(long[], int, int)}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report. (For example, if it is + * known the array will not be further modified, specify {@code IMMUTABLE}; + * if the array data is considered to have an an encounter order, specify + * {@code ORDERED}). The method {@link Arrays#spliterator(long[], int, int)} can + * often be used instead, which returns a spliterator that reports + * {@code SIZED}, {@code SUBSIZED}, {@code IMMUTABLE}, and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @see Arrays#spliterator(long[], int, int) + */ + public static Spliterator.OfLong spliterator(long[] array, int fromIndex, int toIndex, + int additionalCharacteristics) { + checkFromToBounds(Objects.requireNonNull(array).length, fromIndex, toIndex); + return new LongArraySpliterator(array, fromIndex, toIndex, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfDouble} covering the elements of a given array, + * using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(double[])}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report; it is common to + * additionally specify {@code IMMUTABLE} and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @see Arrays#spliterator(double[]) + */ + public static Spliterator.OfDouble spliterator(double[] array, + int additionalCharacteristics) { + return new DoubleArraySpliterator(Objects.requireNonNull(array), additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfDouble} covering a range of elements of a + * given array, using a customized set of spliterator characteristics. + * + *

This method is provided as an implementation convenience for + * Spliterators which store portions of their elements in arrays, and need + * fine control over Spliterator characteristics. Most other situations in + * which a Spliterator for an array is needed should use + * {@link Arrays#spliterator(double[], int, int)}. + * + *

The returned spliterator always reports the characteristics + * {@code SIZED} and {@code SUBSIZED}. The caller may provide additional + * characteristics for the spliterator to report. (For example, if it is + * known the array will not be further modified, specify {@code IMMUTABLE}; + * if the array data is considered to have an an encounter order, specify + * {@code ORDERED}). The method {@link Arrays#spliterator(long[], int, int)} can + * often be used instead, which returns a spliterator that reports + * {@code SIZED}, {@code SUBSIZED}, {@code IMMUTABLE}, and {@code ORDERED}. + * + * @param array The array, assumed to be unmodified during use + * @param fromIndex The least index (inclusive) to cover + * @param toIndex One past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator for an array + * @throws NullPointerException if the given array is {@code null} + * @throws ArrayIndexOutOfBoundsException if {@code fromIndex} is negative, + * {@code toIndex} is less than {@code fromIndex}, or + * {@code toIndex} is greater than the array size + * @see Arrays#spliterator(double[], int, int) + */ + public static Spliterator.OfDouble spliterator(double[] array, int fromIndex, int toIndex, + int additionalCharacteristics) { + checkFromToBounds(Objects.requireNonNull(array).length, fromIndex, toIndex); + return new DoubleArraySpliterator(array, fromIndex, toIndex, additionalCharacteristics); + } + + /** + * Validate inclusive start index and exclusive end index against the length + * of an array. + * @param arrayLength The length of the array + * @param origin The inclusive start index + * @param fence The exclusive end index + * @throws ArrayIndexOutOfBoundsException if the start index is greater than + * the end index, if the start index is negative, or the end index is + * greater than the array length + */ + private static void checkFromToBounds(int arrayLength, int origin, int fence) { + if (origin > fence) { + throw new IllegalArgumentException( + "origin(" + origin + ") > fence(" + fence + ")"); + } + if (origin < 0) { + throw new ArrayIndexOutOfBoundsException(origin); + } + if (fence > arrayLength) { + throw new ArrayIndexOutOfBoundsException(fence); + } + } + + // Iterator-based spliterators + + /** + * Creates a {@code Spliterator} using the given collection's + * {@link java.util.Collection#iterator()} as the source of elements, and + * reporting its {@link java.util.Collection#size()} as its initial size. + * + *

The spliterator is + * late-binding, inherits + * the fail-fast properties of the collection's iterator, and + * implements {@code trySplit} to permit limited parallelism. + * + * @param Type of elements + * @param c The collection + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator from an iterator + * @throws NullPointerException if the given collection is {@code null} + */ + public static Spliterator spliterator(Collection c, + int additionalCharacteristics) { + return new IteratorSpliterator<>(Objects.requireNonNull(c), + additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator} using a given {@code Iterator} + * as the source of elements, and with a given initially reported size. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned, or the initially reported + * size is not equal to the actual number of elements in the source. + * + * @param Type of elements + * @param iterator The iterator for the source + * @param size The number of elements in the source, to be reported as + * initial {@code estimateSize} + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator spliterator(Iterator iterator, + long size, + int additionalCharacteristics) { + return new IteratorSpliterator<>(Objects.requireNonNull(iterator), size, + additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator} using a given {@code Iterator} + * as the source of elements, with no initial size estimate. + * + *
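// Editorial sketch, not part of the patch: wrapping an Iterator whose element
// count is known up front, so the spliterator can report the size given here.
import java.util.Arrays;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;

class SizedIteratorExample {
    public static void main(String[] args) {
        List<String> src = Arrays.asList("a", "b", "c");
        Spliterator<String> s = Spliterators.spliterator(
                src.iterator(), src.size(), Spliterator.ORDERED);
        System.out.println(s.estimateSize());      // 3, the initially reported size
        s.forEachRemaining(System.out::println);
    }
}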

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned. + * + * @param Type of elements + * @param iterator The iterator for the source + * @param characteristics Properties of this spliterator's source + * or elements ({@code SIZED} and {@code SUBSIZED}, if supplied, are + * ignored and are not reported.) + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator spliteratorUnknownSize(Iterator iterator, + int characteristics) { + return new IteratorSpliterator<>(Objects.requireNonNull(iterator), characteristics); + } + + /** + * Creates a {@code Spliterator.OfInt} using a given + * {@code IntStream.IntIterator} as the source of elements, and with a given + * initially reported size. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned, or the initially reported + * size is not equal to the actual number of elements in the source. + * + * @param iterator The iterator for the source + * @param size The number of elements in the source, to be reported as + * initial {@code estimateSize}. + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfInt spliterator(PrimitiveIterator.OfInt iterator, + long size, + int additionalCharacteristics) { + return new IntIteratorSpliterator(Objects.requireNonNull(iterator), + size, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfInt} using a given + * {@code IntStream.IntIterator} as the source of elements, with no initial + * size estimate. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned. + * + * @param iterator The iterator for the source + * @param characteristics Properties of this spliterator's source + * or elements ({@code SIZED} and {@code SUBSIZED}, if supplied, are + * ignored and are not reported.) + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfInt spliteratorUnknownSize(PrimitiveIterator.OfInt iterator, + int characteristics) { + return new IntIteratorSpliterator(Objects.requireNonNull(iterator), characteristics); + } + + /** + * Creates a {@code Spliterator.OfLong} using a given + * {@code LongStream.LongIterator} as the source of elements, and with a + * given initially reported size. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned, or the initially reported + * size is not equal to the actual number of elements in the source. + * + * @param iterator The iterator for the source + * @param size The number of elements in the source, to be reported as + * initial {@code estimateSize}. + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfLong spliterator(PrimitiveIterator.OfLong iterator, + long size, + int additionalCharacteristics) { + return new LongIteratorSpliterator(Objects.requireNonNull(iterator), + size, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfLong} using a given + * {@code LongStream.LongIterator} as the source of elements, with no + * initial size estimate. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned. + * + * @param iterator The iterator for the source + * @param characteristics Properties of this spliterator's source + * or elements ({@code SIZED} and {@code SUBSIZED}, if supplied, are + * ignored and are not reported.) + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfLong spliteratorUnknownSize(PrimitiveIterator.OfLong iterator, + int characteristics) { + return new LongIteratorSpliterator(Objects.requireNonNull(iterator), characteristics); + } + + /** + * Creates a {@code Spliterator.OfDouble} using a given + * {@code DoubleStream.DoubleIterator} as the source of elements, and with a + * given initially reported size. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned, or the initially reported + * size is not equal to the actual number of elements in the source. + * + * @param iterator The iterator for the source + * @param size The number of elements in the source, to be reported as + * initial {@code estimateSize} + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfDouble spliterator(PrimitiveIterator.OfDouble iterator, + long size, + int additionalCharacteristics) { + return new DoubleIteratorSpliterator(Objects.requireNonNull(iterator), + size, additionalCharacteristics); + } + + /** + * Creates a {@code Spliterator.OfDouble} using a given + * {@code DoubleStream.DoubleIterator} as the source of elements, with no + * initial size estimate. + * + *

The spliterator is not + * late-binding, inherits + * the fail-fast properties of the iterator, and implements + * {@code trySplit} to permit limited parallelism. + * + *

Traversal of elements should be accomplished through the spliterator. + * The behaviour of splitting and traversal is undefined if the iterator is + * operated on after the spliterator is returned. + * + * @param iterator The iterator for the source + * @param characteristics Properties of this spliterator's source + * or elements ({@code SIZED} and {@code SUBSIZED}, if supplied, are + * ignored and are not reported.) + * @return A spliterator from an iterator + * @throws NullPointerException if the given iterator is {@code null} + */ + public static Spliterator.OfDouble spliteratorUnknownSize(PrimitiveIterator.OfDouble iterator, + int characteristics) { + return new DoubleIteratorSpliterator(Objects.requireNonNull(iterator), characteristics); + } + + // Iterators from Spliterators + + /** + * Creates an {@code Iterator} from a {@code Spliterator}. + * + *

Traversal of elements should be accomplished through the iterator. + * The behaviour of traversal is undefined if the spliterator is operated + * after the iterator is returned. + * + * @param Type of elements + * @param spliterator The spliterator + * @return An iterator + * @throws NullPointerException if the given spliterator is {@code null} + */ + public static Iterator iteratorFromSpliterator(Spliterator spliterator) { + Objects.requireNonNull(spliterator); + class Adapter implements Iterator, Consumer { + boolean valueReady = false; + T nextElement; + + @Override + public void accept(T t) { + valueReady = true; + nextElement = t; + } + + @Override + public boolean hasNext() { + if (!valueReady) + spliterator.tryAdvance(this); + return valueReady; + } + + @Override + public T next() { + if (!valueReady && !hasNext()) + throw new NoSuchElementException(); + else { + valueReady = false; + return nextElement; + } + } + } + + return new Adapter(); + } + + /** + * Creates an {@code PrimitiveIterator.OfInt} from a + * {@code Spliterator.OfInt}. + * + *

Traversal of elements should be accomplished through the iterator. + * The behaviour of traversal is undefined if the spliterator is operated + * after the iterator is returned. + * + * @param spliterator The spliterator + * @return An iterator + * @throws NullPointerException if the given spliterator is {@code null} + */ + public static PrimitiveIterator.OfInt iteratorFromSpliterator(Spliterator.OfInt spliterator) { + Objects.requireNonNull(spliterator); + class Adapter implements PrimitiveIterator.OfInt, IntConsumer { + boolean valueReady = false; + int nextElement; + + @Override + public void accept(int t) { + valueReady = true; + nextElement = t; + } + + @Override + public boolean hasNext() { + if (!valueReady) + spliterator.tryAdvance(this); + return valueReady; + } + + @Override + public int nextInt() { + if (!valueReady && !hasNext()) + throw new NoSuchElementException(); + else { + valueReady = false; + return nextElement; + } + } + } + + return new Adapter(); + } + + /** + * Creates an {@code PrimitiveIterator.OfLong} from a + * {@code Spliterator.OfLong}. + * + *

Traversal of elements should be accomplished through the iterator. + * The behaviour of traversal is undefined if the spliterator is operated + * after the iterator is returned. + * + * @param spliterator The spliterator + * @return An iterator + * @throws NullPointerException if the given spliterator is {@code null} + */ + public static PrimitiveIterator.OfLong iteratorFromSpliterator(Spliterator.OfLong spliterator) { + Objects.requireNonNull(spliterator); + class Adapter implements PrimitiveIterator.OfLong, LongConsumer { + boolean valueReady = false; + long nextElement; + + @Override + public void accept(long t) { + valueReady = true; + nextElement = t; + } + + @Override + public boolean hasNext() { + if (!valueReady) + spliterator.tryAdvance(this); + return valueReady; + } + + @Override + public long nextLong() { + if (!valueReady && !hasNext()) + throw new NoSuchElementException(); + else { + valueReady = false; + return nextElement; + } + } + } + + return new Adapter(); + } + + /** + * Creates an {@code PrimitiveIterator.OfDouble} from a + * {@code Spliterator.OfDouble}. + * + *

Traversal of elements should be accomplished through the iterator. + * The behaviour of traversal is undefined if the spliterator is operated + * after the iterator is returned. + * + * @param spliterator The spliterator + * @return An iterator + * @throws NullPointerException if the given spliterator is {@code null} + */ + public static PrimitiveIterator.OfDouble iteratorFromSpliterator(Spliterator.OfDouble spliterator) { + Objects.requireNonNull(spliterator); + class Adapter implements PrimitiveIterator.OfDouble, DoubleConsumer { + boolean valueReady = false; + double nextElement; + + @Override + public void accept(double t) { + valueReady = true; + nextElement = t; + } + + @Override + public boolean hasNext() { + if (!valueReady) + spliterator.tryAdvance(this); + return valueReady; + } + + @Override + public double nextDouble() { + if (!valueReady && !hasNext()) + throw new NoSuchElementException(); + else { + valueReady = false; + return nextElement; + } + } + } + + return new Adapter(); + } + + // Implementations + + private static abstract class EmptySpliterator, C> { + + EmptySpliterator() { } + + public S trySplit() { + return null; + } + + public boolean tryAdvance(C consumer) { + Objects.requireNonNull(consumer); + return false; + } + + public void forEachRemaining(C consumer) { + Objects.requireNonNull(consumer); + } + + public long estimateSize() { + return 0; + } + + public int characteristics() { + return Spliterator.SIZED | Spliterator.SUBSIZED; + } + + private static final class OfRef + extends EmptySpliterator, Consumer> + implements Spliterator { + OfRef() { } + } + + private static final class OfInt + extends EmptySpliterator + implements Spliterator.OfInt { + OfInt() { } + } + + private static final class OfLong + extends EmptySpliterator + implements Spliterator.OfLong { + OfLong() { } + } + + private static final class OfDouble + extends EmptySpliterator + implements Spliterator.OfDouble { + OfDouble() { } + } + } + + // Array-based spliterators + + /** + * A Spliterator designed for use by sources that traverse and split + * elements maintained in an unmodifiable {@code Object[]} array. + */ + static final class ArraySpliterator implements Spliterator { + /** + * The array, explicitly typed as Object[]. Unlike in some other + * classes (see for example CR 6260652), we do not need to + * screen arguments to ensure they are exactly of type Object[] + * so long as no methods write into the array or serialize it, + * which we ensure here by defining this class as final. + */ + private final Object[] array; + private int index; // current index, modified on advance/split + private final int fence; // one past last index + private final int characteristics; + + /** + * Creates a spliterator covering all of the given array. 
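// Editorial sketch, not part of the patch: adapting a Spliterator back to an
// Iterator with the iteratorFromSpliterator factory defined above; traversal
// must then go through the returned iterator only.
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;

class SpliteratorToIteratorExample {
    public static void main(String[] args) {
        Integer[] data = {10, 20, 30};
        Spliterator<Integer> s = Spliterators.spliterator(data, Spliterator.ORDERED);
        Iterator<Integer> it = Spliterators.iteratorFromSpliterator(s);
        while (it.hasNext())
            System.out.println(it.next());
    }
}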
+ * @param array the array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public ArraySpliterator(Object[] array, int additionalCharacteristics) { + this(array, 0, array.length, additionalCharacteristics); + } + + /** + * Creates a spliterator covering the given array and range + * @param array the array, assumed to be unmodified during use + * @param origin the least index (inclusive) to cover + * @param fence one past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public ArraySpliterator(Object[] array, int origin, int fence, int additionalCharacteristics) { + this.array = array; + this.index = origin; + this.fence = fence; + this.characteristics = additionalCharacteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + @Override + public Spliterator trySplit() { + int lo = index, mid = (lo + fence) >>> 1; + return (lo >= mid) + ? null + : new ArraySpliterator<>(array, lo, index = mid, characteristics); + } + + @SuppressWarnings("unchecked") + @Override + public void forEachRemaining(Consumer action) { + Object[] a; int i, hi; // hoist accesses and checks from loop + if (action == null) + throw new NullPointerException(); + if ((a = array).length >= (hi = fence) && + (i = index) >= 0 && i < (index = hi)) { + do { action.accept((T)a[i]); } while (++i < hi); + } + } + + @Override + public boolean tryAdvance(Consumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < fence) { + @SuppressWarnings("unchecked") T e = (T) array[index++]; + action.accept(e); + return true; + } + return false; + } + + @Override + public long estimateSize() { return (long)(fence - index); } + + @Override + public int characteristics() { + return characteristics; + } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + /** + * A Spliterator.OfInt designed for use by sources that traverse and split + * elements maintained in an unmodifiable {@code int[]} array. + */ + static final class IntArraySpliterator implements Spliterator.OfInt { + private final int[] array; + private int index; // current index, modified on advance/split + private final int fence; // one past last index + private final int characteristics; + + /** + * Creates a spliterator covering all of the given array. 
+ * @param array the array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public IntArraySpliterator(int[] array, int additionalCharacteristics) { + this(array, 0, array.length, additionalCharacteristics); + } + + /** + * Creates a spliterator covering the given array and range + * @param array the array, assumed to be unmodified during use + * @param origin the least index (inclusive) to cover + * @param fence one past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public IntArraySpliterator(int[] array, int origin, int fence, int additionalCharacteristics) { + this.array = array; + this.index = origin; + this.fence = fence; + this.characteristics = additionalCharacteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + @Override + public OfInt trySplit() { + int lo = index, mid = (lo + fence) >>> 1; + return (lo >= mid) + ? null + : new IntArraySpliterator(array, lo, index = mid, characteristics); + } + + @Override + public void forEachRemaining(IntConsumer action) { + int[] a; int i, hi; // hoist accesses and checks from loop + if (action == null) + throw new NullPointerException(); + if ((a = array).length >= (hi = fence) && + (i = index) >= 0 && i < (index = hi)) { + do { action.accept(a[i]); } while (++i < hi); + } + } + + @Override + public boolean tryAdvance(IntConsumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < fence) { + action.accept(array[index++]); + return true; + } + return false; + } + + @Override + public long estimateSize() { return (long)(fence - index); } + + @Override + public int characteristics() { + return characteristics; + } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + /** + * A Spliterator.OfLong designed for use by sources that traverse and split + * elements maintained in an unmodifiable {@code int[]} array. + */ + static final class LongArraySpliterator implements Spliterator.OfLong { + private final long[] array; + private int index; // current index, modified on advance/split + private final int fence; // one past last index + private final int characteristics; + + /** + * Creates a spliterator covering all of the given array. 
+ * @param array the array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public LongArraySpliterator(long[] array, int additionalCharacteristics) { + this(array, 0, array.length, additionalCharacteristics); + } + + /** + * Creates a spliterator covering the given array and range + * @param array the array, assumed to be unmodified during use + * @param origin the least index (inclusive) to cover + * @param fence one past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public LongArraySpliterator(long[] array, int origin, int fence, int additionalCharacteristics) { + this.array = array; + this.index = origin; + this.fence = fence; + this.characteristics = additionalCharacteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + @Override + public OfLong trySplit() { + int lo = index, mid = (lo + fence) >>> 1; + return (lo >= mid) + ? null + : new LongArraySpliterator(array, lo, index = mid, characteristics); + } + + @Override + public void forEachRemaining(LongConsumer action) { + long[] a; int i, hi; // hoist accesses and checks from loop + if (action == null) + throw new NullPointerException(); + if ((a = array).length >= (hi = fence) && + (i = index) >= 0 && i < (index = hi)) { + do { action.accept(a[i]); } while (++i < hi); + } + } + + @Override + public boolean tryAdvance(LongConsumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < fence) { + action.accept(array[index++]); + return true; + } + return false; + } + + @Override + public long estimateSize() { return (long)(fence - index); } + + @Override + public int characteristics() { + return characteristics; + } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + /** + * A Spliterator.OfDouble designed for use by sources that traverse and split + * elements maintained in an unmodifiable {@code int[]} array. + */ + static final class DoubleArraySpliterator implements Spliterator.OfDouble { + private final double[] array; + private int index; // current index, modified on advance/split + private final int fence; // one past last index + private final int characteristics; + + /** + * Creates a spliterator covering all of the given array. 
+ * @param array the array, assumed to be unmodified during use + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public DoubleArraySpliterator(double[] array, int additionalCharacteristics) { + this(array, 0, array.length, additionalCharacteristics); + } + + /** + * Creates a spliterator covering the given array and range + * @param array the array, assumed to be unmodified during use + * @param origin the least index (inclusive) to cover + * @param fence one past the greatest index to cover + * @param additionalCharacteristics Additional spliterator characteristics + * of this spliterator's source or elements beyond {@code SIZED} and + * {@code SUBSIZED} which are are always reported + */ + public DoubleArraySpliterator(double[] array, int origin, int fence, int additionalCharacteristics) { + this.array = array; + this.index = origin; + this.fence = fence; + this.characteristics = additionalCharacteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + @Override + public OfDouble trySplit() { + int lo = index, mid = (lo + fence) >>> 1; + return (lo >= mid) + ? null + : new DoubleArraySpliterator(array, lo, index = mid, characteristics); + } + + @Override + public void forEachRemaining(DoubleConsumer action) { + double[] a; int i, hi; // hoist accesses and checks from loop + if (action == null) + throw new NullPointerException(); + if ((a = array).length >= (hi = fence) && + (i = index) >= 0 && i < (index = hi)) { + do { action.accept(a[i]); } while (++i < hi); + } + } + + @Override + public boolean tryAdvance(DoubleConsumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < fence) { + action.accept(array[index++]); + return true; + } + return false; + } + + @Override + public long estimateSize() { return (long)(fence - index); } + + @Override + public int characteristics() { + return characteristics; + } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + // + + /** + * An abstract {@code Spliterator} that implements {@code trySplit} to + * permit limited parallelism. + * + *

An extending class need only + * implement {@link #tryAdvance(java.util.function.Consumer) tryAdvance}. + * The extending class should override + * {@link #forEachRemaining(java.util.function.Consumer) forEach} if it can + * provide a more performant implementation. + * + * @apiNote + * This class is a useful aid for creating a spliterator when it is not + * possible or difficult to efficiently partition elements in a manner + * allowing balanced parallel computation. + * + *

An alternative to using this class, that also permits limited + * parallelism, is to create a spliterator from an iterator + * (see {@link #spliterator(Iterator, long, int)}. Depending on the + * circumstances using an iterator may be easier or more convenient than + * extending this class, such as when there is already an iterator + * available to use. + * + * @see #spliterator(Iterator, long, int) + * @since 1.8 + */ + public static abstract class AbstractSpliterator implements Spliterator { + static final int BATCH_UNIT = 1 << 10; // batch array size increment + static final int MAX_BATCH = 1 << 25; // max batch array size; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator reporting the given estimated size and + * additionalCharacteristics. + * + * @param est the estimated size of this spliterator if known, otherwise + * {@code Long.MAX_VALUE}. + * @param additionalCharacteristics properties of this spliterator's + * source or elements. If {@code SIZED} is reported then this + * spliterator will additionally report {@code SUBSIZED}. + */ + protected AbstractSpliterator(long est, int additionalCharacteristics) { + this.est = est; + this.characteristics = ((additionalCharacteristics & Spliterator.SIZED) != 0) + ? additionalCharacteristics | Spliterator.SUBSIZED + : additionalCharacteristics; + } + + static final class HoldingConsumer implements Consumer { + Object value; + + @Override + public void accept(T value) { + this.value = value; + } + } + + /** + * {@inheritDoc} + * + * This implementation permits limited parallelism. + */ + @Override + public Spliterator trySplit() { + /* + * Split into arrays of arithmetically increasing batch + * sizes. This will only improve parallel performance if + * per-element Consumer actions are more costly than + * transferring them into an array. The use of an + * arithmetic progression in split sizes provides overhead + * vs parallelism bounds that do not particularly favor or + * penalize cases of lightweight vs heavyweight element + * operations, across combinations of #elements vs #cores, + * whether or not either are known. We generate + * O(sqrt(#elements)) splits, allowing O(sqrt(#cores)) + * potential speedup. + */ + HoldingConsumer holder = new HoldingConsumer<>(); + long s = est; + if (s > 1 && tryAdvance(holder)) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + Object[] a; + try { + a = new Object[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = holder.value; } while (++j < n && tryAdvance(holder)); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new ArraySpliterator<>(a, 0, j, characteristics()); + } + return null; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the estimated size as reported when + * created and, if the estimate size is known, decreases in size when + * split. + */ + @Override + public long estimateSize() { + return est; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the characteristics as reported when + * created. + */ + @Override + public int characteristics() { + return characteristics; + } + } + + /** + * An abstract {@code Spliterator.OfInt} that implements {@code trySplit} to + * permit limited parallelism. + * + *
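// Editorial sketch, not part of the patch: a minimal AbstractSpliterator
// subclass. Only tryAdvance is required; trySplit, estimateSize and
// characteristics come from the base class above. CountingSpliterator is a
// hypothetical name used for illustration.
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;

class CountingSpliterator extends Spliterators.AbstractSpliterator<Integer> {
    private final int limit;
    private int next = 0;

    CountingSpliterator(int limit) {
        super(limit, Spliterator.ORDERED | Spliterator.SIZED);   // SIZED implies SUBSIZED here
        this.limit = limit;
    }

    @Override
    public boolean tryAdvance(Consumer<? super Integer> action) {
        if (next >= limit)
            return false;
        action.accept(next++);
        return true;
    }

    public static void main(String[] args) {
        new CountingSpliterator(5).forEachRemaining(System.out::println);   // prints 0..4
    }
}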

To implement a spliterator an extending class need only + * implement {@link #tryAdvance(java.util.function.IntConsumer)} + * tryAdvance}. The extending class should override + * {@link #forEachRemaining(java.util.function.IntConsumer)} forEach} if it + * can provide a more performant implementation. + * + * @apiNote + * This class is a useful aid for creating a spliterator when it is not + * possible or difficult to efficiently partition elements in a manner + * allowing balanced parallel computation. + * + *

An alternative to using this class, that also permits limited + * parallelism, is to create a spliterator from an iterator + * (see {@link #spliterator(java.util.PrimitiveIterator.OfInt, long, int)}. + * Depending on the circumstances using an iterator may be easier or more + * convenient than extending this class. For example, if there is already an + * iterator available to use then there is no need to extend this class. + * + * @see #spliterator(java.util.PrimitiveIterator.OfInt, long, int) + * @since 1.8 + */ + public static abstract class AbstractIntSpliterator implements Spliterator.OfInt { + static final int MAX_BATCH = AbstractSpliterator.MAX_BATCH; + static final int BATCH_UNIT = AbstractSpliterator.BATCH_UNIT; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator reporting the given estimated size and + * characteristics. + * + * @param est the estimated size of this spliterator if known, otherwise + * {@code Long.MAX_VALUE}. + * @param additionalCharacteristics properties of this spliterator's + * source or elements. If {@code SIZED} is reported then this + * spliterator will additionally report {@code SUBSIZED}. + */ + protected AbstractIntSpliterator(long est, int additionalCharacteristics) { + this.est = est; + this.characteristics = ((additionalCharacteristics & Spliterator.SIZED) != 0) + ? additionalCharacteristics | Spliterator.SUBSIZED + : additionalCharacteristics; + } + + static final class HoldingIntConsumer implements IntConsumer { + int value; + + @Override + public void accept(int value) { + this.value = value; + } + } + + /** + * {@inheritDoc} + * + * This implementation permits limited parallelism. + */ + @Override + public Spliterator.OfInt trySplit() { + HoldingIntConsumer holder = new HoldingIntConsumer(); + long s = est; + if (s > 1 && tryAdvance(holder)) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + int[] a; + try { + a = new int[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = holder.value; } while (++j < n && tryAdvance(holder)); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new IntArraySpliterator(a, 0, j, characteristics()); + } + return null; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the estimated size as reported when + * created and, if the estimate size is known, decreases in size when + * split. + */ + @Override + public long estimateSize() { + return est; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the characteristics as reported when + * created. + */ + @Override + public int characteristics() { + return characteristics; + } + } + + /** + * An abstract {@code Spliterator.OfLong} that implements {@code trySplit} + * to permit limited parallelism. + * + *

To implement a spliterator an extending class need only + * implement {@link #tryAdvance(java.util.function.LongConsumer)} + * tryAdvance}. The extending class should override + * {@link #forEachRemaining(java.util.function.LongConsumer)} forEach} if it + * can provide a more performant implementation. + * + * @apiNote + * This class is a useful aid for creating a spliterator when it is not + * possible or difficult to efficiently partition elements in a manner + * allowing balanced parallel computation. + * + *

An alternative to using this class, that also permits limited + * parallelism, is to create a spliterator from an iterator + * (see {@link #spliterator(java.util.PrimitiveIterator.OfLong, long, int)}. + * Depending on the circumstances using an iterator may be easier or more + * convenient than extending this class. For example, if there is already an + * iterator available to use then there is no need to extend this class. + * + * @see #spliterator(java.util.PrimitiveIterator.OfLong, long, int) + * @since 1.8 + */ + public static abstract class AbstractLongSpliterator implements Spliterator.OfLong { + static final int MAX_BATCH = AbstractSpliterator.MAX_BATCH; + static final int BATCH_UNIT = AbstractSpliterator.BATCH_UNIT; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator reporting the given estimated size and + * characteristics. + * + * @param est the estimated size of this spliterator if known, otherwise + * {@code Long.MAX_VALUE}. + * @param additionalCharacteristics properties of this spliterator's + * source or elements. If {@code SIZED} is reported then this + * spliterator will additionally report {@code SUBSIZED}. + */ + protected AbstractLongSpliterator(long est, int additionalCharacteristics) { + this.est = est; + this.characteristics = ((additionalCharacteristics & Spliterator.SIZED) != 0) + ? additionalCharacteristics | Spliterator.SUBSIZED + : additionalCharacteristics; + } + + static final class HoldingLongConsumer implements LongConsumer { + long value; + + @Override + public void accept(long value) { + this.value = value; + } + } + + /** + * {@inheritDoc} + * + * This implementation permits limited parallelism. + */ + @Override + public Spliterator.OfLong trySplit() { + HoldingLongConsumer holder = new HoldingLongConsumer(); + long s = est; + if (s > 1 && tryAdvance(holder)) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + long[] a; + try { + a = new long[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = holder.value; } while (++j < n && tryAdvance(holder)); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new LongArraySpliterator(a, 0, j, characteristics()); + } + return null; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the estimated size as reported when + * created and, if the estimate size is known, decreases in size when + * split. + */ + @Override + public long estimateSize() { + return est; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the characteristics as reported when + * created. + */ + @Override + public int characteristics() { + return characteristics; + } + } + + /** + * An abstract {@code Spliterator.OfDouble} that implements + * {@code trySplit} to permit limited parallelism. + * + *

To implement a spliterator an extending class need only + * implement {@link #tryAdvance(java.util.function.DoubleConsumer) + * tryAdvance}. The extending class should override + * {@link #forEachRemaining(java.util.function.DoubleConsumer) forEachRemaining} if + * it can provide a more performant implementation. + * + * @apiNote + * This class is a useful aid for creating a spliterator when it is not + * possible or difficult to efficiently partition elements in a manner + * allowing balanced parallel computation. + * + *

An alternative to using this class, that also permits limited + * parallelism, is to create a spliterator from an iterator + * (see {@link #spliterator(java.util.PrimitiveIterator.OfDouble, long, int)}. + * Depending on the circumstances using an iterator may be easier or more + * convenient than extending this class. For example, if there is already an + * iterator available to use then there is no need to extend this class. + * + * @see #spliterator(java.util.PrimitiveIterator.OfDouble, long, int) + * @since 1.8 + */ + public static abstract class AbstractDoubleSpliterator implements Spliterator.OfDouble { + static final int MAX_BATCH = AbstractSpliterator.MAX_BATCH; + static final int BATCH_UNIT = AbstractSpliterator.BATCH_UNIT; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator reporting the given estimated size and + * characteristics. + * + * @param est the estimated size of this spliterator if known, otherwise + * {@code Long.MAX_VALUE}. + * @param additionalCharacteristics properties of this spliterator's + * source or elements. If {@code SIZED} is reported then this + * spliterator will additionally report {@code SUBSIZED}. + */ + protected AbstractDoubleSpliterator(long est, int additionalCharacteristics) { + this.est = est; + this.characteristics = ((additionalCharacteristics & Spliterator.SIZED) != 0) + ? additionalCharacteristics | Spliterator.SUBSIZED + : additionalCharacteristics; + } + + static final class HoldingDoubleConsumer implements DoubleConsumer { + double value; + + @Override + public void accept(double value) { + this.value = value; + } + } + + /** + * {@inheritDoc} + * + * This implementation permits limited parallelism. + */ + @Override + public Spliterator.OfDouble trySplit() { + HoldingDoubleConsumer holder = new HoldingDoubleConsumer(); + long s = est; + if (s > 1 && tryAdvance(holder)) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + double[] a; + try { + a = new double[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = holder.value; } while (++j < n && tryAdvance(holder)); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new DoubleArraySpliterator(a, 0, j, characteristics()); + } + return null; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the estimated size as reported when + * created and, if the estimate size is known, decreases in size when + * split. + */ + @Override + public long estimateSize() { + return est; + } + + /** + * {@inheritDoc} + * + * @implSpec + * This implementation returns the characteristics as reported when + * created. + */ + @Override + public int characteristics() { + return characteristics; + } + } + + // Iterator-based Spliterators + + /** + * A Spliterator using a given Iterator for element + * operations. The spliterator implements {@code trySplit} to + * permit limited parallelism. 
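+ *
+ * <p>For example, an iterator-based spliterator for a source of unknown
+ * size can be obtained via {@code Spliterators.spliteratorUnknownSize}
+ * and traversed as follows:
+ * <pre>{@code
+ *     Iterator<String> it = Arrays.asList("a", "b", "c").iterator();
+ *     Spliterator<String> s = Spliterators.spliteratorUnknownSize(it, Spliterator.ORDERED);
+ *     s.forEachRemaining(System.out::println);
+ * }</pre>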
+ */ + static class IteratorSpliterator implements Spliterator { + static final int BATCH_UNIT = 1 << 10; // batch array size increment + static final int MAX_BATCH = 1 << 25; // max batch array size + private final Collection collection; // null OK + private Iterator it; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator using the given + * collection's {@link java.util.Collection#iterator()} for traversal, + * and reporting its {@link java.util.Collection#size()} as its initial + * size. + * + * @param collection the collection + * @param characteristics properties of this spliterator's + * source or elements. + */ + public IteratorSpliterator(Collection collection, int characteristics) { + this.collection = collection; + this.it = null; + this.characteristics = characteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + /** + * Creates a spliterator using the given iterator + * for traversal, and reporting the given initial size + * and characteristics. + * + * @param iterator the iterator for the source + * @param size the number of elements in the source + * @param characteristics properties of this spliterator's + * source or elements. + */ + public IteratorSpliterator(Iterator iterator, long size, int characteristics) { + this.collection = null; + this.it = iterator; + this.est = size; + this.characteristics = characteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + /** + * Creates a spliterator using the given iterator for a + * source of unknown size, reporting the given + * characteristics. + * + * @param iterator the iterator for the source + * @param characteristics properties of this spliterator's + * source or elements. + */ + public IteratorSpliterator(Iterator iterator, int characteristics) { + this.collection = null; + this.it = iterator; + this.est = Long.MAX_VALUE; + this.characteristics = characteristics & ~(Spliterator.SIZED | Spliterator.SUBSIZED); + } + + @Override + public Spliterator trySplit() { + /* + * Split into arrays of arithmetically increasing batch + * sizes. This will only improve parallel performance if + * per-element Consumer actions are more costly than + * transferring them into an array. The use of an + * arithmetic progression in split sizes provides overhead + * vs parallelism bounds that do not particularly favor or + * penalize cases of lightweight vs heavyweight element + * operations, across combinations of #elements vs #cores, + * whether or not either is known. We generate + * O(sqrt(#elements)) splits, allowing O(sqrt(#cores)) + * potential speedup.
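+ * For example, with BATCH_UNIT = 1024 the successive batch sizes are 1024, 2048, 3072, ..., so after k splits roughly 512 * k * (k + 1) elements have been handed off; covering N elements therefore takes about sqrt(N / 512) splits, i.e. O(sqrt(N)).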
+ */ + Iterator i; + long s; + if ((i = it) == null) { + i = it = collection.iterator(); + s = est = (long) collection.size(); + } + else + s = est; + if (s > 1 && i.hasNext()) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + Object[] a; + try { + a = new Object[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = i.next(); } while (++j < n && i.hasNext()); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new ArraySpliterator<>(a, 0, j, characteristics); + } + return null; + } + + @Override + public void forEachRemaining(Consumer action) { + if (action == null) throw new NullPointerException(); + Iterator i; + if ((i = it) == null) { + i = it = collection.iterator(); + est = (long)collection.size(); + } + i.forEachRemaining(action); + } + + @Override + public boolean tryAdvance(Consumer action) { + if (action == null) throw new NullPointerException(); + if (it == null) { + it = collection.iterator(); + est = (long) collection.size(); + } + if (it.hasNext()) { + action.accept(it.next()); + return true; + } + return false; + } + + @Override + public long estimateSize() { + if (it == null) { + it = collection.iterator(); + return est = (long)collection.size(); + } + return est; + } + + @Override + public int characteristics() { return characteristics; } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + /** + * A Spliterator.OfInt using a given IntStream.IntIterator for element + * operations. The spliterator implements {@code trySplit} to + * permit limited parallelism. + */ + static final class IntIteratorSpliterator implements Spliterator.OfInt { + static final int BATCH_UNIT = IteratorSpliterator.BATCH_UNIT; + static final int MAX_BATCH = IteratorSpliterator.MAX_BATCH; + private PrimitiveIterator.OfInt it; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator using the given iterator + * for traversal, and reporting the given initial size + * and characteristics. + * + * @param iterator the iterator for the source + * @param size the number of elements in the source + * @param characteristics properties of this spliterator's + * source or elements. + */ + public IntIteratorSpliterator(PrimitiveIterator.OfInt iterator, long size, int characteristics) { + this.it = iterator; + this.est = size; + this.characteristics = characteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + /** + * Creates a spliterator using the given iterator for a + * source of unknown size, reporting the given + * characteristics. + * + * @param iterator the iterator for the source + * @param characteristics properties of this spliterator's + * source or elements. 
+ */ + public IntIteratorSpliterator(PrimitiveIterator.OfInt iterator, int characteristics) { + this.it = iterator; + this.est = Long.MAX_VALUE; + this.characteristics = characteristics & ~(Spliterator.SIZED | Spliterator.SUBSIZED); + } + + @Override + public OfInt trySplit() { + PrimitiveIterator.OfInt i = it; + long s = est; + if (s > 1 && i.hasNext()) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + int[] a; + try { + a = new int[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = i.nextInt(); } while (++j < n && i.hasNext()); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new IntArraySpliterator(a, 0, j, characteristics); + } + return null; + } + + @Override + public void forEachRemaining(IntConsumer action) { + if (action == null) throw new NullPointerException(); + it.forEachRemaining(action); + } + + @Override + public boolean tryAdvance(IntConsumer action) { + if (action == null) throw new NullPointerException(); + if (it.hasNext()) { + action.accept(it.nextInt()); + return true; + } + return false; + } + + @Override + public long estimateSize() { + return est; + } + + @Override + public int characteristics() { return characteristics; } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + static final class LongIteratorSpliterator implements Spliterator.OfLong { + static final int BATCH_UNIT = IteratorSpliterator.BATCH_UNIT; + static final int MAX_BATCH = IteratorSpliterator.MAX_BATCH; + private PrimitiveIterator.OfLong it; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator using the given iterator + * for traversal, and reporting the given initial size + * and characteristics. + * + * @param iterator the iterator for the source + * @param size the number of elements in the source + * @param characteristics properties of this spliterator's + * source or elements. + */ + public LongIteratorSpliterator(PrimitiveIterator.OfLong iterator, long size, int characteristics) { + this.it = iterator; + this.est = size; + this.characteristics = characteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + /** + * Creates a spliterator using the given iterator for a + * source of unknown size, reporting the given + * characteristics. + * + * @param iterator the iterator for the source + * @param characteristics properties of this spliterator's + * source or elements. 
+ */ + public LongIteratorSpliterator(PrimitiveIterator.OfLong iterator, int characteristics) { + this.it = iterator; + this.est = Long.MAX_VALUE; + this.characteristics = characteristics & ~(Spliterator.SIZED | Spliterator.SUBSIZED); + } + + @Override + public OfLong trySplit() { + PrimitiveIterator.OfLong i = it; + long s = est; + if (s > 1 && i.hasNext()) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + long[] a; + try { + a = new long[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = i.nextLong(); } while (++j < n && i.hasNext()); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new LongArraySpliterator(a, 0, j, characteristics); + } + return null; + } + + @Override + public void forEachRemaining(LongConsumer action) { + if (action == null) throw new NullPointerException(); + it.forEachRemaining(action); + } + + @Override + public boolean tryAdvance(LongConsumer action) { + if (action == null) throw new NullPointerException(); + if (it.hasNext()) { + action.accept(it.nextLong()); + return true; + } + return false; + } + + @Override + public long estimateSize() { + return est; + } + + @Override + public int characteristics() { return characteristics; } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } + + static final class DoubleIteratorSpliterator implements Spliterator.OfDouble { + static final int BATCH_UNIT = IteratorSpliterator.BATCH_UNIT; + static final int MAX_BATCH = IteratorSpliterator.MAX_BATCH; + private PrimitiveIterator.OfDouble it; + private final int characteristics; + private long est; // size estimate + private int batch; // batch size for splits + + /** + * Creates a spliterator using the given iterator + * for traversal, and reporting the given initial size + * and characteristics. + * + * @param iterator the iterator for the source + * @param size the number of elements in the source + * @param characteristics properties of this spliterator's + * source or elements. + */ + public DoubleIteratorSpliterator(PrimitiveIterator.OfDouble iterator, long size, int characteristics) { + this.it = iterator; + this.est = size; + this.characteristics = characteristics | Spliterator.SIZED | Spliterator.SUBSIZED; + } + + /** + * Creates a spliterator using the given iterator for a + * source of unknown size, reporting the given + * characteristics. + * + * @param iterator the iterator for the source + * @param characteristics properties of this spliterator's + * source or elements. 
+ */ + public DoubleIteratorSpliterator(PrimitiveIterator.OfDouble iterator, int characteristics) { + this.it = iterator; + this.est = Long.MAX_VALUE; + this.characteristics = characteristics & ~(Spliterator.SIZED | Spliterator.SUBSIZED); + } + + @Override + public OfDouble trySplit() { + PrimitiveIterator.OfDouble i = it; + long s = est; + if (s > 1 && i.hasNext()) { + int n = batch + BATCH_UNIT; + if (n > s) + n = (int) s; + if (n > MAX_BATCH) + n = MAX_BATCH; + double[] a; + try { + a = new double[n]; + } catch (OutOfMemoryError oome) { + return null; + } + int j = 0; + do { a[j] = i.nextDouble(); } while (++j < n && i.hasNext()); + batch = j; + if (est != Long.MAX_VALUE) + est -= j; + return new DoubleArraySpliterator(a, 0, j, characteristics); + } + return null; + } + + @Override + public void forEachRemaining(DoubleConsumer action) { + if (action == null) throw new NullPointerException(); + it.forEachRemaining(action); + } + + @Override + public boolean tryAdvance(DoubleConsumer action) { + if (action == null) throw new NullPointerException(); + if (it.hasNext()) { + action.accept(it.nextDouble()); + return true; + } + return false; + } + + @Override + public long estimateSize() { + return est; + } + + @Override + public int characteristics() { return characteristics; } + + @Override + public Comparator getComparator() { + if (hasCharacteristics(Spliterator.SORTED)) + return null; + throw new IllegalStateException(); + } + } +} diff --git a/src/share/classes/java/util/Tripwire.java b/src/share/classes/java/util/Tripwire.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/Tripwire.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import sun.util.logging.PlatformLogger; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * Utility class for detecting inadvertent uses of boxing in + * {@code java.util} classes. The detection is turned on or off based on + * whether the system property {@code org.openjdk.java.util.stream.tripwire} is + * considered {@code true} according to {@link Boolean#getBoolean(String)}. + * This should normally be turned off for production use. + * + * @apiNote + * Typical usage would be for boxing code to do: + *

+ * <pre>{@code
+ *     if (Tripwire.ENABLED)
+ *         Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.nextInt()");
+ * }</pre>
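+ *
+ * <p>For instance, a boxing bridge such as an implementation of
+ * {@code PrimitiveIterator.OfInt.next()} might guard its diagnostic this
+ * way (sketch only):
+ * <pre>{@code
+ *     public Integer next() {
+ *         if (Tripwire.ENABLED)
+ *             Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.nextInt()");
+ *         return nextInt();
+ *     }
+ * }</pre>
+ *
+ * <p>The check is enabled by running with
+ * {@code -Dorg.openjdk.java.util.stream.tripwire=true}.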
+ * + * @since 1.8 + */ +final class Tripwire { + private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire"; + + /** Should debugging checks be enabled? */ + static final boolean ENABLED = AccessController.doPrivileged( + (PrivilegedAction) () -> Boolean.getBoolean(TRIPWIRE_PROPERTY)); + + private Tripwire() { } + + /** + * Produces a log warning, using {@code PlatformLogger.getLogger(className)}, + * using the supplied message. The class name of {@code trippingClass} will + * be used as the first parameter to the message. + * + * @param trippingClass Name of the class generating the message + * @param msg A message format string of the type expected by + * {@link PlatformLogger} + */ + static void trip(Class trippingClass, String msg) { + PlatformLogger.getLogger(trippingClass.getName()).warning(msg, trippingClass.getName()); + } +} diff --git a/test/java/util/Spliterator/SpliteratorLateBindingFailFastTest.java b/test/java/util/Spliterator/SpliteratorLateBindingFailFastTest.java new file mode 100644 --- /dev/null +++ b/test/java/util/Spliterator/SpliteratorLateBindingFailFastTest.java @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.Set; +import java.util.Spliterator; +import java.util.Stack; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Vector; +import java.util.WeakHashMap; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.testng.Assert.*; + +/** + * @test + * @summary Spliterator last-binding and fail-fast tests + * @run testng SpliteratorLateBindingFailFastTest + */ + +@Test +public class SpliteratorLateBindingFailFastTest { + + private interface Source { + Collection asCollection(); + void update(); + } + + private static class SpliteratorDataBuilder { + final List data; + + final T newValue; + + final List exp; + + final Map mExp; + + SpliteratorDataBuilder(List data, T newValue, List exp) { + this.data = data; + this.newValue = newValue; + this.exp = exp; + this.mExp = createMap(exp); + } + + Map createMap(List l) { + Map m = new LinkedHashMap<>(); + for (T t : l) { + m.put(t, t); + } + return m; + } + + void add(String description, Supplier> s) { + description = joiner(description).toString(); + data.add(new Object[]{description, s}); + } + + void addCollection(Function, ? extends Collection> f) { + class CollectionSource implements Source { + final Collection c = f.apply(exp); + + final Consumer> updater; + + CollectionSource(Consumer> updater) { + this.updater = updater; + } + + @Override + public Collection asCollection() { + return c; + } + + @Override + public void update() { + updater.accept(c); + } + } + + String description = "new " + f.apply(Collections.emptyList()).getClass().getName() + ".spliterator() "; + add(description + "ADD", () -> new CollectionSource(c -> c.add(newValue))); + add(description + "REMOVE", () -> new CollectionSource(c -> c.remove(c.iterator().next()))); + } + + void addList(Function, ? extends List> l) { + // @@@ If collection is instance of List then add sub-list tests + addCollection(l); + } + + void addMap(Function, ? 
extends Map> mapConstructor) { + class MapSource implements Source { + final Map m = mapConstructor.apply(mExp); + + final Collection c; + + final Consumer> updater; + + MapSource(Function, Collection> f, Consumer> updater) { + this.c = f.apply(m); + this.updater = updater; + } + + @Override + public Collection asCollection() { + return c; + } + + @Override + public void update() { + updater.accept(m); + } + } + + Map>> actions = new HashMap<>(); + actions.put("ADD", m -> m.put(newValue, newValue)); + actions.put("REMOVE", m -> m.remove(m.keySet().iterator().next())); + + String description = "new " + mapConstructor.apply(Collections.emptyMap()).getClass().getName(); + for (Map.Entry>> e : actions.entrySet()) { + add(description + ".keySet().spliterator() " + e.getKey(), + () -> new MapSource(m -> m.keySet(), e.getValue())); + add(description + ".values().spliterator() " + e.getKey(), + () -> new MapSource(m -> m.values(), e.getValue())); + add(description + ".entrySet().spliterator() " + e.getKey(), + () -> new MapSource>(m -> m.entrySet(), e.getValue())); + } + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). + append("}"); + } + } + + static Object[][] spliteratorDataProvider; + + @DataProvider(name = "Source") + public static Object[][] spliteratorDataProvider() { + if (spliteratorDataProvider != null) { + return spliteratorDataProvider; + } + + List data = new ArrayList<>(); + SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, 5, Arrays.asList(1, 2, 3, 4)); + + // Collections + + db.addList(ArrayList::new); + + db.addList(LinkedList::new); + + db.addList(Vector::new); + + + db.addCollection(HashSet::new); + + db.addCollection(LinkedHashSet::new); + + db.addCollection(TreeSet::new); + + + db.addCollection(c -> { Stack s = new Stack<>(); s.addAll(c); return s;}); + + db.addCollection(PriorityQueue::new); + + // ArrayDeque fails some tests since it's fail-fast support is weaker + // than other collections and limited to detecting most, but not all, + // removals. It probably requires it's own test since it is difficult + // to abstract out the conditions under which it fails-fast. 
+// db.addCollection(ArrayDeque::new); + + // Maps + + db.addMap(HashMap::new); + + db.addMap(LinkedHashMap::new); + + // This fails when run through jrteg but passes when run though + // ant +// db.addMap(IdentityHashMap::new); + + db.addMap(WeakHashMap::new); + + // @@@ Descending maps etc + db.addMap(TreeMap::new); + + return spliteratorDataProvider = data.toArray(new Object[0][]); + } + + @Test(dataProvider = "Source") + public void lateBindingTestWithForEach(String description, Supplier> ss) { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + source.update(); + + Set r = new HashSet<>(); + s.forEachRemaining(r::add); + + assertEquals(r, new HashSet<>(c)); + } + + @Test(dataProvider = "Source") + public void lateBindingTestWithTryAdvance(String description, Supplier> ss) { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + source.update(); + + Set r = new HashSet<>(); + while (s.tryAdvance(r::add)) { } + + assertEquals(r, new HashSet<>(c)); + } + + @Test(dataProvider = "Source") + public void lateBindingTestWithCharacteritics(String description, Supplier> ss) { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + s.characteristics(); + + Set r = new HashSet<>(); + s.forEachRemaining(r::add); + + assertEquals(r, new HashSet<>(c)); + } + + + @Test(dataProvider = "Source") + public void testFailFastTestWithTryAdvance(String description, Supplier> ss) { + { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + s.tryAdvance(e -> { + }); + source.update(); + + executeAndCatch(() -> s.tryAdvance(e -> { })); + } + + { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + s.tryAdvance(e -> { + }); + source.update(); + + executeAndCatch(() -> s.forEachRemaining(e -> { + })); + } + } + + @Test(dataProvider = "Source") + public void testFailFastTestWithForEach(String description, Supplier> ss) { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + executeAndCatch(() -> s.forEachRemaining(e -> { + source.update(); + })); + } + + @Test(dataProvider = "Source") + public void testFailFastTestWithEstimateSize(String description, Supplier> ss) { + { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + s.estimateSize(); + source.update(); + + executeAndCatch(() -> s.tryAdvance(e -> { })); + } + + { + Source source = ss.get(); + Collection c = source.asCollection(); + Spliterator s = c.spliterator(); + + s.estimateSize(); + source.update(); + + executeAndCatch(() -> s.forEachRemaining(e -> { + })); + } + } + + private void executeAndCatch(Runnable r) { + executeAndCatch(ConcurrentModificationException.class, r); + } + + private void executeAndCatch(Class expected, Runnable r) { + Exception caught = null; + try { + r.run(); + } + catch (Exception e) { + caught = e; + } + + assertNotNull(caught, + String.format("No Exception was thrown, expected an Exception of %s to be thrown", + expected.getName())); + assertTrue(expected.isInstance(caught), + String.format("Exception thrown %s not an instance of %s", + caught.getClass().getName(), expected.getName())); + } + +} diff --git a/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java b/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java new file mode 
100755 --- /dev/null +++ b/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java @@ -0,0 +1,1257 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @summary Spliterator traversing and splitting tests + * @run testng SpliteratorTraversingAndSplittingTest + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.AbstractCollection; +import java.util.AbstractList; +import java.util.AbstractSet; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.Set; +import java.util.SortedSet; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.Stack; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Vector; +import java.util.WeakHashMap; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.Function; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static org.testng.Assert.*; +import static org.testng.Assert.assertEquals; + +@Test +public class SpliteratorTraversingAndSplittingTest { + + private static List SIZES = Arrays.asList(0, 1, 10, 100, 1000); + + private static class SpliteratorDataBuilder { + List data; + + List exp; + + Map mExp; + + SpliteratorDataBuilder(List data, List exp) { + this.data = data; + this.exp = exp; + this.mExp = createMap(exp); + } + + Map createMap(List l) { + Map m = 
new LinkedHashMap<>(); + for (T t : l) { + m.put(t, t); + } + return m; + } + + void add(String description, Collection expected, Supplier> s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier> s) { + add(description, exp, s); + } + + void addCollection(Function, ? extends Collection> c) { + add("new " + c.apply(Collections.emptyList()).getClass().getName() + ".spliterator()", + () -> c.apply(exp).spliterator()); + } + + void addList(Function, ? extends List> l) { + // @@@ If collection is instance of List then add sub-list tests + addCollection(l); + } + + void addMap(Function, ? extends Map> m) { + String description = "new " + m.apply(Collections.emptyMap()).getClass().getName(); + add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); + add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); + add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). + append("}"); + } + } + + static Object[][] spliteratorDataProvider; + + @DataProvider(name = "Spliterator") + public static Object[][] spliteratorDataProvider() { + if (spliteratorDataProvider != null) { + return spliteratorDataProvider; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + List exp = listIntRange(size); + SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, exp); + + // Direct spliterator methods + + db.add("Spliterators.spliterator(Collection, ...)", + () -> Spliterators.spliterator(exp, 0)); + + db.add("Spliterators.spliterator(Iterator, ...)", + () -> Spliterators.spliterator(exp.iterator(), exp.size(), 0)); + + db.add("Spliterators.spliteratorUnknownSize(Iterator, ...)", + () -> Spliterators.spliteratorUnknownSize(exp.iterator(), 0)); + + db.add("Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Spliterator ), ...)", + () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(exp.spliterator()), exp.size(), 0)); + + db.add("Spliterators.spliterator(T[], ...)", + () -> Spliterators.spliterator(exp.toArray(new Integer[0]), 0)); + + db.add("Arrays.spliterator(T[], ...)", + () -> Arrays.spliterator(exp.toArray(new Integer[0]))); + + class SpliteratorFromIterator extends Spliterators.AbstractSpliterator { + Iterator it; + + SpliteratorFromIterator(Iterator it, long est) { + super(est, Spliterator.SIZED); + this.it = it; + } + + @Override + public boolean tryAdvance(Consumer action) { + if (it.hasNext()) { + action.accept(it.next()); + return true; + } + else { + return false; + } + } + } + db.add("new Spliterators.AbstractAdvancingSpliterator()", + () -> new SpliteratorFromIterator(exp.iterator(), exp.size())); + + // Collections + + // default method implementations + + class AbstractCollectionImpl extends AbstractCollection { + Collection c; + + AbstractCollectionImpl(Collection c) { + this.c = c; + } + + @Override + public Iterator iterator() { + return c.iterator(); + } + + @Override + public int size() { + return c.size(); + } + } + db.addCollection( + c -> new AbstractCollectionImpl(c)); + + class AbstractListImpl extends AbstractList { + List l; + + AbstractListImpl(Collection c) { + this.l = new ArrayList<>(c); + } + + @Override + public Integer get(int index) { + return l.get(index); + } + + @Override + 
public int size() { + return l.size(); + } + } + db.addCollection( + c -> new AbstractListImpl(c)); + + class AbstractSetImpl extends AbstractSet { + Set s; + + AbstractSetImpl(Collection c) { + this.s = new HashSet<>(c); + } + + @Override + public Iterator iterator() { + return s.iterator(); + } + + @Override + public int size() { + return s.size(); + } + } + db.addCollection( + c -> new AbstractSetImpl(c)); + + class AbstractSortedSetImpl extends AbstractSet implements SortedSet { + SortedSet s; + + AbstractSortedSetImpl(Collection c) { + this.s = new TreeSet<>(c); + } + + @Override + public Iterator iterator() { + return s.iterator(); + } + + @Override + public int size() { + return s.size(); + } + + @Override + public Comparator comparator() { + return s.comparator(); + } + + @Override + public SortedSet subSet(Integer fromElement, Integer toElement) { + return s.subSet(fromElement, toElement); + } + + @Override + public SortedSet headSet(Integer toElement) { + return s.headSet(toElement); + } + + @Override + public SortedSet tailSet(Integer fromElement) { + return s.tailSet(fromElement); + } + + @Override + public Integer first() { + return s.first(); + } + + @Override + public Integer last() { + return s.last(); + } + + @Override + public Spliterator spliterator() { + return SortedSet.super.spliterator(); + } + } + db.addCollection( + c -> new AbstractSortedSetImpl(c)); + + // + + db.add("Arrays.asList().spliterator()", + () -> Spliterators.spliterator(Arrays.asList(exp.toArray(new Integer[0])), 0)); + + db.addList(ArrayList::new); + + db.addList(LinkedList::new); + + db.addList(Vector::new); + + + db.addCollection(HashSet::new); + + db.addCollection(LinkedHashSet::new); + + db.addCollection(TreeSet::new); + + + db.addCollection(c -> { Stack s = new Stack<>(); s.addAll(c); return s;}); + + db.addCollection(PriorityQueue::new); + + db.addCollection(ArrayDeque::new); + + + db.addCollection(ConcurrentSkipListSet::new); + + if (size > 0) { + db.addCollection(c -> { + ArrayBlockingQueue abq = new ArrayBlockingQueue<>(size); + abq.addAll(c); + return abq; + }); + } + + db.addCollection(PriorityBlockingQueue::new); + + db.addCollection(LinkedBlockingQueue::new); + + db.addCollection(LinkedTransferQueue::new); + + db.addCollection(ConcurrentLinkedQueue::new); + + db.addCollection(LinkedBlockingDeque::new); + + db.addCollection(CopyOnWriteArrayList::new); + + db.addCollection(CopyOnWriteArraySet::new); + + if (size == 1) { + db.addCollection(c -> Collections.singleton(exp.get(0))); + db.addCollection(c -> Collections.singletonList(exp.get(0))); + } + + // @@@ Collections.synchronized/unmodifiable/checked wrappers + + // Maps + + db.addMap(HashMap::new); + + db.addMap(LinkedHashMap::new); + + db.addMap(IdentityHashMap::new); + + db.addMap(WeakHashMap::new); + + // @@@ Descending maps etc + db.addMap(TreeMap::new); + + db.addMap(ConcurrentHashMap::new); + + db.addMap(ConcurrentSkipListMap::new); + } + + return spliteratorDataProvider = data.toArray(new Object[0][]); + } + + private static List listIntRange(int upTo) { + List exp = new ArrayList<>(); + for (int i = 0; i < upTo; i++) + exp.add(i); + return Collections.unmodifiableList(exp); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testForEach(String description, Collection exp, Supplier s) { + testForEach(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testTryAdvance(String description, Collection 
exp, Supplier s) { + testTryAdvance(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { + testMixedTryAdvanceForEach(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitAfterFullTraversal(String description, Collection exp, Supplier s) { + testSplitAfterFullTraversal(s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitOnce(String description, Collection exp, Supplier s) { + testSplitOnce(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitSixDeep(String description, Collection exp, Supplier s) { + testSplitSixDeep(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "Spliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testSplitUntilNull(String description, Collection exp, Supplier s) { + testSplitUntilNull(exp, s, (Consumer b) -> b); + } + + // + + private static class SpliteratorOfIntDataBuilder { + List data; + + List exp; + + SpliteratorOfIntDataBuilder(List data, List exp) { + this.data = data; + this.exp = exp; + } + + void add(String description, List expected, Supplier s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier s) { + add(description, exp, s); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). + append("}"); + } + } + + static Object[][] spliteratorOfIntDataProvider; + + @DataProvider(name = "Spliterator.OfInt") + public static Object[][] spliteratorOfIntDataProvider() { + if (spliteratorOfIntDataProvider != null) { + return spliteratorOfIntDataProvider; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + int exp[] = arrayIntRange(size); + SpliteratorOfIntDataBuilder db = new SpliteratorOfIntDataBuilder(data, listIntRange(size)); + + db.add("Spliterators.spliterator(int[], ...)", + () -> Spliterators.spliterator(exp, 0)); + + db.add("Arrays.spliterator(int[], ...)", + () -> Arrays.spliterator(exp)); + + db.add("Spliterators.spliterator(PrimitiveIterator.OfInt, ...)", + () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); + + db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfInt, ...)", + () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); + + class IntSpliteratorFromArray extends Spliterators.AbstractIntSpliterator { + int[] a; + int index = 0; + + IntSpliteratorFromArray(int[] a) { + super(a.length, Spliterator.SIZED); + this.a = a; + } + + @Override + public boolean tryAdvance(IntConsumer action) { + if (index < a.length) { + action.accept(a[index++]); + return true; + } + else { + return false; + } + } + } + db.add("new Spliterators.AbstractIntAdvancingSpliterator()", + () -> new IntSpliteratorFromArray(exp)); + } + + return spliteratorOfIntDataProvider = data.toArray(new Object[0][]); + } + + private static int[] arrayIntRange(int upTo) { + int[] exp = new int[upTo]; + for (int i = 0; i < upTo; i++) + exp[i] = i; + return exp; + } + + private static UnaryOperator> intBoxingConsumer() { + class 
BoxingAdapter implements Consumer, IntConsumer { + private final Consumer b; + + BoxingAdapter(Consumer b) { + this.b = b; + } + + @Override + public void accept(Integer value) { + throw new IllegalStateException(); + } + + @Override + public void accept(int value) { + b.accept(value); + } + } + + return b -> new BoxingAdapter(b); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntForEach(String description, Collection exp, Supplier s) { + testForEach(exp, s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntTryAdvance(String description, Collection exp, Supplier s) { + testTryAdvance(exp, s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { + testMixedTryAdvanceForEach(exp, s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntSplitAfterFullTraversal(String description, Collection exp, Supplier s) { + testSplitAfterFullTraversal(s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntSplitOnce(String description, Collection exp, Supplier s) { + testSplitOnce(exp, s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntSplitSixDeep(String description, Collection exp, Supplier s) { + testSplitSixDeep(exp, s, intBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfInt") + public void testIntSplitUntilNull(String description, Collection exp, Supplier s) { + testSplitUntilNull(exp, s, intBoxingConsumer()); + } + + // + + private static class SpliteratorOfLongDataBuilder { + List data; + + List exp; + + SpliteratorOfLongDataBuilder(List data, List exp) { + this.data = data; + this.exp = exp; + } + + void add(String description, List expected, Supplier s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier s) { + add(description, exp, s); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). 
+ append("}"); + } + } + + static Object[][] spliteratorOfLongDataProvider; + + @DataProvider(name = "Spliterator.OfLong") + public static Object[][] spliteratorOfLongDataProvider() { + if (spliteratorOfLongDataProvider != null) { + return spliteratorOfLongDataProvider; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + long exp[] = arrayLongRange(size); + SpliteratorOfLongDataBuilder db = new SpliteratorOfLongDataBuilder(data, listLongRange(size)); + + db.add("Spliterators.spliterator(long[], ...)", + () -> Spliterators.spliterator(exp, 0)); + + db.add("Arrays.spliterator(long[], ...)", + () -> Arrays.spliterator(exp)); + + db.add("Spliterators.spliterator(PrimitiveIterator.OfLong, ...)", + () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); + + db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfLong, ...)", + () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); + + class LongSpliteratorFromArray extends Spliterators.AbstractLongSpliterator { + long[] a; + int index = 0; + + LongSpliteratorFromArray(long[] a) { + super(a.length, Spliterator.SIZED); + this.a = a; + } + + @Override + public boolean tryAdvance(LongConsumer action) { + if (index < a.length) { + action.accept(a[index++]); + return true; + } + else { + return false; + } + } + } + db.add("new Spliterators.AbstractLongAdvancingSpliterator()", + () -> new LongSpliteratorFromArray(exp)); + } + + return spliteratorOfLongDataProvider = data.toArray(new Object[0][]); + } + + private static List listLongRange(int upTo) { + List exp = new ArrayList<>(); + for (long i = 0; i < upTo; i++) + exp.add(i); + return Collections.unmodifiableList(exp); + } + + private static long[] arrayLongRange(int upTo) { + long[] exp = new long[upTo]; + for (int i = 0; i < upTo; i++) + exp[i] = i; + return exp; + } + + private static UnaryOperator> longBoxingConsumer() { + class BoxingAdapter implements Consumer, LongConsumer { + private final Consumer b; + + BoxingAdapter(Consumer b) { + this.b = b; + } + + @Override + public void accept(Long value) { + throw new IllegalStateException(); + } + + @Override + public void accept(long value) { + b.accept(value); + } + } + + return b -> new BoxingAdapter(b); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongForEach(String description, Collection exp, Supplier s) { + testForEach(exp, s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongTryAdvance(String description, Collection exp, Supplier s) { + testTryAdvance(exp, s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { + testMixedTryAdvanceForEach(exp, s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongSplitAfterFullTraversal(String description, Collection exp, Supplier s) { + testSplitAfterFullTraversal(s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongSplitOnce(String description, Collection exp, Supplier s) { + testSplitOnce(exp, s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongSplitSixDeep(String description, Collection exp, Supplier s) { + testSplitSixDeep(exp, s, longBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfLong") + public void testLongSplitUntilNull(String 
description, Collection exp, Supplier s) { + testSplitUntilNull(exp, s, longBoxingConsumer()); + } + + // + + private static class SpliteratorOfDoubleDataBuilder { + List data; + + List exp; + + SpliteratorOfDoubleDataBuilder(List data, List exp) { + this.data = data; + this.exp = exp; + } + + void add(String description, List expected, Supplier s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier s) { + add(description, exp, s); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). + append("}"); + } + } + + static Object[][] spliteratorOfDoubleDataProvider; + + @DataProvider(name = "Spliterator.OfDouble") + public static Object[][] spliteratorOfDoubleDataProvider() { + if (spliteratorOfDoubleDataProvider != null) { + return spliteratorOfDoubleDataProvider; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + double exp[] = arrayDoubleRange(size); + SpliteratorOfDoubleDataBuilder db = new SpliteratorOfDoubleDataBuilder(data, listDoubleRange(size)); + + db.add("Spliterators.spliterator(double[], ...)", + () -> Spliterators.spliterator(exp, 0)); + + db.add("Arrays.spliterator(double[], ...)", + () -> Arrays.spliterator(exp)); + + db.add("Spliterators.spliterator(PrimitiveIterator.OfDouble, ...)", + () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); + + db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfDouble, ...)", + () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); + + class DoubleSpliteratorFromArray extends Spliterators.AbstractDoubleSpliterator { + double[] a; + int index = 0; + + DoubleSpliteratorFromArray(double[] a) { + super(a.length, Spliterator.SIZED); + this.a = a; + } + + @Override + public boolean tryAdvance(DoubleConsumer action) { + if (index < a.length) { + action.accept(a[index++]); + return true; + } + else { + return false; + } + } + } + db.add("new Spliterators.AbstractDoubleAdvancingSpliterator()", + () -> new DoubleSpliteratorFromArray(exp)); + } + + return spliteratorOfDoubleDataProvider = data.toArray(new Object[0][]); + } + + private static List listDoubleRange(int upTo) { + List exp = new ArrayList<>(); + for (double i = 0; i < upTo; i++) + exp.add(i); + return Collections.unmodifiableList(exp); + } + + private static double[] arrayDoubleRange(int upTo) { + double[] exp = new double[upTo]; + for (int i = 0; i < upTo; i++) + exp[i] = i; + return exp; + } + + private static UnaryOperator> doubleBoxingConsumer() { + class BoxingAdapter implements Consumer, DoubleConsumer { + private final Consumer b; + + BoxingAdapter(Consumer b) { + this.b = b; + } + + @Override + public void accept(Double value) { + throw new IllegalStateException(); + } + + @Override + public void accept(double value) { + b.accept(value); + } + } + + return b -> new BoxingAdapter(b); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleForEach(String description, Collection exp, Supplier s) { + testForEach(exp, s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleTryAdvance(String description, Collection exp, Supplier s) { + testTryAdvance(exp, s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleMixedTryAdvanceForEach(String description, 
Collection exp, Supplier s) { + testMixedTryAdvanceForEach(exp, s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleSplitAfterFullTraversal(String description, Collection exp, Supplier s) { + testSplitAfterFullTraversal(s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleSplitOnce(String description, Collection exp, Supplier s) { + testSplitOnce(exp, s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleSplitSixDeep(String description, Collection exp, Supplier s) { + testSplitSixDeep(exp, s, doubleBoxingConsumer()); + } + + @Test(dataProvider = "Spliterator.OfDouble") + public void testDoubleSplitUntilNull(String description, Collection exp, Supplier s) { + testSplitUntilNull(exp, s, doubleBoxingConsumer()); + } + + // + + private static > void testForEach( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + ArrayList fromForEach = new ArrayList<>(); + spliterator = supplier.get(); + Consumer addToFromForEach = boxingAdapter.apply(fromForEach::add); + spliterator.forEachRemaining(addToFromForEach); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + // assert that size, tryAdvance, and forEach are consistent + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, exp.size()); + } + assertEquals(fromForEach.size(), exp.size()); + + assertContents(fromForEach, exp, isOrdered); + } + + private static > void testTryAdvance( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + spliterator = supplier.get(); + ArrayList fromTryAdvance = new ArrayList<>(); + Consumer addToFromTryAdvance = boxingAdapter.apply(fromTryAdvance::add); + while (spliterator.tryAdvance(addToFromTryAdvance)) { } + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + // assert that size, tryAdvance, and forEach are consistent + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, exp.size()); + } + assertEquals(fromTryAdvance.size(), exp.size()); + + assertContents(fromTryAdvance, exp, isOrdered); + } + + private static > void testMixedTryAdvanceForEach( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + // tryAdvance first few elements, then forEach rest + ArrayList dest = new ArrayList<>(); + spliterator = supplier.get(); + Consumer addToDest = 
boxingAdapter.apply(dest::add); + for (int i = 0; i < 10 && spliterator.tryAdvance(addToDest); i++) { } + spliterator.forEachRemaining(addToDest); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + // Assert that tryAdvance now produce no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, dest.size()); + } + assertEquals(dest.size(), exp.size()); + + if (isOrdered) { + assertEquals(dest, exp); + } + else { + assertContentsUnordered(dest, exp); + } + } + + private static > void testSplitAfterFullTraversal( + Supplier supplier, + UnaryOperator> boxingAdapter) { + // Full traversal using tryAdvance + Spliterator spliterator = supplier.get(); + while (spliterator.tryAdvance(boxingAdapter.apply(e -> { }))) { } + Spliterator split = spliterator.trySplit(); + assertNull(split); + + // Full traversal using forEach + spliterator = supplier.get(); + spliterator.forEachRemaining(boxingAdapter.apply(e -> { + })); + split = spliterator.trySplit(); + assertNull(split); + + // Full traversal using tryAdvance then forEach + spliterator = supplier.get(); + spliterator.tryAdvance(boxingAdapter.apply(e -> { })); + spliterator.forEachRemaining(boxingAdapter.apply(e -> { + })); + split = spliterator.trySplit(); + assertNull(split); + } + + private static > void testSplitOnce( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + ArrayList fromSplit = new ArrayList<>(); + Spliterator s1 = supplier.get(); + Spliterator s2 = s1.trySplit(); + long s1Size = s1.getExactSizeIfKnown(); + long s2Size = (s2 != null) ? 
s2.getExactSizeIfKnown() : 0; + Consumer addToFromSplit = boxingAdapter.apply(fromSplit::add); + if (s2 != null) + s2.forEachRemaining(addToFromSplit); + s1.forEachRemaining(addToFromSplit); + + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, fromSplit.size()); + if (s1Size >= 0 && s2Size >= 0) + assertEquals(sizeIfKnown, s1Size + s2Size); + } + assertContents(fromSplit, exp, isOrdered); + } + + private static > void testSplitSixDeep( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + for (int depth=0; depth < 6; depth++) { + List dest = new ArrayList<>(); + spliterator = supplier.get(); + + assertSpliterator(spliterator); + + // verify splitting with forEach + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), false); + assertContents(dest, exp, isOrdered); + + // verify splitting with tryAdvance + dest.clear(); + spliterator = supplier.get(); + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), true); + assertContents(dest, exp, isOrdered); + } + } + + private static > void visit(int depth, int curLevel, + List dest, S spliterator, UnaryOperator> boxingAdapter, + int rootCharacteristics, boolean useTryAdvance) { + if (curLevel < depth) { + long beforeSize = spliterator.getExactSizeIfKnown(); + Spliterator split = spliterator.trySplit(); + if (split != null) { + assertSpliterator(split, rootCharacteristics); + assertSpliterator(spliterator, rootCharacteristics); + + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0 && + (rootCharacteristics & Spliterator.SIZED) != 0) { + assertEquals(beforeSize, split.estimateSize() + spliterator.estimateSize()); + } + visit(depth, curLevel + 1, dest, split, boxingAdapter, rootCharacteristics, useTryAdvance); + } + visit(depth, curLevel + 1, dest, spliterator, boxingAdapter, rootCharacteristics, useTryAdvance); + } + else { + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + if (useTryAdvance) { + Consumer addToDest = boxingAdapter.apply(dest::add); + int count = 0; + while (spliterator.tryAdvance(addToDest)) { + ++count; + } + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, count); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + + Spliterator split = spliterator.trySplit(); + assertNull(split); + } + else { + List leafDest = new ArrayList<>(); + Consumer addToLeafDest = boxingAdapter.apply(leafDest::add); + spliterator.forEachRemaining(addToLeafDest); + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, leafDest.size()); + + // Assert that forEach now produces no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + Spliterator split = spliterator.trySplit(); + assertNull(split); + + dest.addAll(leafDest); + } + } + } + + private static > void testSplitUntilNull( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + Spliterator s = supplier.get(); + boolean isOrdered = s.hasCharacteristics(Spliterator.ORDERED); + assertSpliterator(s); + + List splits = new ArrayList<>(); + Consumer c = boxingAdapter.apply(splits::add); + + testSplitUntilNull(new SplitNode(c, s)); + assertContents(splits, exp, isOrdered); + } + + private static class SplitNode { + // Constant for every node + final 
Consumer c; + final int rootCharacteristics; + + final Spliterator s; + + SplitNode(Consumer c, Spliterator s) { + this(c, s.characteristics(), s); + } + + private SplitNode(Consumer c, int rootCharacteristics, Spliterator s) { + this.c = c; + this.rootCharacteristics = rootCharacteristics; + this.s = s; + } + + SplitNode fromSplit(Spliterator split) { + return new SplitNode<>(c, rootCharacteristics, split); + } + } + + /** + * Set the maximum stack capacity to 0.25MB. This should be more than enough to detect a bad spliterator + * while not unduly disrupting test infrastructure given the test data sizes that are used are small. + * Note that j.u.c.ForkJoinPool sets the max queue size to 64M (1 << 26). + */ + private static final int MAXIMUM_STACK_CAPACITY = 1 << 18; // 0.25MB + + private static void testSplitUntilNull(SplitNode e) { + // Use an explicit stack to avoid a StackOverflowException when testing a Spliterator + // that when repeatedly split produces a right-balanced (and maybe degenerate) tree, or + // for a spliterator that is badly behaved. + Deque> stack = new ArrayDeque<>(); + stack.push(e); + + int iteration = 0; + while (!stack.isEmpty()) { + assertTrue(iteration++ < MAXIMUM_STACK_CAPACITY, "Exceeded maximum stack modification count of 1 << 18"); + + e = stack.pop(); + Spliterator parentAndRightSplit = e.s; + + long parentEstimateSize = parentAndRightSplit.estimateSize(); + assertTrue(parentEstimateSize >= 0, + String.format("Split size estimate %d < 0", parentEstimateSize)); + + long parentSize = parentAndRightSplit.getExactSizeIfKnown(); + Spliterator leftSplit = parentAndRightSplit.trySplit(); + if (leftSplit == null) { + parentAndRightSplit.forEachRemaining(e.c); + continue; + } + + assertSpliterator(leftSplit, e.rootCharacteristics); + assertSpliterator(parentAndRightSplit, e.rootCharacteristics); + + if (parentEstimateSize != Long.MAX_VALUE && leftSplit.estimateSize() > 0 && parentAndRightSplit.estimateSize() > 0) { + assertTrue(leftSplit.estimateSize() < parentEstimateSize, + String.format("Left split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize, + String.format("Right split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + else { + assertTrue(leftSplit.estimateSize() <= parentEstimateSize, + String.format("Left split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize, + String.format("Right split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + + long leftSize = leftSplit.getExactSizeIfKnown(); + long rightSize = parentAndRightSplit.getExactSizeIfKnown(); + if (parentSize >= 0 && leftSize >= 0 && rightSize >= 0) + assertEquals(parentSize, leftSize + rightSize, + String.format("exact left split size %d + exact right split size %d != parent exact split size %d", + leftSize, rightSize, parentSize)); + + // Add right side to stack first so left side is popped off first + stack.push(e.fromSplit(parentAndRightSplit)); + stack.push(e.fromSplit(leftSplit)); + } + } + + private static void assertSpliterator(Spliterator s, int rootCharacteristics) { + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0) { + assertTrue(s.hasCharacteristics(Spliterator.SUBSIZED), + "Child split is not SUBSIZED when root split is SUBSIZED"); + } 
+ assertSpliterator(s); + } + + private static void assertSpliterator(Spliterator s) { + if (s.hasCharacteristics(Spliterator.SUBSIZED)) { + assertTrue(s.hasCharacteristics(Spliterator.SIZED)); + } + if (s.hasCharacteristics(Spliterator.SIZED)) { + assertTrue(s.estimateSize() != Long.MAX_VALUE); + assertTrue(s.getExactSizeIfKnown() >= 0); + } + try { + s.getComparator(); + assertTrue(s.hasCharacteristics(Spliterator.SORTED)); + } catch (IllegalStateException e) { + assertFalse(s.hasCharacteristics(Spliterator.SORTED)); + } + } + + private static void assertContents(Collection actual, Collection expected, boolean isOrdered) { + if (isOrdered) { + assertEquals(actual, expected); + } + else { + assertContentsUnordered(actual, expected); + } + } + + private static void assertContentsUnordered(Iterable actual, Iterable expected) { + assertEquals(toBoxedMultiset(actual), toBoxedMultiset(expected)); + } + + private static Map toBoxedMultiset(Iterable c) { + Map result = new HashMap<>(); + c.forEach(e -> { + if (result.containsKey(e)) result.put(e, result.get(e) + 1); + else result.put(e, 1); + }); + return result; + } +} # HG changeset patch # User briangoetz # Date 1366167048 14400 # Node ID 162871f3ae8945b44fa0840f20721d5491510c2f # Parent 5148e3e669c5a535a58deb9dd1a3cce59acbaf94 [mq]: JDK-8008670 diff --git a/src/share/classes/java/util/stream/AbstractShortCircuitTask.java b/src/share/classes/java/util/stream/AbstractShortCircuitTask.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/AbstractShortCircuitTask.java @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Spliterator; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Abstract class for fork-join tasks used to implement short-circuiting + * stream ops, which can produce a result without processing all elements of the + * stream. 
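+ *
+ * As a rough sketch (illustrative only, not code taken from this file), a
+ * subclass leaf computation typically publishes any answer it finds via
+ * {@link #shortCircuit}, so that sibling tasks observe it in
+ * {@code canCompute()} and stop early:
+ *
+ * {@code
+ *     protected R doLeaf() {
+ *         R result = ...;           // examine this leaf's chunk for an answer
+ *         if (result != null)
+ *             shortCircuit(result); // installed into the shared result at most once
+ *         return null;
+ *     }
+ * }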
+ * + * @param Type of input elements to the pipeline + * @param Type of output elements from the pipeline + * @param Type of intermediate result, may be different from operation + * result type + * @param Type of child and sibling tasks + * @since 1.8 + */ +abstract class AbstractShortCircuitTask> + extends AbstractTask { + /** + * The result for this computation; this is shared among all tasks and set + * exactly once + */ + protected final AtomicReference sharedResult; + + /** + * Indicates whether this task has been canceled. Tasks may cancel other + * tasks in the computation under various conditions, such as in a + * find-first operation, a task that finds a value will cancel all tasks + * that are later in the encounter order. + */ + protected volatile boolean canceled; + + /** + * Constructor for root nodes. + * @param helper The {@code PipelineHelper} describing the stream pipeline + * up to this operation + * @param spliterator The {@code Spliterator} describing the source for this + * pipeline + */ + protected AbstractShortCircuitTask(PipelineHelper helper, + Spliterator spliterator) { + super(helper, spliterator); + sharedResult = new AtomicReference<>(null); + } + + /** + * Constructor for non-root nodes. + * @param parent Parent task in the computation tree + * @param spliterator The {@code Spliterator} for the portion of the + * computation tree described by this task + */ + protected AbstractShortCircuitTask(K parent, + Spliterator spliterator) { + super(parent, spliterator); + sharedResult = parent.sharedResult; + } + + /** + * Returns the value indicating the computation completed with no task + * finding a short-circuitable result. For example, for a "find" operation, + * this might be null or an empty {@code Optional}. + * + * @return the result to return when no task finds a result + */ + protected abstract R getEmptyResult(); + + @Override + protected boolean canCompute() { + // Have we already found an answer? + if (sharedResult.get() != null) { + tryComplete(); + return false; + } else if (taskCanceled()) { + setLocalResult(getEmptyResult()); + tryComplete(); + return false; + } + else { + return true; + } + } + + /** + * Declares that a globally valid result has been found. If another task has + * not already found the answer, the result is installed in + * {@code sharedResult}. The {@code compute()} method will check + * {@code sharedResult} before proceeding with computation, so this causes + * the computation to terminate early. + * @param result The result found + */ + protected void shortCircuit(R result) { + if (result != null) + sharedResult.compareAndSet(null, result); + } + + /** + * Sets a local result for this task. If this task is the root, set the + * shared result instead (if not already set). + * @param localResult The result to set for this task + */ + @Override + protected void setLocalResult(R localResult) { + if (isRoot()) { + if (localResult != null) + sharedResult.compareAndSet(null, localResult); + } + else + super.setLocalResult(localResult); + } + + /** Retrieves the local result for this task */ + @Override + public R getRawResult() { + return getLocalResult(); + } + + /** + * Retrieves the local result for this task. If this task is the root, + * retrieves the shared result instead. + */ + @Override + public R getLocalResult() { + if (isRoot()) { + R answer = sharedResult.get(); + return (answer == null) ? 
getEmptyResult() : answer; + } + else + return super.getLocalResult(); + } + + /** Mark this node as canceled */ + protected void cancel() { + canceled = true; + } + + /** + * Queries whether this task is canceled. A task is considered canceled if it + * or any of its parents have been canceled. + */ + protected boolean taskCanceled() { + boolean cancel = canceled; + if (!cancel) + for (K parent = getParent(); !cancel && parent != null; parent = parent.getParent()) + cancel = parent.canceled; + return cancel; + } + + /** + * Cancels all tasks which succeed this one in the encounter order. This + * includes canceling all the current task's right sibling, as well as the + * later right siblings of all its parents. + */ + protected void cancelLaterNodes() { + // Go up the tree, cancel right siblings of this node and all parents + for (K parent = getParent(), node = (K) this; parent != null; + node = parent, parent = parent.getParent()) { + // If node is a left child of parent, then has a right sibling + if (parent.leftChild == node) { + K rightSibling = parent.rightChild; + if (!rightSibling.canceled) + rightSibling.canceled = true; + } + } + } +} diff --git a/src/share/classes/java/util/stream/AbstractTask.java b/src/share/classes/java/util/stream/AbstractTask.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/AbstractTask.java @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Spliterator; +import java.util.concurrent.CountedCompleter; +import java.util.concurrent.ForkJoinPool; + +/** + * Abstract base class for most fork-join tasks used to implement stream ops. + * Manages splitting logic, tracking of child tasks, and intermediate results. + * Each task is associated with a {@link Spliterator} that describes the portion + * of the input associated with the subtree rooted at this task. + * Tasks may be leaf nodes (which will traverse the elements of + * the {@code Spliterator}) or internal nodes (which split the + * {@code Spliterator} into multiple child tasks). + * + * @implNote + *

This class is based on {@link CountedCompleter}, a form of fork-join task + * where each task has a semaphore-like count of uncompleted children, and the + * task is implicitly completed and notified when its last child completes. + * Internal node tasks will likely override the {@code onCompletion} method from + * {@code CountedCompleter} to merge the results from child tasks into the + * current task's result. + * + *
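+ * The splitting loop in {@code compute()} below follows the usual
+ * {@code CountedCompleter} idiom; roughly (a sketch, with illustrative local
+ * names for the two spliterator halves):
+ *
+ * {@code
+ *     leftChild  = makeChild(splitSpliterator);      // prefix of the input
+ *     rightChild = makeChild(remainingSpliterator);  // rest of the input
+ *     setPendingCount(1);                            // the forked child completes asynchronously
+ *     leftChild.fork();
+ *     // the loop then continues with rightChild in the current thread
+ * }
+ *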

Splitting and setting up the child task links is done by {@code compute()} + * for internal nodes. At {@code compute()} time for leaf nodes, it is + * guaranteed that the parent's child-related fields (including sibling links + * for the parent's children) will be set up for all children. + * + *

For example, a task that performs a reduce would override {@code doLeaf()} + * to perform a reduction on that leaf node's chunk using the + * {@code Spliterator}, and override {@code onCompletion()} to merge the results + * of the child tasks for internal nodes: + * + *

{@code
+ *     protected R doLeaf() {
+ *         spliterator.forEachRemaining(...);
+ *         return localReductionResult;
+ *     }
+ *
+ *     public void onCompletion(CountedCompleter caller) {
+ *         if (!isLeaf()) {
+ *             R result = leftChild.getLocalResult();
+ *             result = combine(result, rightChild.getLocalResult());
+ *             setLocalResult(result);
+ *         }
+ *     }
+ * }
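+ *
+ * A terminal operation would then typically evaluate in parallel by
+ * constructing the root task and invoking it, along the lines of (a sketch,
+ * reusing the hypothetical {@code ReduceTask} from above):
+ *
+ * {@code
+ *     R result = new ReduceTask<>(helper, spliterator).invoke();
+ * }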
+ * + * @param Type of elements input to the pipeline + * @param Type of elements output from the pipeline + * @param Type of intermediate result, which may be different from operation + * result type + * @param Type of parent, child and sibling tasks + * @since 1.8 + */ +abstract class AbstractTask> + extends CountedCompleter { + + /** + * Default target factor of leaf tasks for parallel decomposition. + * To allow load balancing, we over-partition, currently to approximately + * four tasks per processor, which enables others to help out + * if leaf tasks are uneven or some processors are otherwise busy. + */ + static final int LEAF_TARGET = ForkJoinPool.getCommonPoolParallelism() << 2; + + /** The pipeline helper, common to all tasks in a computation */ + protected final PipelineHelper helper; + + /** + * The spliterator for the portion of the input associated with the subtree + * rooted at this task + */ + protected Spliterator spliterator; + + /** Target leaf size, common to all tasks in a computation */ + protected final long targetSize; + + /** + * The left child. + * null if no children + * if non-null rightChild is non-null + */ + protected K leftChild; + + /** + * The right child. + * null if no children + * if non-null leftChild is non-null + */ + protected K rightChild; + + /** The result of this node, if completed */ + private R localResult; + + /** + * Constructor for root nodes. + * @param helper The {@code PipelineHelper} describing the stream pipeline + * up to this operation + * @param spliterator The {@code Spliterator} describing the source for this + * pipeline + */ + protected AbstractTask(PipelineHelper helper, + Spliterator spliterator) { + super(null); + this.helper = helper; + this.spliterator = spliterator; + this.targetSize = suggestTargetSize(spliterator.estimateSize()); + } + + /** + * Constructor for non-root nodes + * + * @param parent This node's parent task + * @param spliterator Spliterator describing the subtree rooted at this + * node, obtained by splitting the parent spliterator + */ + protected AbstractTask(K parent, + Spliterator spliterator) { + super(parent); + this.spliterator = spliterator; + this.helper = parent.helper; + this.targetSize = parent.targetSize; + } + + /** + * Constructs a new node of type T whose parent is the receiver; must call + * the AbstractTask(T, Spliterator) constructor with the receiver and the + * provided Spliterator. + */ + protected abstract K makeChild(Spliterator spliterator); + + /** + * Computes the result associated with a leaf node. Will be called by + * {@code compute()} and the result passed to @{code setLocalResult()} + */ + protected abstract R doLeaf(); + + /** Suggests a target leaf size based on the initial size estimate */ + public static long suggestTargetSize(long sizeEstimate) { + long est = sizeEstimate / LEAF_TARGET; + return est > 0L ? 
est : 1L; + } + + /** + * Suggests whether it is adviseable to split the provided spliterator based + * on target size and other considerations, such as pool state + */ + public static boolean suggestSplit(Spliterator spliterator, + long targetSize) { + long remaining = spliterator.estimateSize(); + return (remaining > targetSize); + // @@@ May additionally want to fold in pool characteristics such as surplus task count + } + + /** + * Suggests whether it is adviseable to split this task based on target size + * and other considerations + */ + public boolean suggestSplit() { + return suggestSplit(spliterator, targetSize); + } + + /** + * Returns the local result, if any. Subclasses should use + * {@link #setLocalResult(Object)} and {@link #getLocalResult()} to manage + * results. This returns the local result so that calls from within the + * fork-join framework will return the correct result. + * + * @return the local result. + */ + @Override + public R getRawResult() { + return localResult; + } + + /** + * Does nothing; instead, subclasses should use + * {@link #setLocalResult(Object)}} to manage results. + * + * @param result must be null, or an exception is thrown (this is a safety + * tripwire to detect when {@code setRawResult()} is being used + * instead of {@code setLocalResult()} + */ + @Override + protected void setRawResult(R result) { + if (result != null) + throw new IllegalStateException(); + } + + /** + * Retrieves a result previously stored with {@link #setLocalResult} + */ + protected R getLocalResult() { + return localResult; + } + + /** + * Associates the result with the task, can be retrieved with + * {@link #getLocalResult} + */ + protected void setLocalResult(R localResult) { + this.localResult = localResult; + } + + /** + * Indicates whether this this task a leaf node. (Only valid after + * {@link #compute} has been called on this node). If the node is not a + * leaf node, then children will be non-null and numChildren will be + * positive. + */ + protected boolean isLeaf() { + return leftChild == null; + } + + /** + * Indicates whether this task is the root node + */ + protected boolean isRoot() { + return getParent() == null; + } + + /** + * Returns the parent of this task, or null if this task is the root + */ + @SuppressWarnings("unchecked") + protected K getParent() { + return (K) getCompleter(); + } + + /** + * Decides whether or not to split a task further or compute it directly. If + * computing directly, call {@code doLeaf} and pass the result to + * {@code setRawResult}. If splitting, set up the child-related fields, + * create the child tasks, fork the leftmost (prefix) child tasks, and + * compute the rightmost (remaining) child tasks. + * + *

+ * Computing will continue for rightmost tasks while a task can be computed + * as determined by {@link #canCompute()} and that task should and can be + * split into left and right tasks. + * + *

+ * The rightmost tasks are computed in a loop rather than recursively to + * avoid potential stack overflows when computing with a right-balanced + * tree, such as that produced when splitting with a {@link Spliterator} + * created from an {@link java.util.Iterator}. + */ + @Override + public final void compute() { + @SuppressWarnings("unchecked") + K task = (K) this; + while (task.canCompute()) { + Spliterator split; + if (!task.suggestSplit() || (split = task.spliterator.trySplit()) == null) { + task.setLocalResult(task.doLeaf()); + task.tryComplete(); + return; + } + else { + K l = task.leftChild = task.makeChild(split); + K r = task.rightChild = task.makeChild(task.spliterator); + task.setPendingCount(1); + l.fork(); + task = r; + } + } + } + + /** + * {@inheritDoc} + * @implNote + * Clears spliterator and children fields. Overriders MUST call + * {@code super.onCompletion} as the last thing they do if they want these + * cleared + */ + @Override + public void onCompletion(CountedCompleter caller) { + spliterator = null; + leftChild = rightChild = null; + } + + /** + * Determines if the task can be computed. + * @implSpec The default always returns true + * + * @return true if this task can be computed to either calculate the leaf + * via {@link #doLeaf()} or split, otherwise false if this task + * cannot be computed, for example if this task has been cancelled + * and/or a result for the computation has been found by another + * task. + */ + protected boolean canCompute() { + return true; + } + + /** + * Returns whether this node is a "leftmost" node -- whether the path from + * the root to this node involves only traversing leftmost child links. For + * a leaf node, this means it is the first leaf node in the encounter order. + */ + protected boolean isLeftmostNode() { + @SuppressWarnings("unchecked") + K node = (K) this; + while (node != null) { + K parent = node.getParent(); + if (parent != null && parent.leftChild != node) + return false; + node = parent; + } + return true; + } +} diff --git a/src/share/classes/java/util/stream/FindOps.java b/src/share/classes/java/util/stream/FindOps.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/FindOps.java @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +import java.util.Optional; +import java.util.OptionalDouble; +import java.util.OptionalInt; +import java.util.OptionalLong; +import java.util.Spliterator; +import java.util.concurrent.CountedCompleter; +import java.util.function.Predicate; +import java.util.function.Supplier; + +/** + * Factory for instances of a short-circuiting {@code TerminalOp} that searches + * for an element in a stream pipeline, and terminates when it finds one. + * Supported variants include find-first (find the first element in the + * encounter order) and find-any (find any element, may not be the first in + * encounter order.) + * + * @since 1.8 + */ +final class FindOps { + + private FindOps() { } + + /** + * Constructs a {@code TerminalOp} for streams of objects + * + * @param mustFindFirst Whether the {@code TerminalOp} must produce the + * first element in the encounter order + * @param The type of elements of the stream + * @return A {@code TerminalOp} implementing the find operation + */ + public static FindOp> makeRef(boolean mustFindFirst) { + return new FindOp<>(mustFindFirst, StreamShape.REFERENCE, Optional.empty(), + Optional::isPresent, FindSink.OfRef::new); + } + + /** + * Constructs a {@code TerminalOp} for streams of ints + * + * @param mustFindFirst Whether the {@code TerminalOp} must produce the + * first element in the encounter order + * @return A {@code TerminalOp} implementing the find operation + */ + public static FindOp makeInt(boolean mustFindFirst) { + return new FindOp<>(mustFindFirst, StreamShape.INT_VALUE, OptionalInt.empty(), + OptionalInt::isPresent, FindSink.OfInt::new); + } + + /** + * Constructs a {@code TerminalOp} for streams of longs + * + * @param mustFindFirst Whether the {@code TerminalOp} must produce the + * first element in the encounter order + * @return A {@code TerminalOp} implementing the find operation + */ + public static FindOp makeLong(boolean mustFindFirst) { + return new FindOp<>(mustFindFirst, StreamShape.LONG_VALUE, OptionalLong.empty(), + OptionalLong::isPresent, FindSink.OfLong::new); + } + + /** + * Constructs a {@code TerminalOp} for streams of doubles + * + * @param mustFindFirst Whether the {@code TerminalOp} must produce the + * first element in the encounter order + * @return A {@code TerminalOp} implementing the find operation + */ + public static FindOp makeDouble(boolean mustFindFirst) { + return new FindOp<>(mustFindFirst, StreamShape.DOUBLE_VALUE, OptionalDouble.empty(), + OptionalDouble::isPresent, FindSink.OfDouble::new); + } + + /** + * A short-circuiting {@code TerminalOp} that searches for an element in a + * stream pipeline, and terminates when it finds one. Implements both + * find-first (find the first element in the encounter order) and find-any + * (find any element, may not be the first in encounter order.) 
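+ *
+ * For illustration (a sketch; the callers that wire this into the public
+ * stream API are not part of this file), a find-first operation is obtained
+ * with {@code makeRef(true)} and a find-any operation with
+ * {@code makeRef(false)}:
+ *
+ * {@code
+ *     TerminalOp<T, Optional<T>> findFirstOp = FindOps.makeRef(true);
+ *     TerminalOp<T, Optional<T>> findAnyOp   = FindOps.makeRef(false);
+ * }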
+ * + * @param The output type of the stream pipeline + * @param The result type of the find operation, typically an optional + * type + */ + private static final class FindOp implements TerminalOp { + private final StreamShape shape; + final boolean mustFindFirst; + final O emptyValue; + final Predicate presentPredicate; + final Supplier> sinkSupplier; + + /** + * Constructs a {@code FindOp} + * + * @param mustFindFirst If true, must find the first element in + * encounter order, otherwise can find any element + * @param shape Stream shape of elements to search + * @param emptyValue Result value corresponding to "found nothing" + * @param presentPredicate {@code Predicate} on result value + * corresponding to "found something" + * @param sinkSupplier Factory for a {@code TerminalSink} implementing + * the matching functionality + */ + FindOp(boolean mustFindFirst, + StreamShape shape, + O emptyValue, + Predicate presentPredicate, + Supplier> sinkSupplier) { + this.mustFindFirst = mustFindFirst; + this.shape = shape; + this.emptyValue = emptyValue; + this.presentPredicate = presentPredicate; + this.sinkSupplier = sinkSupplier; + } + + @Override + public int getOpFlags() { + return StreamOpFlag.IS_SHORT_CIRCUIT | (mustFindFirst ? 0 : StreamOpFlag.NOT_ORDERED); + } + + @Override + public StreamShape inputShape() { + return shape; + } + + @Override + public O evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + O result = helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).get(); + return result != null ? result : emptyValue; + } + + @Override + public O evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + return new FindTask<>(this, helper, spliterator).invoke(); + } + } + + /** + * Implementation of @{code TerminalSink} that implements the find + * functionality, requesting cancellation when something has been found + * + * @param The type of input element + * @param The result type, typically an optional type + */ + private static abstract class FindSink implements TerminalSink { + boolean hasValue; + T value; + + FindSink() {} // Avoid creation of special accessor + + @Override + public void accept(T value) { + if (!hasValue) { + hasValue = true; + this.value = value; + } + } + + @Override + public boolean cancellationRequested() { + return hasValue; + } + + /** Specialization of {@code FindSink} for reference streams */ + static final class OfRef extends FindSink> { + @Override + public Optional get() { + return hasValue ? Optional.of(value) : null; + } + } + + /** Specialization of {@code FindSink} for int streams */ + static final class OfInt extends FindSink + implements Sink.OfInt { + @Override + public void accept(int value) { + // Boxing is OK here, since few values will actually flow into the sink + accept((Integer) value); + } + + @Override + public OptionalInt get() { + return hasValue ? OptionalInt.of(value) : null; + } + } + + /** Specialization of {@code FindSink} for long streams */ + static final class OfLong extends FindSink + implements Sink.OfLong { + @Override + public void accept(long value) { + // Boxing is OK here, since few values will actually flow into the sink + accept((Long) value); + } + + @Override + public OptionalLong get() { + return hasValue ? 
OptionalLong.of(value) : null; + } + } + + /** Specialization of {@code FindSink} for double streams */ + static final class OfDouble extends FindSink + implements Sink.OfDouble { + @Override + public void accept(double value) { + // Boxing is OK here, since few values will actually flow into the sink + accept((Double) value); + } + + @Override + public OptionalDouble get() { + return hasValue ? OptionalDouble.of(value) : null; + } + } + } + + /** + * {@code ForkJoinTask} implementing parallel short-circuiting search + * @param Input element type to the stream pipeline + * @param Output element type from the stream pipeline + * @param Result type from the find operation + */ + private static final class FindTask + extends AbstractShortCircuitTask> { + private final FindOp op; + + FindTask(FindOp op, + PipelineHelper helper, + Spliterator spliterator) { + super(helper, spliterator); + this.op = op; + } + + FindTask(FindTask parent, Spliterator spliterator) { + super(parent, spliterator); + this.op = parent.op; + } + + @Override + protected FindTask makeChild(Spliterator spliterator) { + return new FindTask<>(this, spliterator); + } + + @Override + protected O getEmptyResult() { + return op.emptyValue; + } + + private void foundResult(O answer) { + if (isLeftmostNode()) + shortCircuit(answer); + else + cancelLaterNodes(); + } + + @Override + protected O doLeaf() { + O result = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).get(); + if (!op.mustFindFirst) { + if (result != null) + shortCircuit(result); + return null; + } + else { + if (result != null) { + foundResult(result); + return result; + } + else + return null; + } + } + + @Override + public void onCompletion(CountedCompleter caller) { + if (op.mustFindFirst) { + for (FindTask child = leftChild, p = null; child != p; + p = child, child = rightChild) { + O result = child.getLocalResult(); + if (result != null && op.presentPredicate.test(result)) { + setLocalResult(result); + foundResult(result); + break; + } + } + } + super.onCompletion(caller); + } + } +} + diff --git a/src/share/classes/java/util/stream/ForEachOps.java b/src/share/classes/java/util/stream/ForEachOps.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/ForEachOps.java @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +import java.util.Objects; +import java.util.Spliterator; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountedCompleter; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * Factory for creating instances of {@code TerminalOp} that perform an + * action for every element of a stream. Supported variants include unordered + * traversal (elements are provided to the {@code Consumer} as soon as they are + * available), and ordered traversal (elements are provided to the + * {@code Consumer} in encounter order.) + * + *

Elements are provided to the {@code Consumer} on whatever thread and in + * whatever order they become available. For ordered traversals, it is + * guaranteed that processing an element happens-before processing + * subsequent elements in the encounter order. + *
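+ * For illustration (a sketch; the public-API callers are outside this file),
+ * an ordered and an unordered traversal differ only in the {@code ordered}
+ * flag passed to the factory methods:
+ *
+ * {@code
+ *     TerminalOp<T, Void> forEachOp        = ForEachOps.makeRef(action, false);
+ *     TerminalOp<T, Void> forEachOrderedOp = ForEachOps.makeRef(action, true);
+ * }
+ *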

Exceptions occurring as a result of sending an element to the + * {@code Consumer} will be relayed to the caller and traversal will be + * prematurely terminated. + * + * @since 1.8 + */ +final class ForEachOps { + + private ForEachOps() { } + + /** + * Constructs a {@code TerminalOp} that perform an action for every element + * of a stream. + * + * @param action The {@code Consumer} that receives all elements of a + * stream + * @param ordered Whether an ordered traversal is requested + * @param The type of the stream elements + * @return the {@code TerminalOp} instance + */ + public static TerminalOp makeRef(Consumer action, + boolean ordered) { + Objects.requireNonNull(action); + return new ForEachOp.OfRef<>(action, ordered); + } + + /** + * Constructs a {@code TerminalOp} that perform an action for every element + * of an {@code IntStream}. + * + * @param action The {@code IntConsumer} that receives all elements of a + * stream + * @param ordered Whether an ordered traversal is requested + * @return the {@code TerminalOp} instance + */ + public static TerminalOp makeInt(IntConsumer action, + boolean ordered) { + Objects.requireNonNull(action); + return new ForEachOp.OfInt(action, ordered); + } + + /** + * Constructs a {@code TerminalOp} that perform an action for every element + * of an {@code LongStream}. + * + * @param action The {@code LongConsumer} that receives all elements of a + * stream + * @param ordered Whether an ordered traversal is requested + * @return the {@code TerminalOp} instance + */ + public static TerminalOp makeLong(LongConsumer action, + boolean ordered) { + Objects.requireNonNull(action); + return new ForEachOp.OfLong(action, ordered); + } + + /** + * Constructs a {@code TerminalOp} that perform an action for every element + * of an {@code DoubleStream}. + * + * @param action The {@code DoubleConsumer} that receives all elements of + * a stream + * @param ordered Whether an ordered traversal is requested + * @return the {@code TerminalOp} instance + */ + public static TerminalOp makeDouble(DoubleConsumer action, + boolean ordered) { + Objects.requireNonNull(action); + return new ForEachOp.OfDouble(action, ordered); + } + + /** + * A {@code TerminalOp} that evaluates a stream pipeline and sends the + * output to itself as a {@code TerminalSink}. Elements will be sent in + * whatever thread they become available. If the traversal is unordered, + * they will be sent independent of the stream's encounter order. + * + *

This terminal operation is stateless. For parallel evaluation, each + * leaf instance of a {@code ForEachTask} will send elements to the same + * {@code TerminalSink} reference that is an instance of this class. + * + * @param The output type of the stream pipeline + */ + private static abstract class ForEachOp + implements TerminalOp, TerminalSink { + private final boolean ordered; + + protected ForEachOp(boolean ordered) { + this.ordered = ordered; + } + + // TerminalOp + + @Override + public int getOpFlags() { + return ordered ? 0 : StreamOpFlag.NOT_ORDERED; + } + + @Override + public Void evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + return helper.wrapAndCopyInto(this, spliterator).get(); + } + + @Override + public Void evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (ordered) + new ForEachOrderedTask<>(helper, spliterator, this).invoke(); + else + new ForEachTask<>(helper, spliterator, helper.wrapSink(this)).invoke(); + return null; + } + + // TerminalSink + + @Override + public Void get() { + return null; + } + + // Implementations + + /** Implementation class for reference streams */ + private static class OfRef extends ForEachOp { + final Consumer consumer; + + OfRef(Consumer consumer, boolean ordered) { + super(ordered); + this.consumer = consumer; + } + + @Override + public void accept(T t) { + consumer.accept(t); + } + } + + /** Implementation class for {@code IntStream} */ + private static class OfInt extends ForEachOp + implements Sink.OfInt { + final IntConsumer consumer; + + OfInt(IntConsumer consumer, boolean ordered) { + super(ordered); + this.consumer = consumer; + } + + @Override + public StreamShape inputShape() { + return StreamShape.INT_VALUE; + } + + @Override + public void accept(int t) { + consumer.accept(t); + } + } + + /** Implementation class for {@code LongStream} */ + private static class OfLong extends ForEachOp + implements Sink.OfLong { + final LongConsumer consumer; + + OfLong(LongConsumer consumer, boolean ordered) { + super(ordered); + this.consumer = consumer; + } + + @Override + public StreamShape inputShape() { + return StreamShape.LONG_VALUE; + } + + @Override + public void accept(long t) { + consumer.accept(t); + } + } + + /** Implementation class for {@code DoubleStream} */ + private static class OfDouble extends ForEachOp + implements Sink.OfDouble { + final DoubleConsumer consumer; + + OfDouble(DoubleConsumer consumer, boolean ordered) { + super(ordered); + this.consumer = consumer; + } + + @Override + public StreamShape inputShape() { + return StreamShape.DOUBLE_VALUE; + } + + @Override + public void accept(double t) { + consumer.accept(t); + } + } + } + + /** A {@code ForkJoinTask} for performing a parallel for-each operation */ + private static class ForEachTask extends CountedCompleter { + private Spliterator spliterator; + private final Sink sink; + private final PipelineHelper helper; + private final long targetSize; + + ForEachTask(PipelineHelper helper, + Spliterator spliterator, + Sink sink) { + super(null); + this.spliterator = spliterator; + this.sink = sink; + this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize()); + this.helper = helper; + } + + ForEachTask(ForEachTask parent, Spliterator spliterator) { + super(parent); + this.spliterator = spliterator; + this.sink = parent.sink; + this.targetSize = parent.targetSize; + this.helper = parent.helper; + } + + public void compute() { + boolean isShortCircuit = 
StreamOpFlag.SHORT_CIRCUIT.isKnown(helper.getStreamAndOpFlags()); + while (true) { + if (isShortCircuit && sink.cancellationRequested()) { + propagateCompletion(); + spliterator = null; + return; + } + + Spliterator split; + if (!AbstractTask.suggestSplit(spliterator, targetSize) + || (split = spliterator.trySplit()) == null) { + helper.copyInto(sink, spliterator); + propagateCompletion(); + spliterator = null; + return; + } + else { + addToPendingCount(1); + new ForEachTask<>(this, split).fork(); + } + } + } + } + + /** + * A {@code ForkJoinTask} for performing a parallel for-each operation + * which visits the elements in encounter order + */ + private static class ForEachOrderedTask extends CountedCompleter { + private final PipelineHelper helper; + private Spliterator spliterator; + private final long targetSize; + private final ConcurrentHashMap, ForEachOrderedTask> completionMap; + private final Sink action; + private final Object lock; + private final ForEachOrderedTask leftPredecessor; + private Node node; + + protected ForEachOrderedTask(PipelineHelper helper, + Spliterator spliterator, + Sink action) { + super(null); + this.helper = helper; + this.spliterator = spliterator; + this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize()); + this.completionMap = new ConcurrentHashMap<>(); + this.action = action; + this.lock = new Object(); + this.leftPredecessor = null; + } + + ForEachOrderedTask(ForEachOrderedTask parent, + Spliterator spliterator, + ForEachOrderedTask leftPredecessor) { + super(parent); + this.helper = parent.helper; + this.spliterator = spliterator; + this.targetSize = parent.targetSize; + this.completionMap = parent.completionMap; + this.action = parent.action; + this.lock = parent.lock; + this.leftPredecessor = leftPredecessor; + } + + @Override + public final void compute() { + doCompute(this); + } + + private static void doCompute(ForEachOrderedTask task) { + while (true) { + Spliterator split; + if (!AbstractTask.suggestSplit(task.spliterator, task.targetSize) + || (split = task.spliterator.trySplit()) == null) { + if (task.getPendingCount() == 0) { + task.helper.wrapAndCopyInto(task.action, task.spliterator); + } + else { + Node.Builder nb = task.helper.makeNodeBuilder( + task.helper.exactOutputSizeIfKnown(task.spliterator), + size -> (T[]) new Object[size]); + task.node = task.helper.wrapAndCopyInto(nb, task.spliterator).build(); + } + task.tryComplete(); + return; + } + else { + ForEachOrderedTask leftChild = new ForEachOrderedTask<>(task, split, task.leftPredecessor); + ForEachOrderedTask rightChild = new ForEachOrderedTask<>(task, task.spliterator, leftChild); + task.completionMap.put(leftChild, rightChild); + task.addToPendingCount(1); // forking + rightChild.addToPendingCount(1); // right pending on left child + if (task.leftPredecessor != null) { + leftChild.addToPendingCount(1); // left pending on previous subtree, except left spine + if (task.completionMap.replace(task.leftPredecessor, task, leftChild)) + task.addToPendingCount(-1); // transfer my "right child" count to my left child + else + leftChild.addToPendingCount(-1); // left child is ready to go when ready + } + leftChild.fork(); + task = rightChild; + } + } + } + + @Override + public void onCompletion(CountedCompleter caller) { + spliterator = null; + if (node != null) { + // Dump any data from this leaf into the sink + synchronized (lock) { + node.forEach(action); + } + node = null; + } + ForEachOrderedTask victim = completionMap.remove(this); + if (victim != null) + 
victim.tryComplete(); + } + } +} diff --git a/src/share/classes/java/util/stream/MatchOps.java b/src/share/classes/java/util/stream/MatchOps.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/MatchOps.java @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Objects; +import java.util.Spliterator; +import java.util.function.DoublePredicate; +import java.util.function.IntPredicate; +import java.util.function.LongPredicate; +import java.util.function.Predicate; +import java.util.function.Supplier; + +/** + * Factory for instances of a short-circuiting {@code TerminalOp} that + * implement quantified predicate matching on the elements of a stream. + * Supported variants include match-all, match-any, and match-none. + * + * @since 1.8 + */ +final class MatchOps { + + private MatchOps() { } + + /** + * Enum describing quantified match options -- all match, any match, none + * match + */ + enum MatchKind { + /** Do all elements match the predicate? */ + ANY(true, true), + + /** Do any elements match the predicate? */ + ALL(false, false), + + /** Do no elements match the predicate? 
*/ + NONE(true, false); + + private final boolean stopOnPredicateMatches; + private final boolean shortCircuitResult; + + private MatchKind(boolean stopOnPredicateMatches, + boolean shortCircuitResult) { + this.stopOnPredicateMatches = stopOnPredicateMatches; + this.shortCircuitResult = shortCircuitResult; + } + } + + /** + * Constructs a quantified predicate matcher for a Stream + * + * @param predicate The {@code Predicate} to apply to stream elements + * @param matchKind The kind of quantified match (all, any, none) + * @param The type of stream elements + * @return A {@code TerminalOp} implementing the desired quantified match + * criteria + */ + public static TerminalOp makeRef(Predicate predicate, + MatchKind matchKind) { + Objects.requireNonNull(predicate); + Objects.requireNonNull(matchKind); + class MatchSink extends BooleanTerminalSink { + MatchSink() { + super(matchKind); + } + + @Override + public void accept(T t) { + if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) { + stop = true; + value = matchKind.shortCircuitResult; + } + } + } + + // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref + Supplier> s = new Supplier>() { + @Override + public BooleanTerminalSink get() {return new MatchSink();} + }; + return new MatchOp<>(StreamShape.REFERENCE, matchKind, s); + } + + /** + * Constructs a quantified predicate matcher for an {@code IntStream} + * + * @param predicate The {@code Predicate} to apply to stream elements + * @param matchKind The kind of quantified match (all, any, none) + * @return A {@code TerminalOp} implementing the desired quantified match + * criteria + */ + public static TerminalOp makeInt(IntPredicate predicate, + MatchKind matchKind) { + Objects.requireNonNull(predicate); + Objects.requireNonNull(matchKind); + class MatchSink extends BooleanTerminalSink implements Sink.OfInt { + MatchSink() { + super(matchKind); + } + + @Override + public void accept(int t) { + if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) { + stop = true; + value = matchKind.shortCircuitResult; + } + } + } + + // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref + Supplier> s = new Supplier>() { + @Override + public BooleanTerminalSink get() {return new MatchSink();} + }; + return new MatchOp<>(StreamShape.INT_VALUE, matchKind, s); + } + + /** + * Constructs a quantified predicate matcher for a {@code LongStream} + * + * @param predicate The {@code Predicate} to apply to stream elements + * @param matchKind The kind of quantified match (all, any, none) + * @return A {@code TerminalOp} implementing the desired quantified match + * criteria + */ + public static TerminalOp makeLong(LongPredicate predicate, + MatchKind matchKind) { + Objects.requireNonNull(predicate); + Objects.requireNonNull(matchKind); + class MatchSink extends BooleanTerminalSink implements Sink.OfLong { + + MatchSink() { + super(matchKind); + } + + @Override + public void accept(long t) { + if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) { + stop = true; + value = matchKind.shortCircuitResult; + } + } + } + + // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref + Supplier> s = new Supplier>() { + @Override + public BooleanTerminalSink get() {return new MatchSink();} + }; + return new MatchOp<>(StreamShape.LONG_VALUE, matchKind, s); + } + + /** + * Constructs a quantified predicate matcher for a {@code DoubleStream} + * + * @param predicate The {@code Predicate} to apply to stream 
elements + * @param matchKind The kind of quantified match (all, any, none) + * @return A {@code TerminalOp} implementing the desired quantified match + * criteria + */ + public static TerminalOp makeDouble(DoublePredicate predicate, + MatchKind matchKind) { + Objects.requireNonNull(predicate); + Objects.requireNonNull(matchKind); + class MatchSink extends BooleanTerminalSink implements Sink.OfDouble { + + MatchSink() { + super(matchKind); + } + + @Override + public void accept(double t) { + if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) { + stop = true; + value = matchKind.shortCircuitResult; + } + } + } + + // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref + Supplier> s = new Supplier>() { + @Override + public BooleanTerminalSink get() {return new MatchSink();} + }; + return new MatchOp<>(StreamShape.DOUBLE_VALUE, matchKind, s); + } + + /** + * A short-circuiting {@code TerminalOp} that evaluates a predicate on the + * elements of a stream and determines whether all, any or none of those + * elements match the predicate. + * + * @param The output type of the stream pipeline + */ + private static final class MatchOp implements TerminalOp { + private final StreamShape inputShape; + final MatchKind matchKind; + final Supplier> sinkSupplier; + + /** + * Constructs a {@code MatchOp} + * + * @param shape The output shape of the stream pipeline + * @param matchKind The kind of quantified match (all, any, none) + * @param sinkSupplier {@code Supplier} for a {@code Sink} of the + * appropriate shape which implements the matching operation + */ + MatchOp(StreamShape shape, + MatchKind matchKind, + Supplier> sinkSupplier) { + this.inputShape = shape; + this.matchKind = matchKind; + this.sinkSupplier = sinkSupplier; + } + + @Override + public int getOpFlags() { + return StreamOpFlag.IS_SHORT_CIRCUIT | StreamOpFlag.NOT_ORDERED; + } + + @Override + public StreamShape inputShape() { + return inputShape; + } + + @Override + public Boolean evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + return helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).getAndClearState(); + } + + @Override + public Boolean evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + // Approach for parallel implementation: + // - Decompose as per usual + // - run match on leaf chunks, call result "b" + // - if b == matchKind.shortCircuitOn, complete early and return b + // - else if we complete normally, return !shortCircuitOn + + return new MatchTask<>(this, helper, spliterator).invoke(); + } + } + + /** + * Boolean specific terminal sink to avoid the boxing costs when returning + * results. Subclasses implement the shape-specific functionality. 
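+ *
+ * For example (illustrative): with {@code MatchKind.ANY} the short-circuit
+ * result is {@code true}, so {@code value} starts out {@code false} and is
+ * flipped to {@code true} by the first element that satisfies the predicate;
+ * {@code getAndClearState()} then reports that answer:
+ *
+ * {@code
+ *     BooleanTerminalSink<T> sink = ...;  // e.g. the MatchSink built by makeRef
+ *     // push elements until cancellationRequested() returns true or input ends
+ *     boolean anyMatched = sink.getAndClearState();
+ * }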
+ * + * @param The output type of the stream pipeline + */ + private static abstract class BooleanTerminalSink implements Sink { + boolean stop; + boolean value; + + BooleanTerminalSink(MatchKind matchKind) { + value = !matchKind.shortCircuitResult; + } + + public boolean getAndClearState() { + return value; + } + + @Override + public boolean cancellationRequested() { + return stop; + } + } + + /** + * ForkJoinTask implementation to implement a parallel short-circuiting + * quantified match + * + * @param The type of source elements for the pipeline + * @param The type of output elements for the pipeline + */ + private static final class MatchTask + extends AbstractShortCircuitTask> { + private final MatchOp op; + + /** Constructor for root node */ + MatchTask(MatchOp op, PipelineHelper helper, + Spliterator spliterator) { + super(helper, spliterator); + this.op = op; + } + + /** Constructor for non-root node */ + MatchTask(MatchTask parent, Spliterator spliterator) { + super(parent, spliterator); + this.op = parent.op; + } + + @Override + protected MatchTask makeChild(Spliterator spliterator) { + return new MatchTask<>(this, spliterator); + } + + @Override + protected Boolean doLeaf() { + boolean b = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).getAndClearState(); + if (b == op.matchKind.shortCircuitResult) + shortCircuit(b); + return null; + } + + @Override + protected Boolean getEmptyResult() { + return !op.matchKind.shortCircuitResult; + } + } +} + diff --git a/src/share/classes/java/util/stream/Node.java b/src/share/classes/java/util/stream/Node.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Node.java @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Spliterator; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.IntFunction; +import java.util.function.LongConsumer; + +/** + * An immutable container for describing an ordered sequence of elements of some + * type {@code T}. + * + *

A {@code Node} contains a fixed number of elements, which can be accessed + * via the {@link #count}, {@link #spliterator}, {@link #forEach}, + * {@link #asArray}, or {@link #copyInto} methods. A {@code Node} may have zero + * or more child {@code Node}s; if it has no children (accessed via + * {@link #getChildCount} and {@link #getChild(int)}, it is considered flat + * or a leaf; if it has children, it is considered an + * internal node. The size of an internal node is the sum of sizes of + * its children. + * + * @apiNote + *

A {@code Node} typically does not store the elements directly, but instead + * mediates access to one or more existing (effectively immutable) data + * structures such as a {@code Collection}, array, or a set of other + * {@code Node}s. Commonly {@code Node}s are formed into a tree whose shape + * corresponds to the computation tree that produced the elements that are + * contained in the leaf nodes. The use of {@code Node} within the stream + * framework is largely to avoid copying data unnecessarily during parallel + * operations. + * + * @param the type of elements. + * @since 1.8 + */ +interface Node { + + /** + * Returns a {@link Spliterator} describing the elements contained in this + * {@code Node}. + * + * @return a {@code Spliterator} describing the elements contained in this + * {@code Node} + */ + Spliterator spliterator(); + + /** + * Traverses the elements of this node, and invoke the provided + * {@code Consumer} with each element. Elements are provided in encounter + * order if the source for the {@code Node} has a defined encounter order. + * + * @param consumer A {@code Consumer} that is to be invoked with each + * element in this {@code Node} + */ + void forEach(Consumer consumer); + + /** + * Returns the number of child nodes of this node. + * + * @implSpec The default implementation returns zero + * @return the number of child nodes + */ + default int getChildCount() { + return 0; + } + + /** + * Retrieves the child {@code Node} at a given index. + * + * @implSpec The default implementation always throws + * {@code IndexOutOfBoundsException} + * @param i the index to the child node + * @return the child node + * @throws IndexOutOfBoundsException if the index is less than 0 or greater + * than or equal to the number of child nodes. + */ + default Node getChild(int i) { + throw new IndexOutOfBoundsException(); + } + + /** + * Provides an array view of the contents of this node. + * + *
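To make the tree shape concrete, here is a minimal standalone sketch (illustrative only, not the Node type added by this patch); MiniNode, Leaf, Concat and MiniNodeDemo are hypothetical names.

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Consumer;

    // A leaf holds data; an internal node only concatenates children, so its
    // count is the sum of its children's counts, the same rule Node uses.
    interface MiniNode<T> {
        long count();
        void forEach(Consumer<? super T> action);
    }

    final class Leaf<T> implements MiniNode<T> {
        private final List<T> data;
        Leaf(List<T> data) { this.data = data; }
        public long count() { return data.size(); }
        public void forEach(Consumer<? super T> action) { data.forEach(action); }
    }

    final class Concat<T> implements MiniNode<T> {
        private final MiniNode<T> left, right;
        Concat(MiniNode<T> left, MiniNode<T> right) { this.left = left; this.right = right; }
        public long count() { return left.count() + right.count(); }
        public void forEach(Consumer<? super T> action) { left.forEach(action); right.forEach(action); }
    }

    class MiniNodeDemo {
        public static void main(String[] args) {
            MiniNode<String> tree = new Concat<>(new Leaf<>(Arrays.asList("a", "b")),
                                                 new Leaf<>(Arrays.asList("c")));
            System.out.println(tree.count());   // 3
            tree.forEach(System.out::print);    // abc
        }
    }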

Depending on the underlying implementation, this may return a + * reference to an internal array rather than a copy. Since the returned + * array may be shared, the returned array should not be modified. The + * {@code generator} function may be consulted to create the array if a new + * array needs to be created. + * + * @param generator A factory function which takes an integer parameter and + * returns a new, empty array of that size and of the appropriate + * array type + * @return an array containing the contents of this {@code Node} + */ + T[] asArray(IntFunction generator); + + /** + * Copies the content of this {@code Node} into an array, starting at a + * given offset into the array. It is the caller's responsibility to ensure + * there is sufficient room in the array. + * + * @param array the array into which to copy the contents of this + * {@code Node} + * @param offset the starting offset within the array + * @throws IndexOutOfBoundsException if copying would cause access of data + * outside array bounds + * @throws NullPointerException if {@code array} is {@code null} + */ + void copyInto(T[] array, int offset); + + /** + * Gets the {@code StreamShape} associated with this {@code Node}. + * + * @implSpec The default in {@code Node} returns + * {@code StreamShape.REFERENCE} + * @return the stream shape associated with this node + */ + default StreamShape getShape() { + return StreamShape.REFERENCE; + } + + /** + * Returns the number of elements contained in this node + * + * @return the number of elements contained in this node + */ + long count(); + + /** + * A mutable builder for a {@code Node} that implements {@link Sink}, which + * builds a flat node containing the elements that have been pushed to it. + * + */ + interface Builder extends Sink { + + /** + * Builds the node. Should be called after all elements have been + * pushed and signalled with an invocation of {@link Sink#end()}. + * + * @return the resulting {@code Node} + */ + Node build(); + + /** Specialized @{code Node.Builder} for int elements */ + interface OfInt extends Node.Builder, Sink.OfInt { + @Override + Node.OfInt build(); + } + + /** Specialized @{code Node.Builder} for long elements */ + interface OfLong extends Node.Builder, Sink.OfLong { + @Override + Node.OfLong build(); + } + + /** Specialized @{code Node.Builder} for double elements */ + interface OfDouble extends Node.Builder, Sink.OfDouble { + @Override + Node.OfDouble build(); + } + } + + /** Specialized {@code Node} for int elements */ + interface OfInt extends Node { + + /** + * {@inheritDoc} + * @return A {@link Spliterator.OfInt} describing the elements of this + * node + */ + @Override + Spliterator.OfInt spliterator(); + + /** + * {@inheritDoc} + * @param consumer A {@code Consumer} that is to be invoked with each + * element in this {@code Node}. If this is an + * {@code IntConsumer}, it is cast to {@code IntConsumer} so the + * elements may be processed without boxing. + */ + @Override + default void forEach(Consumer consumer) { + if (consumer instanceof IntConsumer) { + forEach((IntConsumer) consumer); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfInt.forEachRemaining(Consumer)"); + spliterator().forEachRemaining(consumer); + } + } + + /** + * Traverses the elements of this node, and invoke the provided + * {@code IntConsumer} with each element. 
+ * + * @param consumer A {@code IntConsumer} that is to be invoked with each + * element in this {@code Node} + */ + void forEach(IntConsumer consumer); + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes the generator to create + * an instance of an Integer[] array with a length of {@link #count()} + * and then invokes {@link #copyInto(Integer[], int)} with that + * Integer[] array at an offset of 0. This is not efficient and it is + * recommended to invoke {@link #asIntArray()}. + */ + @Override + default Integer[] asArray(IntFunction generator) { + Integer[] boxed = generator.apply((int) count()); + copyInto(boxed, 0); + return boxed; + } + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes {@link #asIntArray()} to + * obtain an int[] array then and copies the elements from that int[] + * array into the boxed Integer[] array. This is not efficient and it + * is recommended to invoke {@link #copyInto(int[], int)}. + */ + @Override + default void copyInto(Integer[] boxed, int offset) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Integer[], int)"); + + int[] array = asIntArray(); + for (int i = 0; i < array.length; i++) { + boxed[offset + i] = array[i]; + } + } + + @Override + default Node.OfInt getChild(int i) { + throw new IndexOutOfBoundsException(); + } + + /** + * Views this node as an int[] array. + * + *
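The instanceof test in Node.OfInt.forEach above is the usual way to skip boxing when the caller actually supplied a primitive-friendly consumer. A self-contained sketch of that dispatch follows; PrimitiveDispatchSketch and Printer are hypothetical names.

    import java.util.function.Consumer;
    import java.util.function.IntConsumer;

    // Same downcast-if-possible dispatch as Node.OfInt.forEach: if the boxed
    // Consumer also implements IntConsumer, take the unboxed path.
    class PrimitiveDispatchSketch {
        static void forEachInt(int[] values, Consumer<? super Integer> consumer) {
            if (consumer instanceof IntConsumer) {
                IntConsumer unboxed = (IntConsumer) consumer;
                for (int v : values) unboxed.accept(v);     // no boxing on this path
            } else {
                for (int v : values) consumer.accept(v);    // autoboxes each element
            }
        }

        public static void main(String[] args) {
            class Printer implements Consumer<Integer>, IntConsumer {
                public void accept(Integer i) { accept(i.intValue()); }
                public void accept(int i) { System.out.println(i); }
            }
            forEachInt(new int[] {1, 2, 3}, new Printer());   // takes the int path
        }
    }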

Depending on the underlying implementation, this may return a + * reference to an internal array rather than a copy. It is the caller's + * responsibility to decide whether this node or the array is used + * as the primary reference for the data.

+ * + * @return an array containing the contents of this {@code Node} + */ + int[] asIntArray(); + + /** + * Copies the content of this {@code Node} into an int[] array, starting + * at a given offset into the array. It is the caller's responsibility + * to ensure there is sufficient room in the array. + * + * @param array the array into which to copy the contents of this + * {@code Node} + * @param offset the starting offset within the array + * @throws IndexOutOfBoundsException if copying would cause access of + * data outside array bounds + * @throws NullPointerException if {@code array} is {@code null} + */ + void copyInto(int[] array, int offset); + + /** + * {@inheritDoc} + * @implSpec The default in {@code Node.OfInt} returns + * {@code StreamShape.INT_VALUE} + */ + default StreamShape getShape() { + return StreamShape.INT_VALUE; + } + + } + + /** Specialized {@code Node} for long elements */ + interface OfLong extends Node { + + /** + * {@inheritDoc} + * @return A {@link Spliterator.OfLong} describing the elements of this + * node + */ + @Override + Spliterator.OfLong spliterator(); + + /** + * {@inheritDoc} + * @param consumer A {@code Consumer} that is to be invoked with each + * element in this {@code Node}. If this is an + * {@code LongConsumer}, it is cast to {@code LongConsumer} so + * the elements may be processed without boxing. + */ + @Override + default void forEach(Consumer consumer) { + if (consumer instanceof LongConsumer) { + forEach((LongConsumer) consumer); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)"); + spliterator().forEachRemaining(consumer); + } + } + + /** + * Traverses the elements of this node, and invoke the provided + * {@code LongConsumer} with each element. + * + * @param consumer A {@code LongConsumer} that is to be invoked with + * each element in this {@code Node} + */ + void forEach(LongConsumer consumer); + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes the generator to create + * an instance of a Long[] array with a length of {@link #count()} and + * then invokes {@link #copyInto(Long[], int)} with that Long[] array at + * an offset of 0. This is not efficient and it is recommended to + * invoke {@link #asLongArray()}. + */ + @Override + default Long[] asArray(IntFunction generator) { + Long[] boxed = generator.apply((int) count()); + copyInto(boxed, 0); + return boxed; + } + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes {@link #asLongArray()} + * to obtain a long[] array then and copies the elements from that + * long[] array into the boxed Long[] array. This is not efficient and + * it is recommended to invoke {@link #copyInto(long[], int)}. + */ + @Override + default void copyInto(Long[] boxed, int offset) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Long[], int)"); + + long[] array = asLongArray(); + for (int i = 0; i < array.length; i++) { + boxed[offset + i] = array[i]; + } + } + + @Override + default Node.OfLong getChild(int i) { + throw new IndexOutOfBoundsException(); + } + + /** + * Views this node as a long[] array. + * + *

Depending on the underlying implementation, this may return a + * reference to an internal array rather than a copy. It is the caller's + * responsibility to decide whether this node or the array is used + * as the primary reference for the data.

+ * + * @return an array containing the contents of this {@code Node} + */ + long[] asLongArray(); + + /** + * Copies the content of this {@code Node} into a long[] array, starting + * at a given offset into the array. It is the caller's responsibility + * to ensure there is sufficient room in the array. + * + * @param array the array into which to copy the contents of this + * {@code Node} + * @param offset the starting offset within the array + * @throws IndexOutOfBoundsException if copying would cause access of + * data outside array bounds + * @throws NullPointerException if {@code array} is {@code null} + */ + void copyInto(long[] array, int offset); + + /** + * {@inheritDoc} + * @implSpec The default in {@code Node.OfLong} returns + * {@code StreamShape.LONG_VALUE} + */ + default StreamShape getShape() { + return StreamShape.LONG_VALUE; + } + + + } + + /** Specialized {@code Node} for double elements */ + interface OfDouble extends Node { + + /** + * {@inheritDoc} + * @return A {@link Spliterator.OfDouble} describing the elements of + * this node + */ + @Override + Spliterator.OfDouble spliterator(); + + /** + * {@inheritDoc} + * @param consumer A {@code Consumer} that is to be invoked with each + * element in this {@code Node}. If this is an + * {@code DoubleConsumer}, it is cast to {@code DoubleConsumer} + * so the elements may be processed without boxing. + */ + @Override + default void forEach(Consumer consumer) { + if (consumer instanceof DoubleConsumer) { + forEach((DoubleConsumer) consumer); + } + else { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)"); + spliterator().forEachRemaining(consumer); + } + } + + /** + * Traverses the elements of this node, and invoke the provided + * {@code DoubleConsumer} with each element. + * + * @param consumer A {@code DoubleConsumer} that is to be invoked with + * each element in this {@code Node} + */ + void forEach(DoubleConsumer consumer); + + // + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes the generator to create + * an instance of a Double[] array with a length of {@link #count()} and + * then invokes {@link #copyInto(Double[], int)} with that Double[] + * array at an offset of 0. This is not efficient and it is recommended + * to invoke {@link #asDoubleArray()}. + */ + @Override + default Double[] asArray(IntFunction generator) { + Double[] boxed = generator.apply((int) count()); + copyInto(boxed, 0); + return boxed; + } + + /** + * {@inheritDoc} + * @implSpec the default implementation invokes {@link #asDoubleArray()} + * to obtain a double[] array then and copies the elements from that + * double[] array into the boxed Double[] array. This is not efficient + * and it is recommended to invoke {@link #copyInto(double[], int)}. + */ + @Override + default void copyInto(Double[] boxed, int offset) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Node.OfDouble.copyInto(Double[], int)"); + + double[] array = asDoubleArray(); + for (int i = 0; i < array.length; i++) { + boxed[offset + i] = array[i]; + } + } + + @Override + default Node.OfDouble getChild(int i) { + throw new IndexOutOfBoundsException(); + } + + /** + * Views this node as a double[] array. + * + *

Depending on the underlying implementation, this may return a + * reference to an internal array rather than a copy. It is the caller's + * responsibility to decide whether this node or the array is used + * as the primary reference for the data.

+ * + * @return an array containing the contents of this {@code Node} + */ + double[] asDoubleArray(); + + /** + * Copies the content of this {@code Node} into a double[] array, starting + * at a given offset into the array. It is the caller's responsibility + * to ensure there is sufficient room in the array. + * + * @param array the array into which to copy the contents of this + * {@code Node} + * @param offset the starting offset within the array + * @throws IndexOutOfBoundsException if copying would cause access of + * data outside array bounds + * @throws NullPointerException if {@code array} is {@code null} + */ + void copyInto(double[] array, int offset); + + /** + * {@inheritDoc} + * @implSpec The default in {@code Node.OfDouble} returns + * {@code StreamShape.DOUBLE_VALUE} + */ + default StreamShape getShape() { + return StreamShape.DOUBLE_VALUE; + } + + } +} diff --git a/src/share/classes/java/util/stream/PipelineHelper.java b/src/share/classes/java/util/stream/PipelineHelper.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/PipelineHelper.java @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Spliterator; +import java.util.function.IntFunction; + +/** + * Helper class for executing + * stream pipelines, capturing all of the information about a stream + * pipeline (output shape, intermediate operations, stream flags, parallelism, + * etc) in one place. + * + * @apiNote + * A {@code PipelineHelper} describes the initial segment of a stream pipeline, + * including its source, intermediate operations, and may additionally + * incorporate information about the terminal (or stateful) operation which + * follows the last intermediate operation described by this + * {@code PipelineHelper}. 
The {@code PipelineHelper} is passed to the + * {@link TerminalOp#evaluateParallel(PipelineHelper, java.util.Spliterator)}, + * {@link TerminalOp#evaluateSequential(PipelineHelper, java.util.Spliterator)}, + * and {@link AbstractPipeline#opEvaluateParallel(PipelineHelper, java.util.Spliterator, + * java.util.function.IntFunction)}, methods, which can use the + * {@code PipelineHelper} to access information about the pipeline such as + * input shape, output shape, stream flags, and size, and use the helper methods + * such as {@link #wrapAndCopyInto(Sink, Spliterator)}, + * {@link #copyInto(Sink, Spliterator)}, and {@link #wrapSink(Sink)} to execute + * pipeline operations. + * + * @param Type of output elements from the pipeline + * @since 1.8 + */ +abstract class PipelineHelper { + + /** + * Gets the combined stream and operation flags for the output of the described + * pipeline. This will incorporate stream flags from the stream source, all + * the intermediate operations and the terminal operation. + * + * @return the combined stream and operation flags + * @see StreamOpFlag + */ + abstract int getStreamAndOpFlags(); + + /** + * Returns the exact output size of the portion of the output resulting from + * applying the pipeline stages described by this {@code PipelineHelper} to + * the the portion of the input described by the provided + * {@code Spliterator}, if known. If not known or known infinite, will + * return {@code -1}. + * + * @apiNote + * The exact output size is known if the {@code Spliterator} has the + * {@code SIZED} characteristic, and the operation flags + * {@link StreamOpFlag#SIZED} is known on the combined stream and operation + * flags. + * + * @param spliterator the spliterator describing the relevant portion of the + * source data + * @return the exact size if known, or -1 if infinite or unknown + */ + abstract long exactOutputSizeIfKnown(Spliterator spliterator); + + /** + * Applies the pipeline stages described by this {@code PipelineHelper} to + * the provided {@code Spliterator} and send the results to the provided + * {@code Sink}. + * + * @implSpec + * The implementation behaves as if: + *
{@code
+     *     copyInto(wrapSink(sink), spliterator);
+     * }
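The composition this implSpec describes (wrap the terminal sink with the intermediate stages, then push the source through) can be imitated with plain consumers. Everything in this sketch, WrapAndCopySketch included, is a hypothetical illustration, not the patch's API.

    import java.util.Arrays;
    import java.util.function.Consumer;

    // wrapAndCopyInto in miniature: build the chain from the terminal stage
    // outward (the "wrapSink" step), then feed every source element into it
    // (the "copyInto" step).
    class WrapAndCopySketch {
        public static void main(String[] args) {
            Consumer<Integer> terminal = len -> System.out.println("length = " + len);
            // mapping stage wraps the terminal stage
            Consumer<String> mapped = s -> terminal.accept(s.length());
            // filtering stage wraps the mapping stage; this is the fully wrapped sink
            Consumer<String> wrapped = s -> { if (s.startsWith("A")) mapped.accept(s); };
            // push the source elements into the wrapped sink
            Arrays.asList("Ant", "Bee", "Aardvark").forEach(wrapped);
        }
    }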
+ * + * @param sink the {@code Sink} to receive the results + * @param spliterator the spliterator describing the source input to process + */ + abstract> S wrapAndCopyInto(S sink, Spliterator spliterator); + + /** + * Pushes elements obtained from the {@code Spliterator} into the provided + * {@code Sink}. If the stream pipeline is known to have short-circuiting + * stages in it (see {@link StreamOpFlag#SHORT_CIRCUIT}), the + * {@link Sink#cancellationRequested()} is checked after each + * element, stopping if cancellation is requested. + * + * @implSpec + * This method conforms to the {@code Sink} protocol of calling + * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and + * calling {@code Sink.end} after all elements have been pushed. + * + * @param wrappedSink the destination {@code Sink} + * @param spliterator the source {@code Spliterator} + */ + abstract void copyInto(Sink wrappedSink, Spliterator spliterator); + + /** + * Pushes elements obtained from the {@code Spliterator} into the provided + * {@code Sink}, checking {@link Sink#cancellationRequested()} after each + * element, and stopping if cancellation is requested. + * + * @implSpec + * This method conforms to the {@code Sink} protocol of calling + * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and + * calling {@code Sink.end} after all elements have been pushed or if + * cancellation is requested. + * + * @param wrappedSink the destination {@code Sink} + * @param spliterator the source {@code Spliterator} + */ + abstract void copyIntoWithCancel(Sink wrappedSink, Spliterator spliterator); + + /** + * Takes a {@code Sink} that accepts elements of the output type of the + * {@code PipelineHelper}, and wrap it with a {@code Sink} that accepts + * elements of the input type and implements all the intermediate operations + * described by this {@code PipelineHelper}, delivering the result into the + * provided {@code Sink}. + * + * @param sink the {@code Sink} to receive the results + * @return a {@code Sink} that implements the pipeline stages and sends + * results to the provided {@code Sink} + */ + abstract Sink wrapSink(Sink sink); + + /** + * Constructs a @{link Node.Builder} compatible with the output shape of + * this {@code PipelineHelper} + * + * @param exactSizeIfKnown if >=0 then a builder will be created that has a + * fixed capacity of exactly sizeIfKnown elements; if < 0 then the + * builder has variable capacity. A fixed capacity builder will fail + * if an element is added after the builder has reached capacity. + * @param generator a factory function for array instances + * @return A {@code Node.Builder} compatible with the output shape of this + * {@code PipelineHelper} + */ + abstract Node.Builder makeNodeBuilder(long exactSizeIfKnown, + IntFunction generator); + + /** + * Collects all output elements resulting from applying the pipeline stages + * to the source {@code Spliterator} into a {@code Node}. + * + * @implNote + * If the pipeline has no intermediate operations and the source is backed + * by a {@code Node} then that {@code Node} will be returned (or flattened + * and then returned). This reduces copying for a pipeline consisting of a + * stateful operation followed by a terminal operation that returns an + * array, such as: + *
{@code
+     *     stream.sorted().toArray();
+     * }
+ * + * @param spliterator the source {@code Spliterator} + * @param flatten if true and the pipeline is a parallel pipeline then the + * {@code Node} returned will contain no children, otherwise the + * {@code Node} may represent the root in a tree that reflects the + * shape of the computation tree. + * @param generator a factory function for array instances + * @return the {@code Node} containing all output elements + */ + abstract Node evaluate(Spliterator spliterator, + boolean flatten, + IntFunction generator); +} diff --git a/src/share/classes/java/util/stream/Sink.java b/src/share/classes/java/util/stream/Sink.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Sink.java @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * An extension of {@link Consumer} used to conduct values through the stages of + * a stream pipeline, with additional methods to manage size information, + * control flow, etc. Before calling the {@code accept()} method on a + * {@code Sink} for the first time, you must first call the {@code begin()} + * method to inform it that data is coming (optionally informing the sink how + * much data is coming), and after all data has been sent, you must call the + * {@code end()} method. After calling {@code end()}, you should not call + * {@code accept()} without again calling {@code begin()}. {@code Sink} also + * offers a mechanism by which the sink can cooperatively signal that it does + * not wish to receive any more data (the {@code cancellationRequested()} + * method), which a source can poll before sending more data to the + * {@code Sink}. + * + *
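The begin/accept/end protocol with cooperative cancellation described above can be exercised with a small stand-in type. MiniSink, copyInto and SinkProtocolDemo are hypothetical, not the package-private types added by this patch.

    import java.util.Arrays;
    import java.util.Spliterator;
    import java.util.function.Consumer;

    // Stand-in with the same begin/accept/end protocol plus cooperative
    // cancellation, driven from a Spliterator.
    interface MiniSink<T> extends Consumer<T> {
        default void begin(long size) {}
        default void end() {}
        default boolean cancellationRequested() { return false; }
    }

    class SinkProtocolDemo {
        static <T> void copyInto(MiniSink<T> sink, Spliterator<T> spliterator) {
            sink.begin(spliterator.getExactSizeIfKnown());  // announce size, -1 if unknown
            // keep pushing until the source is exhausted or the sink asks to stop
            do { } while (!sink.cancellationRequested() && spliterator.tryAdvance(sink));
            sink.end();                                     // all data has been pushed
        }

        public static void main(String[] args) {
            MiniSink<String> firstLongWord = new MiniSink<String>() {
                String found;
                public void accept(String s) { if (found == null && s.length() > 3) found = s; }
                public boolean cancellationRequested() { return found != null; }  // short-circuit
                public void end() { System.out.println(found); }
            };
            copyInto(firstLongWord, Arrays.asList("a", "bb", "lengthy", "later").spliterator());
        }
    }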

A sink may be in one of two states: an initial state and an active state. + * It starts out in the initial state; the {@code begin()} method transitions + * it to the active state, and the {@code end()} method transitions it back into + * the initial state, where it can be re-used. Data-accepting methods (such as + * {@code accept()}) are only valid in the active state. + * + * @apiNote + * + * A stream pipeline consists of a source, zero or more intermediate stages + * (such as filtering or mapping), and a terminal stage, such as reduction or + * for-each. For concreteness, consider the pipeline: + * + *

{@code
+ *     int longestStringLengthStartingWithA
+ *         = strings.stream()
+ *                  .filter(s -> s.startsWith("A"))
+ *                  .mapToInt(String::length)
+ *                  .max().getAsInt();
+ * }
+ * + *

Here, we have three stages: filtering, mapping, and reducing. The + * filtering stage consumes strings and emits a subset of those strings; the + * mapping stage consumes strings and emits ints; the reduction stage consumes + * those ints and computes the maximal value. + * + *

A {@code Sink} instance is used to represent each stage of this pipeline, + * whether the stage accepts objects, ints, longs, or doubles. Sink has entry + * points for {@code accept(Object)}, {@code accept(int)}, etc., so that we do + * not need a specialized interface for each primitive specialization. (It + * might be called a "kitchen sink" for this omnivorous tendency.) The entry + * point to the pipeline is the {@code Sink} for the filtering stage, which + * sends some elements "downstream" -- into the {@code Sink} for the mapping + * stage, which in turn sends integral values downstream into the {@code Sink} + * for the reduction stage. The {@code Sink} implementation associated with a + * given stage is expected to know the data type for the next stage, and to call + * the correct {@code accept} method on its downstream {@code Sink}. Similarly, + * each stage must implement the correct {@code accept} method corresponding to + * the data type it accepts. + * + *

The specialized subtypes such as {@link Sink.OfInt} override + * {@code accept(Object)} to call the appropriate primitive specialization of + * {@code accept}, implement the appropriate primitive specialization of + * {@code Consumer}, and re-abstract the appropriate primitive specialization of + * {@code accept}. + * + *

The chaining subtypes such as {@link ChainedInt} not only implement + * {@code Sink.OfInt}, but also maintain a {@code downstream} field which + * represents the downstream {@code Sink}, and implement the methods + * {@code begin()}, {@code end()}, and {@code cancellationRequested()} to + * delegate to the downstream {@code Sink}. Most implementations of + * intermediate operations will use these chaining wrappers. For example, the + * mapping stage in the above example would look like: + * + *

{@code
+ *     IntSink is = new Sink.ChainedReference(sink) {
+ *         public void accept(U u) {
+ *             downstream.accept(mapper.applyAsInt(u));
+ *         }
+ *     };
+ * }
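A compilable, standalone variant of the chained-mapping idea in this example, using only public java.util.function types; ChainingSketch and mapToIntStage are hypothetical names.

    import java.util.function.Consumer;
    import java.util.function.IntConsumer;
    import java.util.function.ToIntFunction;

    // A reference-accepting stage that forwards each mapped int to a primitive
    // downstream stage, mirroring the shape of the example above.
    class ChainingSketch {
        static <U> Consumer<U> mapToIntStage(ToIntFunction<U> mapper, IntConsumer downstream) {
            return u -> downstream.accept(mapper.applyAsInt(u));
        }

        public static void main(String[] args) {
            int[] max = {Integer.MIN_VALUE};
            IntConsumer reduceMax = i -> max[0] = Math.max(max[0], i);    // terminal stage
            Consumer<String> mapping = mapToIntStage(String::length, reduceMax);
            for (String s : new String[] {"Apple", "Avocado", "Apricot"}) {
                if (s.startsWith("A")) mapping.accept(s);                 // filtering stage
            }
            System.out.println(max[0]);   // 7
        }
    }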
+ * + *

Here, we implement {@code Sink.ChainedReference}, meaning that we expect + * to receive elements of type {@code U} as input, and pass the downstream sink + * to the constructor. Because the next stage expects to receive integers, we + * must call the {@code accept(int)} method when emitting values to the downstream. + * The {@code accept()} method applies the mapping function from {@code U} to + * {@code int} and passes the resulting value to the downstream {@code Sink}. + * + * @param Type of elements for value streams + * @since 1.8 + */ +interface Sink extends Consumer { + /** + * Resets the sink state to receive a fresh data set. This must be called + * before sending any data to the sink. After calling {@link #end()}, + * you may call this method to reset the sink for another calculation. + * @param size The exact size of the data to be pushed downstream, if + * known or {@code -1} if unknown or infinite. + * + *

Prior to this call, the sink must be in the initial state, and after + * this call it is in the active state. + */ + default void begin(long size) {} + + /** + * Indicates that all elements have been pushed. If the {@code Sink} is + * stateful, it should send any stored state downstream at this time, and + * should clear any accumulated state (and associated resources). + * + *

Prior to this call, the sink must be in the active state, and after + * this call it is returned to the initial state. + */ + default void end() {} + + /** + * Indicates that this {@code Sink} does not wish to receive any more data. + * + * @implSpec The default implementation always returns false + * + * @return true if cancellation is requested + */ + default boolean cancellationRequested() { + return false; + } + + /** + * Accepts an int value. + * + * @implSpec The default implementation throws IllegalStateException + * + * @throws IllegalStateException If this sink does not accept int values + */ + default void accept(int value) { + throw new IllegalStateException("called wrong accept method"); + } + + /** + * Accepts a long value. + * @implSpec The default implementation throws IllegalStateException + * + * @throws IllegalStateException If this sink does not accept long values + */ + default void accept(long value) { + throw new IllegalStateException("called wrong accept method"); + } + + /** + * Accepts a double value. + * @implSpec The default implementation throws IllegalStateException + * + * @throws IllegalStateException If this sink does not accept double values + */ + default void accept(double value) { + throw new IllegalStateException("called wrong accept method"); + } + + /** + * {@code Sink} that implements {@code Sink}, re-abstracts + * {@code accept(int)}, and wires {@code accept(Integer)} to bridge to + * {@code accept(int)}. + */ + interface OfInt extends Sink, IntConsumer { + @Override + void accept(int value); + + @Override + default void accept(Integer i) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)"); + accept(i.intValue()); + } + } + + /** + * {@code Sink} that implements {@code Sink}, re-abstracts + * {@code accept(long)}, and wires {@code accept(Long)} to bridge to + * {@code accept(long)}. + */ + interface OfLong extends Sink, LongConsumer { + @Override + void accept(long value); + + @Override + default void accept(Long i) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Sink.OfLong.accept(Long)"); + accept(i.longValue()); + } + } + + /** + * {@code Sink} that implements {@code Sink}, re-abstracts + * {@code accept(double)}, and wires {@code accept(Double)} to bridge to + * {@code accept(double)}. + */ + interface OfDouble extends Sink, DoubleConsumer { + @Override + void accept(double value); + + @Override + default void accept(Double i) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} calling Sink.OfDouble.accept(Double)"); + accept(i.doubleValue()); + } + } + + /** + * Abstract {@code Sink} implementation for creating chains of + * sinks. The {@code begin}, {@code end}, and + * {@code cancellationRequested} methods are wired to chain to the + * downstream {@code Sink}. This implementation takes a downstream + * {@code Sink} of unknown input shape and produces a {@code Sink}. The + * implementation of the {@code accept()} method must call the correct + * {@code accept()} method on the downstream {@code Sink}. 
+ */ + static abstract class ChainedReference implements Sink { + protected final Sink downstream; + + public ChainedReference(Sink downstream) { + this.downstream = Objects.requireNonNull(downstream); + } + + @Override + public void begin(long size) { + downstream.begin(size); + } + + @Override + public void end() { + downstream.end(); + } + + @Override + public boolean cancellationRequested() { + return downstream.cancellationRequested(); + } + } + + /** + * Abstract {@code Sink} implementation designed for creating chains of + * sinks. The {@code begin}, {@code end}, and + * {@code cancellationRequested} methods are wired to chain to the + * downstream {@code Sink}. This implementation takes a downstream + * {@code Sink} of unknown input shape and produces a {@code Sink.OfInt}. + * The implementation of the {@code accept()} method must call the correct + * {@code accept()} method on the downstream {@code Sink}. + */ + static abstract class ChainedInt implements Sink.OfInt { + protected final Sink downstream; + + public ChainedInt(Sink downstream) { + this.downstream = Objects.requireNonNull(downstream); + } + + @Override + public void begin(long size) { + downstream.begin(size); + } + + @Override + public void end() { + downstream.end(); + } + + @Override + public boolean cancellationRequested() { + return downstream.cancellationRequested(); + } + } + + /** + * Abstract {@code Sink} implementation designed for creating chains of + * sinks. The {@code begin}, {@code end}, and + * {@code cancellationRequested} methods are wired to chain to the + * downstream {@code Sink}. This implementation takes a downstream + * {@code Sink} of unknown input shape and produces a {@code Sink.OfLong}. + * The implementation of the {@code accept()} method must call the correct + * {@code accept()} method on the downstream {@code Sink}. + */ + static abstract class ChainedLong implements Sink.OfLong { + protected final Sink downstream; + + public ChainedLong(Sink downstream) { + this.downstream = Objects.requireNonNull(downstream); + } + + @Override + public void begin(long size) { + downstream.begin(size); + } + + @Override + public void end() { + downstream.end(); + } + + @Override + public boolean cancellationRequested() { + return downstream.cancellationRequested(); + } + } + + /** + * Abstract {@code Sink} implementation designed for creating chains of + * sinks. The {@code begin}, {@code end}, and + * {@code cancellationRequested} methods are wired to chain to the + * downstream {@code Sink}. This implementation takes a downstream + * {@code Sink} of unknown input shape and produces a {@code Sink.OfDouble}. + * The implementation of the {@code accept()} method must call the correct + * {@code accept()} method on the downstream {@code Sink}. + */ + static abstract class ChainedDouble implements Sink.OfDouble { + protected final Sink downstream; + + public ChainedDouble(Sink downstream) { + this.downstream = Objects.requireNonNull(downstream); + } + + @Override + public void begin(long size) { + downstream.begin(size); + } + + @Override + public void end() { + downstream.end(); + } + + @Override + public boolean cancellationRequested() { + return downstream.cancellationRequested(); + } + } +} diff --git a/src/share/classes/java/util/stream/StreamOpFlag.java b/src/share/classes/java/util/stream/StreamOpFlag.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/StreamOpFlag.java @@ -0,0 +1,729 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.EnumMap; +import java.util.Map; +import java.util.Spliterator; + +/** + * Flags corresponding to characteristics of streams and operations. Flags are + * utilized by the stream framework to control, specialize or optimize + * computation. + * + *

+ * Stream flags may be used to describe characteristics of several different + * entities associated with streams: stream sources, intermediate operations, + * and terminal operations. Not all stream flags are meaningful for all + * entities; the following table summarizes which flags are meaningful in what + * contexts: + * + *

+ * Type Characteristics:
+ *
+ *                              DISTINCT   SORTED   ORDERED   SIZED   SHORT_CIRCUIT
+ *   Stream source                 Y          Y        Y         Y          N
+ *   Intermediate operation       PCI        PCI      PCI        PC         PI
+ *   Terminal operation            N          N        PC        N          PI
+ *
+ * Legend:  Y = Allowed,  N = Invalid,  P = Preserves,  C = Clears,  I = Injects
+ *
+ * + *

In the above table, "PCI" means "may preserve, clear, or inject"; "PC" + * means "may preserve or clear", "PI" means "may preserve or inject", and "N" + * means "not valid". + * + *

Stream flags are represented by unioned bit sets, so that a single word + * may describe all the characteristics of a given stream entity, and that, for + * example, the flags for a stream source can be efficiently combined with the + * flags for later operations on that stream. + * + *

The bit masks {@link #STREAM_MASK}, {@link #OP_MASK}, and + * {@link #TERMINAL_OP_MASK} can be ANDed with a bit set of stream flags to + * produce a mask containing only the valid flags for that entity type. + * + *

When describing a stream source, one need only describe what + * characteristics that stream has; when describing a stream operation, one must + * describe whether the operation preserves, injects, or clears that + * characteristic. Accordingly, two bits are used for each flag, so as to allow + * representing not only the presence of a characteristic, but also how an + * operation modifies that characteristic. There are two common forms in which + * flag bits are combined into an {@code int} bit set. Stream flags + * are a unioned bit set constructed by ORing the enum characteristic values of + * {@link #set()} (or, more commonly, ORing the corresponding static named + * constants prefixed with {@code IS_}). Operation flags are a unioned + * bit set constructed by ORing the enum characteristic values of {@link #set()} + * or {@link #clear()} (to inject or clear, respectively, the corresponding + * flag), or more commonly ORing the corresponding named constants prefixed with + * {@code IS_} or {@code NOT_}. Flags that are not marked with {@code IS_} or + * {@code NOT_} are implicitly treated as preserved. Care must be taken when + * combining bit sets that the correct combining operations are applied in the + * correct order. + * + *
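A small sketch of the two-bits-per-flag encoding just described; SET, CLEAR and PRESERVE mirror the SET_BITS, CLEAR_BITS and PRESERVE_BITS constants defined later in this file, but everything here is local and illustrative.

    // One bit pair per flag: 01 = set/inject, 10 = clear, 11 = preserve, 00 = unspecified.
    class FlagBitsSketch {
        static final int SET = 0b01, CLEAR = 0b10, PRESERVE = 0b11;

        static String describe(int flags, int position) {
            int pair = (flags >> (2 * position)) & 0b11;
            switch (pair) {
                case SET:      return "set/injected";
                case CLEAR:    return "cleared";
                case PRESERVE: return "preserved";
                default:       return "not specified (implicitly preserved)";
            }
        }

        public static void main(String[] args) {
            int position = 0;   // DISTINCT occupies bit pair 0
            System.out.println(describe(SET   << (2 * position), position));  // set/injected
            System.out.println(describe(CLEAR << (2 * position), position));  // cleared
            System.out.println(describe(0, position));                        // not specified
        }
    }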

+ * With the exception of {@link #SHORT_CIRCUIT}, stream characteristics can be + * derived from the equivalent {@link java.util.Spliterator} characteristics: + * {@link java.util.Spliterator#DISTINCT}, {@link java.util.Spliterator#SORTED}, + * {@link java.util.Spliterator#ORDERED}, and + * {@link java.util.Spliterator#SIZED}. A spliterator characteristics bit set + * can be converted to stream flags using the method + * {@link #fromCharacteristics(java.util.Spliterator)} and converted back using + * {@link #toCharacteristics(int)}. (The bit set + * {@link #SPLITERATOR_CHARACTERISTICS_MASK} is used to AND with a bit set to + * produce a valid spliterator characteristics bit set that can be converted to + * stream flags.) + * + *

+ * The source of a stream encapsulates a spliterator. The characteristics of + * that source spliterator when transformed to stream flags will be a proper + * subset of stream flags of that stream. + * For example: + *

 {@code
+ *     Spliterator s = ...;
+ *     Stream stream = Streams.stream(s);
+ *     flagsFromSplitr = fromCharacteristics(s.characteristics());
+ *     assert((flagsFromSplitr & stream.getStreamFlags()) == flagsFromSplitr);
+ * }
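The same relationship can also be observed through the public java.util.Spliterator API, which is where these flags are derived from (illustrative only).

    import java.util.Arrays;
    import java.util.List;
    import java.util.Spliterator;
    import java.util.TreeSet;

    // The public Spliterator characteristics from which the DISTINCT, SORTED,
    // ORDERED and SIZED stream flags are derived.
    class CharacteristicsDemo {
        public static void main(String[] args) {
            List<String> list = Arrays.asList("b", "a", "c");
            Spliterator<String> s = list.spliterator();
            System.out.println(s.hasCharacteristics(Spliterator.ORDERED));   // true for a List
            System.out.println(s.hasCharacteristics(Spliterator.SIZED));     // true
            System.out.println(s.hasCharacteristics(Spliterator.DISTINCT));  // false for a List

            // Only a natural sort order (null comparator) maps to the SORTED flag.
            Spliterator<String> sorted = new TreeSet<>(list).spliterator();
            System.out.println(sorted.hasCharacteristics(Spliterator.SORTED)
                               && sorted.getComparator() == null);           // true
        }
    }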
+ * + *

+ * An intermediate operation, performed on an input stream to create a new + * output stream, may preserve, clear or inject stream or operation + * characteristics. Similarly, a terminal operation, performed on an input + * stream to produce an output result may preserve, clear or inject stream or + * operation characteristics. Preservation means that if that characteristic + * is present on the input, then it is also present on the output. Clearing + * means that the characteristic is not present on the output regardless of the + * input. Injection means that the characteristic is present on the output + * regardless of the input. If a characteristic is not cleared or injected then + * it is implicitly preserved. + * + *

+ * A pipeline consists of a stream source encapsulating a spliterator, one or + * more intermediate operations, and finally a terminal operation that produces + * a result. At each stage of the pipeline, the combined stream and operation + * flags can be calculated using {@link #combineOpFlags(int, int)}. Such flags + * ensure that preservation, clearing, and injection information is retained at + * each stage. + * + * The combined stream and operation flags for the source stage of the pipeline + * are calculated as follows: + *

 {@code
+ *     int flagsForSourceStage = combineOpFlags(sourceFlags, INITIAL_OPS_VALUE);
+ * }
+ * + * The combined stream and operation flags of each subsequent intermediate + * operation stage in the pipeline are calculated as follows: + *
 {@code
+ *     int flagsForThisStage = combineOpFlags(flagsForPreviousStage, thisOpFlags);
+ * }
+ * + * Finally the flags output from the last intermediate operation of the pipeline + * are combined with the operation flags of the terminal operation to produce + * the flags output from the pipeline. + * + *

Those flags can then be used to apply optimizations. For example, if + * {@code SIZED.isKnown(flags)} returns true then the stream size remains + * constant throughout the pipeline, this information can be utilized to + * pre-allocate data structures and combined with + * {@link java.util.Spliterator#SUBSIZED} that information can be utilized to + * perform concurrent in-place updates into a shared array. + * + * For specific details see the {@link AbstractPipeline} constructors. + * + * @since 1.8 + */ +enum StreamOpFlag { + + /* + * Each characteristic takes up 2 bits in a bit set to accommodate + * preserving, clearing and setting/injecting information. + * + * This applies to stream flags, intermediate/terminal operation flags, and + * combined stream and operation flags. Even though the former only requires + * 1 bit of information per characteristic, is it more efficient when + * combining flags to align set and inject bits. + * + * Characteristics belong to certain types, see the Type enum. Bit masks for + * the types are constructed as per the following table: + * + * DISTINCT SORTED ORDERED SIZED SHORT_CIRCUIT + * SPLITERATOR 01 01 01 01 00 + * STREAM 01 01 01 01 00 + * OP 11 11 11 10 01 + * TERMINAL_OP 00 00 10 00 01 + * UPSTREAM_TERMINAL_OP 00 00 10 00 00 + * + * 01 = set/inject + * 10 = clear + * 11 = preserve + * + * Construction of the columns is performed using a simple builder for + * non-zero values. + */ + + + // The following flags correspond to characteristics on Spliterator + // and the values MUST be equal. + // + + /** + * Characteristic value signifying that, for each pair of + * encountered elements in a stream {@code x, y}, {@code !x.equals(y)}. + *

+ * A stream may have this value or an intermediate operation can preserve, + * clear or inject this value. + */ + // 0, 0x00000001 + // Matches Spliterator.DISTINCT + DISTINCT(0, + set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)), + + /** + * Characteristic value signifying that encounter order follows a natural + * sort order of comparable elements. + *

+ * A stream can have this value or an intermediate operation can preserve, + * clear or inject this value. + *

+ * Note: The {@link java.util.Spliterator#SORTED} characteristic can define + * a sort order with an associated non-null comparator. Augmenting flag + * state with additional properties such that those properties can be passed + * to operations requires some disruptive changes for a singular use-case. + * Furthermore, comparing comparators for equality beyond that of identity + * is likely to be unreliable. Therefore, the {@code SORTED} characteristic + * for a defined non-natural sort order is not mapped internally to the + * {@code SORTED} flag. + */ + // 1, 0x00000004 + // Matches Spliterator.SORTED + SORTED(1, + set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)), + + /** + * Characteristic value signifying that an encounter order is + * defined for stream elements. + *

+ * A stream can have this value, an intermediate operation can preserve, + * clear or inject this value, or a terminal operation can preserve or clear + * this value. + */ + // 2, 0x00000010 + // Matches Spliterator.ORDERED + ORDERED(2, + set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP).clear(Type.TERMINAL_OP) + .clear(Type.UPSTREAM_TERMINAL_OP)), + + /** + * Characteristic value signifying that size of the stream + * is of a known finite size that is equal to the known finite + * size of the source spliterator input to the first stream + * in the pipeline. + *

+ * A stream can have this value or an intermediate operation can preserve or + * clear this value. + */ + // 3, 0x00000040 + // Matches Spliterator.SIZED + SIZED(3, + set(Type.SPLITERATOR).set(Type.STREAM).clear(Type.OP)), + + // The following Spliterator characteristics are not currently used but a + // gap in the bit set is deliberately retained to enable corresponding + // stream flags if//when required without modification to other flag values. + // + // 4, 0x00000100 NONNULL(4, ... + // 5, 0x00000400 IMMUTABLE(5, ... + // 6, 0x00001000 CONCURRENT(6, ... + // 7, 0x00004000 SUBSIZED(7, ... + + // The following 4 flags are currently undefined and a free for any further + // spliterator characteristics. + // + // 8, 0x00010000 + // 9, 0x00040000 + // 10, 0x00100000 + // 11, 0x00400000 + + // The following flags are specific to streams and operations + // + + /** + * Characteristic value signifying that an operation may short-circuit the + * stream. + *

+ * An intermediate operation can preserve or inject this value, + * or a terminal operation can preserve or inject this value. + */ + // 12, 0x01000000 + SHORT_CIRCUIT(12, + set(Type.OP).set(Type.TERMINAL_OP)); + + // The following 2 flags are currently undefined and a free for any further + // stream flags if/when required + // + // 13, 0x04000000 + // 14, 0x10000000 + // 15, 0x40000000 + + /** + * Type of a flag + */ + enum Type { + /** The flag is associated with spliterator characteristics. */ + SPLITERATOR, + + /** The flag is associated with stream flags. */ + STREAM, + + /** The flag is associated with intermediate operation flags. */ + OP, + + /** The flag is associated with terminal operation flags. */ + TERMINAL_OP, + + /** + * The flag is associated with terminal operation flags that are + * propagated upstream across the last stateful operation boundary + */ + UPSTREAM_TERMINAL_OP + } + + /** + * The bit pattern for setting/injecting a flag. + */ + private static final int SET_BITS = 0b01; + + /** + * The bit pattern for clearing a flag. + */ + private static final int CLEAR_BITS = 0b10; + + /** + * The bit pattern for preserving a flag. + */ + private static final int PRESERVE_BITS = 0b11; + + private static MaskBuilder set(Type t) { + return new MaskBuilder(new EnumMap<>(Type.class)).set(t); + } + + private static class MaskBuilder { + final Map map; + + MaskBuilder(Map map) { + this.map = map; + } + + MaskBuilder mask(Type t, Integer i) { + map.put(t, i); + return this; + } + + MaskBuilder set(Type t) { + return mask(t, SET_BITS); + } + + MaskBuilder clear(Type t) { + return mask(t, CLEAR_BITS); + } + + MaskBuilder setAndClear(Type t) { + return mask(t, PRESERVE_BITS); + } + + Map build() { + for (Type t : Type.values()) { + map.putIfAbsent(t, 0b00); + } + return map; + } + } + + // The mask table for a flag, this is used to determine + // if a flag corresponds to a certain flag type and for creating + // mask constants. + private final Map maskTable; + + // The bit position in the bit mask + private final int bitPosition; + + // The set 2 bit set offset at the bit position + private final int set; + + // The clear 2 bit set offset at the bit position + private final int clear; + + // The preserve 2 bit set offset at the bit position + private final int preserve; + + private StreamOpFlag(int position, MaskBuilder maskBuilder) { + this.maskTable = maskBuilder.build(); + // Two bits per flag + position *= 2; + this.bitPosition = position; + this.set = SET_BITS << position; + this.clear = CLEAR_BITS << position; + this.preserve = PRESERVE_BITS << position; + } + + /** + * Gets the bitmap associated with setting this characteristic + * @return the bitmap for setting this characteristic + */ + int set() { + return set; + } + + /** + * Gets the bitmap associated with clearing this characteristic + * @return the bitmap for clearing this characteristic + */ + int clear() { + return clear; + } + + /** + * Determines if this flag is a stream-based flag. + * + * @return true if a stream-based flag, otherwise false. + */ + boolean isStreamFlag() { + return maskTable.get(Type.STREAM) > 0; + } + + /** + * Checks if this flag is set on stream flags, injected on operation flags, + * and injected on combined stream and operation flags. + * + * @param flags the stream flags, operation flags, or combined stream and + * operation flags + * @return true if this flag is known, otherwise false. 
+ */ + boolean isKnown(int flags) { + return (flags & preserve) == set; + } + + /** + * Checks if this flag is cleared on operation flags or combined stream and + * operation flags. + * + * @param flags the operation flags or combined stream and operations flags. + * @return true if this flag is preserved, otherwise false. + */ + boolean isCleared(int flags) { + return (flags & preserve) == clear; + } + + /** + * Checks if this flag is preserved on combined stream and operation flags. + * + * @param flags the combined stream and operations flags. + * @return true if this flag is preserved, otherwise false. + */ + boolean isPreserved(int flags) { + return (flags & preserve) == preserve; + } + + /** + * Determines if this flag can be set for a flag type. + * + * @param t the flag type. + * @return true if this flag can be set for the flag type, otherwise false. + */ + boolean canSet(Type t) { + return (maskTable.get(t) & SET_BITS) > 0; + } + + /** + * The bit mask for spliterator characteristics + */ + static final int SPLITERATOR_CHARACTERISTICS_MASK = createMask(Type.SPLITERATOR); + + /** + * The bit mask for source stream flags. + */ + static final int STREAM_MASK = createMask(Type.STREAM); + + /** + * The bit mask for intermediate operation flags. + */ + static final int OP_MASK = createMask(Type.OP); + + /** + * The bit mask for terminal operation flags. + */ + static final int TERMINAL_OP_MASK = createMask(Type.TERMINAL_OP); + + /** + * The bit mask for upstream terminal operation flags. + */ + static final int UPSTREAM_TERMINAL_OP_MASK = createMask(Type.UPSTREAM_TERMINAL_OP); + + private static int createMask(Type t) { + int mask = 0; + for (StreamOpFlag flag : StreamOpFlag.values()) { + mask |= flag.maskTable.get(t) << flag.bitPosition; + } + return mask; + } + + // Complete flag mask + private static final int FLAG_MASK = createFlagMask(); + + private static int createFlagMask() { + int mask = 0; + for (StreamOpFlag flag : StreamOpFlag.values()) { + mask |= flag.preserve; + } + return mask; + } + + // Flag mask for stream flags that are set + private static final int FLAG_MASK_IS = STREAM_MASK; + + // Flag mask for stream flags that are cleared + private static final int FLAG_MASK_NOT = STREAM_MASK << 1; + + /** + * The initial value to be combined with the stream flags of the first + * stream in the pipeline. + */ + static final int INITIAL_OPS_VALUE = FLAG_MASK_IS | FLAG_MASK_NOT; + + /** + * The bit value to set or inject {@link #DISTINCT} + */ + static final int IS_DISTINCT = DISTINCT.set; + + /** + * The bit value to clear {@link #DISTINCT} + */ + static final int NOT_DISTINCT = DISTINCT.clear; + + /** + * The bit value to set or inject {@link #SORTED} + */ + static final int IS_SORTED = SORTED.set; + + /** + * The bit value to clear {@link #SORTED} + */ + static final int NOT_SORTED = SORTED.clear; + + /** + * The bit value to set or inject {@link #ORDERED} + */ + static final int IS_ORDERED = ORDERED.set; + + /** + * The bit value to clear {@link #ORDERED} + */ + static final int NOT_ORDERED = ORDERED.clear; + + /** + * The bit value to set {@link #SIZED} + */ + static final int IS_SIZED = SIZED.set; + + /** + * The bit value to clear {@link #SIZED} + */ + static final int NOT_SIZED = SIZED.clear; + + /** + * The bit value to inject {@link #SHORT_CIRCUIT} + */ + static final int IS_SHORT_CIRCUIT = SHORT_CIRCUIT.set; + + private static int getMask(int flags) { + return (flags == 0) + ? 
FLAG_MASK + : ~(flags | ((FLAG_MASK_IS & flags) << 1) | ((FLAG_MASK_NOT & flags) >> 1)); + } + + /** + * Combines stream or operation flags with previously combined stream and + * operation flags to produce updated combined stream and operation flags. + *

+ * A flag set on stream flags or injected on operation flags, + * and injected on the combined stream and operation flags, + * will be injected on the updated combined stream and operation flags. + *

+ *

+ * A flag set on stream flags or injected on operation flags, + * and cleared on the combined stream and operation flags, + * will be cleared on the updated combined stream and operation flags. + *

+ *

+ * A flag set on the stream flags or injected on operation flags, + * and preserved on the combined stream and operation flags, + * will be injected on the updated combined stream and operation flags. + *

+ *

+ * A flag not set on the stream flags or cleared/preserved on operation + * flags, and injected on the combined stream and operation flags, + * will be injected on the updated combined stream and operation flags. + *

+ *

+ * A flag not set on the stream flags or cleared/preserved on operation + * flags, and cleared on the combined stream and operation flags, + * will be cleared on the updated combined stream and operation flags. + *

+ *

+ * A flag not set on the stream flags, + * and preserved on the combined stream and operation flags + * will be preserved on the updated combined stream and operation flags. + *

+ *

+ * A flag cleared on operation flags, + * and preserved on the combined stream and operation flags + * will be cleared on the updated combined stream and operation flags. + *

+ *

+ * A flag preserved on operation flags, + * and preserved on the combined stream and operation flags + * will be preserved on the updated combined stream and operation flags. + *
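As a worked illustration of the rules above (not part of the patch), the following standalone sketch mirrors the getMask/combineOpFlags arithmetic with a toy two-flag encoding; the flag names and bit positions are illustrative, not the enum's actual layout.

    // Toy model of the two-bit-per-flag encoding: 01 = set/injected, 10 = cleared,
    // 11 = preserved (the initial seed), 00 = unknown.
    class FlagComboSketch {
        static final int IS_DISTINCT = 0b0001, NOT_DISTINCT = 0b0010;
        static final int IS_SORTED   = 0b0100, NOT_SORTED   = 0b1000;
        static final int INITIAL     = 0b1111;  // both flags start out "preserved"
        static final int MASK_IS     = 0b0101;  // the "set" bit of each flag pair
        static final int MASK_NOT    = 0b1010;  // the "clear" bit of each flag pair

        // Same shape as getMask/combineOpFlags above, specialized to two flags.
        static int combine(int newOpFlags, int prevCombined) {
            int mask = (newOpFlags == 0)
                       ? 0b1111
                       : ~(newOpFlags
                           | ((MASK_IS & newOpFlags) << 1)
                           | ((MASK_NOT & newOpFlags) >> 1));
            return (prevCombined & mask) | newOpFlags;
        }

        public static void main(String[] args) {
            int s1 = combine(IS_SORTED, INITIAL);  // 0b0111: SORTED injected, DISTINCT preserved
            int s2 = combine(NOT_SORTED, s1);      // 0b1011: SORTED cleared, DISTINCT preserved
            System.out.println(Integer.toBinaryString(s1) + " " + Integer.toBinaryString(s2));
        }
    }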

+ *
+ * @param newStreamOrOpFlags the stream or operation flags.
+ * @param prevCombOpFlags previously combined stream and operation flags.
+ * The value {@link #INITIAL_OPS_VALUE} must be used as the seed value.
+ * @return the updated combined stream and operation flags.
+ */
+ static int combineOpFlags(int newStreamOrOpFlags, int prevCombOpFlags) {
+ // 0x01 or 0x10 nibbles are transformed to 0x11
+ // 0x00 nibbles remain unchanged
+ // Then all the bits are flipped
+ // Then the result is logically or'ed with the operation flags.
+ return (prevCombOpFlags & StreamOpFlag.getMask(newStreamOrOpFlags)) | newStreamOrOpFlags;
+ }
+
+ /**
+ * Converts combined stream and operation flags to stream flags.
+ *
+ *

Each flag injected on the combined stream and operation flags will be + * set on the stream flags. + * + * @param combOpFlags the combined stream and operation flags. + * @return the stream flags. + */ + static int toStreamFlags(int combOpFlags) { + // By flipping the nibbles 0x11 become 0x00 and 0x01 become 0x10 + // Shift left 1 to restore set flags and mask off anything other than the set flags + return ((~combOpFlags) >> 1) & FLAG_MASK_IS & combOpFlags; + } + + /** + * Converts stream flags to a spliterator characteristic bit set. + * + * @param streamFlags the stream flags. + * @return the spliterator characteristic bit set. + */ + static int toCharacteristics(int streamFlags) { + return streamFlags & SPLITERATOR_CHARACTERISTICS_MASK; + } + + /** + * Converts a spliterator characteristic bit set to stream flags. + * + * @implSpec + * If the spliterator is naturally {@code SORTED} (the associated + * {@code Comparator} is {@code null}) then the characteristic is converted + * to the {@link #SORTED} flag, otherwise the characteristic is not + * converted. + * + * @param spliterator the spliterator from which to obtain characteristic + * bit set. + * @return the stream flags. + */ + static int fromCharacteristics(Spliterator spliterator) { + int characteristics = spliterator.characteristics(); + if ((characteristics & Spliterator.SORTED) != 0 && spliterator.getComparator() != null) { + // Do not propagate the SORTED characteristic if it does not correspond + // to a natural sort order + return characteristics & SPLITERATOR_CHARACTERISTICS_MASK & ~Spliterator.SORTED; + } + else { + return characteristics & SPLITERATOR_CHARACTERISTICS_MASK; + } + } + + /** + * Converts a spliterator characteristic bit set to stream flags. + * + * @param characteristics the spliterator characteristic bit set. + * @return the stream flags. + */ + static int fromCharacteristics(int characteristics) { + return characteristics & SPLITERATOR_CHARACTERISTICS_MASK; + } +} diff --git a/src/share/classes/java/util/stream/StreamShape.java b/src/share/classes/java/util/stream/StreamShape.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/StreamShape.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +/** + * An enum describing the known shape specializations for stream abstractions. + * Each will correspond to a specific subinterface of {@link BaseStream} + * (e.g., {@code REFERENCE} corresponds to {@code Stream}, {@code INT_VALUE} + * corresponds to {@code IntStream}). Each may also correspond to + * specializations of value-handling abstractions such as {@code Spliterator}, + * {@code Consumer}, etc. + * + * @apiNote + * This enum is used by implementations to determine compatibility between + * streams and operations (i.e., if the output shape of a stream is compatible + * with the input shape of the next operation). + * + *

Some APIs require you to specify both a generic type and a stream shape + * for input or output elements, such as {@link TerminalOp} which has both + * generic type parameters for its input types, and a getter for the + * input shape. When representing primitive streams in this way, the + * generic type parameter should correspond to the wrapper type for that + * primitive type. + * @since 1.8 + */ +enum StreamShape { + /** + * The shape specialization corresponding to {@code Stream} and elements + * that are object references + */ + REFERENCE, + /** + * The shape specialization corresponding to {@code IntStream} and elements + * that are {@code int} values + */ + INT_VALUE, + /** + * The shape specialization corresponding to {@code LongStream} and elements + * that are {@code long} values + */ + LONG_VALUE, + /** + * The shape specialization corresponding to {@code DoubleStream} and + * elements that are {@code double} values + */ + DOUBLE_VALUE +} diff --git a/src/share/classes/java/util/stream/TerminalOp.java b/src/share/classes/java/util/stream/TerminalOp.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/TerminalOp.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Spliterator; + +/** + * An operation in a stream pipeline that takes a stream as input and produces + * a result or side-effect. A {@code TerminalOp} has an input type and stream + * shape, and a result type. A {@code TerminalOp} also has a set of + * operation flags that describes how the operation processes elements + * of the stream (such as short-circuiting or respecting encounter order; see + * {@link StreamOpFlag}). + * + *

A {@code TerminalOp} must provide a sequential and parallel implementation + * of the operation relative to a given stream source and set of intermediate + * operations. + * + * @param The type of input elements + * @param The type of the result + * @since 1.8 + */ +interface TerminalOp { + /** + * Gets the shape of the input type of this operation + * + * @implSpec The default returns {@code StreamShape.REFERENCE} + * @return Shape of the input type of this operation + */ + default StreamShape inputShape() { return StreamShape.REFERENCE; } + + /** + * Gets the stream flags of the operation. Terminal operations may set a + * limited subset of the stream flags defined in {@link StreamOpFlag}, and + * these flags are combined with the previously combined stream and + * intermediate operation flags for the pipeline. + * + * @implSpec The default implementation returns zero + * @return the stream flags for this operation + * @see StreamOpFlag + */ + default int getOpFlags() { return 0; } + + /** + * Performs a parallel evaluation of the operation using the specified + * {@code PipelineHelper}, which describes the upstream intermediate + * operations. + * + * @implSpec The default performs a sequential evaluation of the operation + * using the specified {@code PipelineHelper} + * + * @param helper the pipeline helper + * @param spliterator the source spliterator + * @return the result of the evaluation + */ + default R evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (Tripwire.ENABLED) + Tripwire.trip(getClass(), "{0} triggering TerminalOp.evaluateParallel serial default"); + return evaluateSequential(helper, spliterator); + } + + /** + * Performs a sequential evaluation of the operation using the specified + * {@code PipelineHelper}, which describes the upstream intermediate + * operations. + * + * @param helper the pipeline helper + * @param spliterator the source spliterator + * @return the result of the evaluation + */ + R evaluateSequential(PipelineHelper helper, + Spliterator spliterator); +} diff --git a/src/share/classes/java/util/stream/TerminalSink.java b/src/share/classes/java/util/stream/TerminalSink.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/TerminalSink.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +import java.util.function.Supplier; + +/** + * A {@link Sink} which accumulates state as elements are accepted, and allows + * a result to be retrieved after the computation is finished. + * + * @param The type of elements to be accepted + * @param The type of the result + * + * @since 1.8 + */ +interface TerminalSink extends Sink, Supplier { } diff --git a/src/share/classes/java/util/stream/Tripwire.java b/src/share/classes/java/util/stream/Tripwire.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Tripwire.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import sun.util.logging.PlatformLogger; + +/** + * Utility class for detecting inadvertent uses of boxing in + * {@code java.util.stream} classes. The detection is turned on or off based on + * whether the system property {@code org.openjdk.java.util.stream.tripwire} is + * considered {@code true} according to {@link Boolean#getBoolean(String)}. + * This should normally be turned off for production use. + * + * @apiNote + * Typical usage would be for boxing code to do: + *

+ * {@code
+ *     if (Tripwire.ENABLED)
+ *         Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ * }
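For illustration only, the same pattern outside the JDK might look like the sketch below; Sink.OfInt is package-private, so a hypothetical MyIntSink and java.util.logging stand in for the real sink and PlatformLogger.

    import java.util.logging.Logger;

    // Hypothetical primitive sink that flags accidental boxing when the
    // tripwire system property is set to "true".
    interface MyIntSink {
        void accept(int value);                 // the primitive path callers should hit

        default void accept(Integer boxed) {    // the boxed path we want to detect
            if (Boolean.getBoolean("org.openjdk.java.util.stream.tripwire")) {
                Logger.getLogger(MyIntSink.class.getName())
                      .warning(MyIntSink.class.getName()
                               + " calling accept(Integer) instead of accept(int)");
            }
            accept(boxed.intValue());
        }
    }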
+ * + * @since 1.8 + */ +final class Tripwire { + private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire"; + + /** Should debugging checks be enabled? */ + static final boolean ENABLED = AccessController.doPrivileged( + (PrivilegedAction) () -> Boolean.getBoolean(TRIPWIRE_PROPERTY)); + + private Tripwire() { } + + /** + * Produces a log warning, using {@code PlatformLogger.getLogger(className)}, + * using the supplied message. The class name of {@code trippingClass} will + * be used as the first parameter to the message. + * + * @param trippingClass Name of the class generating the message + * @param msg A message format string of the type expected by + * {@link PlatformLogger} + */ + static void trip(Class trippingClass, String msg) { + PlatformLogger.getLogger(trippingClass.getName()).warning(msg, trippingClass.getName()); + } +} # HG changeset patch # User briangoetz # Date 1366167049 14400 # Node ID bb0490f08ae093ca3b24b540229da9483aa893f9 # Parent 162871f3ae8945b44fa0840f20721d5491510c2f imported patch JDK-8010488 diff --git a/src/share/classes/java/util/DoubleSummaryStatistics.java b/src/share/classes/java/util/DoubleSummaryStatistics.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/DoubleSummaryStatistics.java @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.DoubleConsumer; + +/** + * A state object for collecting statistics such as count, min, max, sum, and + * average. + * + *

This class is designed to work with (though does not require) + * {@linkplain java.util.stream streams}. For example, you can compute + * summary statistics on a stream of doubles with: + *

+ * {@code
+ * DoubleSummaryStatistics stats = doubleStream.collect(DoubleSummaryStatistics::new,
+ *     DoubleSummaryStatistics::accept,
+ *     DoubleSummaryStatistics::combine);
+ * }
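Although the examples in this class use streams, the accumulator can just as well be driven by hand; a minimal sketch, using only methods defined in this class:

    DoubleSummaryStatistics a = new DoubleSummaryStatistics();
    DoubleSummaryStatistics b = new DoubleSummaryStatistics();
    a.accept(1.5);
    a.accept(2.5);
    b.accept(4.0);
    a.combine(b);                        // 'a' now summarizes all three values
    System.out.println(a.getCount());    // 3
    System.out.println(a.getSum());      // 8.0
    System.out.println(a.getMin());      // 1.5
    System.out.println(a.getAverage());  // 2.6666666666666665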
+ * + *

{@code DoubleSummaryStatistics} can be used as a + * {@linkplain java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduction} + * target for a {@linkplain java.util.stream.Stream stream}. For example: + * + *

+ * {@code
+ * DoubleSummaryStatistics stats = people.stream()
+ *     .collect(Collectors.toDoubleSummaryStatistics(Person::getWeight));
+ *}
+ * + * This computes, in a single pass, the count of people, as well as the minimum, + * maximum, sum, and average of their weights. + * + * @implNote This implementation is not thread safe. However, it is safe to use + * {@link java.util.stream.Collectors#toDoubleSummaryStatistics(java.util.function.ToDoubleFunction) + * Collectors.toDoubleStatistics()} on a parallel stream, because the parallel + * implementation of {@link java.util.stream.Stream#collect Stream.collect()} + * provides the necessary partitioning, isolation, and merging of results for + * safe and efficient parallel execution. + * @since 1.8 + */ +public class DoubleSummaryStatistics implements DoubleConsumer { + private long count; + private double sum; + private double min = Double.POSITIVE_INFINITY; + private double max = Double.NEGATIVE_INFINITY; + + /** + * Construct an empty instance with zero count, zero sum, + * {@code Double.POSITIVE_INFINITY} min, {@code Double.NEGATIVE_INFINITY} + * max and zero average. + */ + public DoubleSummaryStatistics() { } + + /** + * Records another value into the summary information. + * + * @param value the input value + */ + @Override + public void accept(double value) { + ++count; + sum += value; + min = Math.min(min, value); + max = Math.max(max, value); + } + + /** + * Combines the state of another {@code DoubleSummaryStatistics} into this + * one. + * + * @param other Another {@code DoubleSummaryStatistics} + * @throws NullPointerException if {@code other} is null + */ + public void combine(DoubleSummaryStatistics other) { + count += other.count; + sum += other.sum; + min = Math.min(min, other.min); + max = Math.max(max, other.max); + } + + /** + * Return the count of values recorded. + * + * @return the count of values + */ + public long getCount() { + return count; + } + + /** + * Returns the sum of values recorded, or zero if no values have been + * recorded. The sum returned can vary depending upon the order in which + * values are recorded. This is due to accumulated rounding error in + * addition of values of differing magnitudes. Values sorted by increasing + * absolute magnitude tend to yield more accurate results. If any recorded + * value is a {@code NaN} or the sum is at any point a {@code NaN} then the + * sum will be {@code NaN}. + * + * @return The sum of values, or zero if none + */ + public double getSum() { + return sum; + } + + /** + * Returns the recorded value closest to {@code Double.NEGATIVE_INFINITY}, + * {@code Double.POSITIVE_INFINITY} if no values have been recorded or if + * any recorded value is NaN, then the result is NaN. Unlike the numerical + * comparison operators, this method considers negative zero to be strictly + * smaller than positive zero. + * + * @return The minimal recorded value, {@code Double.NaN} if any recorded + * value was NaN or {@code Double.POSITIVE_INFINITY} if no values were + * recorded. + */ + public double getMin() { + return min; + } + + /** + * Returns the recorded value closest to {@code Double.POSITIVE_INFINITY}, + * {@code Double.NEGATIVE_INFINITY} if no values have been recorded or if + * any recorded value is {@code NaN}, then the result is {@code NaN}. + * Unlike the numerical comparison operators, this method considers negative + * zero to be strictly smaller than positive zero. + * + * @return The maximal recorded value, {@code Double.NaN} if any recorded + * value was NaN or {@code Double.NEGATIVE_INFINITY} if no values were + * recorded. 
+ */ + public double getMax() { + return max; + } + + /** + * Returns the average of values recorded, or zero if no values have been + * recorded. The average returned can vary depending upon the order in + * which values are recorded. This is due to accumulated rounding error in + * addition of values of differing magnitudes. Values sorted by increasing + * absolute magnitude tend to yield more accurate results. If any recorded + * value is a {@code NaN} or the sum is at any point a {@code NaN} then the + * average will be {@code NaN}. + * + * @return The average of values, or zero if none + */ + public double getAverage() { + return count > 0 ? sum / count : 0.0d; + } + + /** + * {@inheritDoc} + * + * Returns a non-empty string representation of this object suitable for + * debugging. The exact presentation format is unspecified and may vary + * between implementations and versions. + */ + @Override + public String toString() { + return String.format( + "%s{count=%d, sum=%f, min=%f, average=%f, max=%f}", + this.getClass().getSimpleName(), + getCount(), + getSum(), + getMin(), + getAverage(), + getMax()); + } +} diff --git a/src/share/classes/java/util/IntSummaryStatistics.java b/src/share/classes/java/util/IntSummaryStatistics.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/IntSummaryStatistics.java @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.IntConsumer; + +/** + * A state object for collecting statistics such as count, min, max, sum, and + * average. + * + *

This class is designed to work with (though does not require) + * {@linkplain java.util.stream streams}. For example, you can compute + * summary statistics on a stream of ints with: + *

+ * {@code
+ * IntSummaryStatistics stats = intStream.collect(IntSummaryStatistics::new,
+ *     IntSummaryStatistics::accept,
+ *     IntSummaryStatistics::combine);
+ * }
+ * + *

{@code IntSummaryStatistics} can be used as a + * {@linkplain java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduction} + * target for a {@linkplain java.util.stream.Stream stream}. For example: + * + *

+ * {@code
+ * IntSummaryStatistics stats = people.stream()
+ *     .collect(Collectors.toIntSummaryStatistics(Person::getDependents));
+ *}
+ *
+ * This computes, in a single pass, the count of people, as well as the minimum,
+ * maximum, sum, and average of their number of dependents.
+ *
+ * @implNote This implementation is not thread safe. However, it is safe to use
+ * {@link java.util.stream.Collectors#toIntSummaryStatistics(java.util.function.ToIntFunction)
+ * Collectors.toIntSummaryStatistics()} on a parallel stream, because the parallel
+ * implementation of {@link java.util.stream.Stream#collect Stream.collect()}
+ * provides the necessary partitioning, isolation, and merging of results for
+ * safe and efficient parallel execution.
+ *
+ *
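A sketch of that parallel-safe usage with the three-argument collect form; IntStream.rangeClosed and parallel() are assumed from the eventual java.util.stream API.

    // Each worker thread gets its own IntSummaryStatistics from the supplier;
    // partial results are merged with combine, so no locking is needed.
    IntSummaryStatistics stats =
        IntStream.rangeClosed(1, 1_000_000)
                 .parallel()
                 .collect(IntSummaryStatistics::new,
                          IntSummaryStatistics::accept,
                          IntSummaryStatistics::combine);
    System.out.println(stats.getCount());  // 1000000
    System.out.println(stats.getMax());    // 1000000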

This implementation does not check for overflow of the sum. + * @since 1.8 + */ +public class IntSummaryStatistics implements IntConsumer { + private long count; + private long sum; + private int min = Integer.MAX_VALUE; + private int max = Integer.MIN_VALUE; + + /** + * Construct an empty instance with zero count, zero sum, + * {@code Integer.MAX_VALUE} min, {@code Integer.MIN_VALUE} max and zero + * average. + */ + public IntSummaryStatistics() { } + + /** + * Records a new value into the summary information + * + * @param value the input value + */ + @Override + public void accept(int value) { + ++count; + sum += value; + min = Math.min(min, value); + max = Math.max(max, value); + } + + /** + * Combines the state of another {@code IntSummaryStatistics} into this one. + * + * @param other Another {@code IntSummaryStatistics} + * @throws NullPointerException if {@code other} is null + */ + public void combine(IntSummaryStatistics other) { + count += other.count; + sum += other.sum; + min = Math.min(min, other.min); + max = Math.max(max, other.max); + } + + /** + * Returns the count of values recorded. + * + * @return the count of values + */ + public long getCount() { + return count; + } + + /** + * Returns the sum of values recorded, or zero if no values have been + * recorded. + * + * @return The sum of values, or zero if none + */ + public long getSum() { + return sum; + } + + /** + * Returns the minimal value recorded, or {@code Integer.MAX_VALUE} if no + * values have been recorded. + * + * @return The minimal value, or {@code Integer.MAX_VALUE} if none + */ + public int getMin() { + return min; + } + + /** + * Returns the maximal value recorded, or {@code Integer.MIN_VALUE} if no + * values have been recorded. + * + * @return The maximal value, or {@code Integer.MIN_VALUE} if none + */ + public int getMax() { + return max; + } + + /** + * Returns the average of values recorded, or zero if no values have been + * recorded. + * + * @return The average of values, or zero if none + */ + public double getAverage() { + return count > 0 ? (double) sum / count : 0.0d; + } + + @Override + /** + * {@inheritDoc} + * + * Returns a non-empty string representation of this object suitable for + * debugging. The exact presentation format is unspecified and may vary + * between implementations and versions. + */ + public String toString() { + return String.format( + "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + this.getClass().getSimpleName(), + getCount(), + getSum(), + getMin(), + getAverage(), + getMax()); + } +} diff --git a/src/share/classes/java/util/LongSummaryStatistics.java b/src/share/classes/java/util/LongSummaryStatistics.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/LongSummaryStatistics.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +/** + * A state object for collecting statistics such as count, min, max, sum, and + * average. + * + *

This class is designed to work with (though does not require) + * {@linkplain java.util.stream streams}. For example, you can compute + * summary statistics on a stream of longs with: + *

+ * {@code
+ * LongSummaryStatistics stats = longStream.collect(LongSummaryStatistics::new,
+ *     LongSummaryStatistics::accept,
+ *     LongSummaryStatistics::combine);
+ * }
+ * + *

{@code LongSummaryStatistics} can be used as a + * {@linkplain java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduction} + * target for a {@linkplain java.util.stream.Stream stream}. For example: + * + *

+ * {@code
+ * LongSummaryStatistics stats = people.stream()
+ *     .collect(Collectors.toLongSummaryStatistics(Person::getAge));
+ *}
+ *
+ * This computes, in a single pass, the count of people, as well as the minimum,
+ * maximum, sum, and average of their ages.
+ *
+ * @implNote This implementation is not thread safe. However, it is safe to use
+ * {@link java.util.stream.Collectors#toLongSummaryStatistics(java.util.function.ToLongFunction)
+ * Collectors.toLongSummaryStatistics()} on a parallel stream, because the parallel
+ * implementation of {@link java.util.stream.Stream#collect Stream.collect()}
+ * provides the necessary partitioning, isolation, and merging of results for
+ * safe and efficient parallel execution.
+ *
+ *
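Because the class implements both LongConsumer and IntConsumer, a single instance can absorb int and long values; a small sketch (IntStream.range is assumed from the eventual stream API):

    LongSummaryStatistics stats = new LongSummaryStatistics();
    IntStream.range(0, 10).forEach(stats);  // stats is an IntConsumer: accept(int)
    stats.accept(1_000_000_000_000L);       // and a LongConsumer: accept(long)
    System.out.println(stats.getCount());   // 11
    System.out.println(stats.getMax());     // 1000000000000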

This implementation does not check for overflow of the sum. + * @since 1.8 + */ +public class LongSummaryStatistics implements LongConsumer, IntConsumer { + private long count; + private long sum; + private long min = Long.MAX_VALUE; + private long max = Long.MIN_VALUE; + + /** + * Construct an empty instance with zero count, zero sum, + * {@code Long.MAX_VALUE} min, {@code Long.MIN_VALUE} max and zero + * average. + */ + public LongSummaryStatistics() { } + + /** + * Records a new {@code int} value into the summary information. + * + * @param value the input value + */ + @Override + public void accept(int value) { + accept((long) value); + } + + /** + * Records a new {@code long} value into the summary information. + * + * @param value the input value + */ + @Override + public void accept(long value) { + ++count; + sum += value; + min = Math.min(min, value); + max = Math.max(max, value); + } + + /** + * Combines the state of another {@code LongSummaryStatistics} into this + * one. + * + * @param other Another {@code LongSummaryStatistics} + * @throws NullPointerException if {@code other} is null + */ + public void combine(LongSummaryStatistics other) { + count += other.count; + sum += other.sum; + min = Math.min(min, other.min); + max = Math.max(max, other.max); + } + + /** + * Returns the count of values recorded. + * + * @return the count of values + */ + public long getCount() { + return count; + } + + /** + * Returns the sum of values recorded, or zero if no values have been + * recorded. + * + * @return The sum of values, or zero if none + */ + public long getSum() { + return sum; + } + + /** + * Returns the minimal value recorded, or {@code Long.MAX_VALUE} if no + * values have been recorded. + * + * @return The minimal value, or {@code Long.MAX_VALUE} if none + */ + public long getMin() { + return min; + } + + /** + * Returns the maximal value recorded, or {@code Long.MIN_VALUE} if no + * values have been recorded + * + * @return The maximal value, or {@code Long.MIN_VALUE} if none + */ + public long getMax() { + return max; + } + + /** + * Returns the average of values recorded, or zero if no values have been + * recorded. + * + * @return The average of values, or zero if none + */ + public double getAverage() { + return count > 0 ? (double) sum / count : 0.0d; + } + + @Override + /** + * {@inheritDoc} + * + * Returns a non-empty string representation of this object suitable for + * debugging. The exact presentation format is unspecified and may vary + * between implementations and versions. + */ + public String toString() { + return String.format( + "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + this.getClass().getSimpleName(), + getCount(), + getSum(), + getMin(), + getAverage(), + getMax()); + } +} # HG changeset patch # User briangoetz # Date 1366223944 14400 # Node ID e956201d5be7309a51dc710c4b88d4744620456f # Parent bb0490f08ae093ca3b24b540229da9483aa893f9 [mq]: JDK-8008682 diff --git a/src/share/classes/java/util/stream/BaseStream.java b/src/share/classes/java/util/stream/BaseStream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/BaseStream.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Iterator; +import java.util.Spliterator; + +/** + * Base interface for stream types such as {@link Stream}, {@link IntStream}, + * etc. Contains methods common to all stream types. Many of these methods + * are implemented by {@link AbstractPipeline}, even though + * {@code AbstractPipeline} does not directly implement {@code BaseStream}. + * + * @param Type of stream elements. + * @param Type of stream implementing {@code BaseStream}. + * @since 1.8 + */ +interface BaseStream> { + /** + * Returns an iterator for the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return the element iterator for this stream + */ + Iterator iterator(); + + /** + * Returns a spliterator for the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return the element spliterator for this stream + */ + Spliterator spliterator(); + + /** + * Returns whether this stream, when executed, would execute in parallel + * (assuming no further modification of the stream, such as appending + * further intermediate operations or changing its parallelism). Calling + * this method after invoking an intermediate or terminal stream operation + * method may yield unpredictable results. + * + * @return whether this stream would execute in parallel if executed without + * further modification + */ + boolean isParallel(); + + /** + * Produces an equivalent stream that is sequential. May return + * itself, either because the stream was already sequential, or because + * the underlying stream state was modified to be sequential. + * + *

This is an intermediate + * operation. + * + * @return a sequential stream + */ + S sequential(); + + /** + * Produces an equivalent stream that is parallel. May return + * itself, either because the stream was already parallel, or because + * the underlying stream state was modified to be parallel. + * + *

This is an intermediate + * operation. + * + * @return a parallel stream + */ + S parallel(); + + /** + * Produces an equivalent stream that is + * unordered. May return + * itself if the stream was already unordered. + * + *

This is an intermediate + * operation. + * @return an unordered stream + */ + S unordered(); +} diff --git a/src/share/classes/java/util/stream/CloseableStream.java b/src/share/classes/java/util/stream/CloseableStream.java new file mode 100644 --- /dev/null +++ b/src/share/classes/java/util/stream/CloseableStream.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.util.stream; + +/** + * A {@code CloseableStream} is a {@code Stream} that can be closed. + * The close method is invoked to release resources that the object is + * holding (such as open files). + * + * @param The type of stream elements + * @since 1.8 + */ +public interface CloseableStream extends Stream, AutoCloseable { + + /** + * Closes this resource, relinquishing any underlying resources. + * This method is invoked automatically on objects managed by the + * {@code try}-with-resources statement. Does nothing if called when + * the resource has already been closed. + * + * This method does not allow throwing checked {@code Exception} like + * {@link AutoCloseable#close() AutoCloseable.close()}. Cases where the + * close operation may fail require careful attention by implementers. It + * is strongly advised to relinquish the underlying resources and to + * internally mark the resource as closed. The {@code close} + * method is unlikely to be invoked more than once and so this ensures + * that the resources are released in a timely manner. Furthermore it + * reduces problems that could arise when the resource wraps, or is + * wrapped, by another resource. + * + * @see AutoCloseable#close() + */ + void close(); +} diff --git a/src/share/classes/java/util/stream/Collector.java b/src/share/classes/java/util/stream/Collector.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Collector.java @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.Collections; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Supplier; + +/** + * A reduction operation that + * supports folding input elements into a cumulative result. The result may be + * a value or may be a mutable result container. Examples of operations + * accumulating results into a mutable result container include: accumulating + * input elements into a {@code Collection}; concatenating strings into a + * {@code StringBuilder}; computing summary information about elements such as + * sum, min, max, or average; computing "pivot table" summaries such as "maximum + * valued transaction by seller", etc. Reduction operations can be performed + * either sequentially or in parallel. + * + *

The following are examples of using the predefined {@code Collector} + * implementations in {@link Collectors} with the {@code Stream} API to perform + * mutable reduction tasks: + *

+ * {@code
+ *     // Accumulate elements into a List
+ *     List list = stream.collect(Collectors.toList());
+ *
+ *     // Accumulate elements into a TreeSet
+ *     Set set = stream.collect(Collectors.toCollection(TreeSet::new));
+ *
+ *     // Convert elements to strings and concatenate them, separated by commas
+ *     String joined = stream.map(Object::toString)
+ *                           .collect(Collectors.toStringJoiner(", "))
+ *                           .toString();
+ *
+ *     // Find highest-paid employee
+ *     Employee highestPaid = employees.stream()
+ *                                     .collect(Collectors.maxBy(Comparators.comparing(Employee::getSalary)));
+ *
+ *     // Group employees by department
+ *     Map<Department, List<Employee>> byDept
+ *         = employees.stream()
+ *                    .collect(Collectors.groupingBy(Employee::getDepartment));
+ *
+ *     // Find highest-paid employee by department
+ *     Map<Department, Employee> highestPaidByDept
+ *         = employees.stream()
+ *                    .collect(Collectors.groupingBy(Employee::getDepartment,
+ *                                                   Collectors.maxBy(Comparators.comparing(Employee::getSalary))));
+ *
+ *     // Partition students into passing and failing
+ *     Map<Boolean, List<Student>> passingFailing =
+ *         students.stream()
+ *                 .collect(Collectors.partitioningBy(s -> s.getGrade() >= PASS_THRESHOLD));
+ *
+ * }
+ * + *

A {@code Collector} is specified by three functions that work together to + * manage a result or result container. They are: creation of an initial + * result, incorporating a new data element into a result, and combining two + * results into one. The last function -- combining two results into one -- is + * used during parallel operations, where subsets of the input are accumulated + * in parallel, and then the subresults merged into a combined result. The + * result may be a mutable container or a value. If the result is mutable, the + * accumulation and combination functions may either mutate their left argument + * and return that (such as adding elements to a collection), or return a new + * result, in which case it should not perform any mutation. + * + *

Collectors also have a set of characteristics, including + * {@link Characteristics#CONCURRENT} and + * {@link Characteristics#STRICTLY_MUTATIVE}. These characteristics provide + * hints that can be used by a reduction implementation to provide better + * performance. + * + *
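For illustration, a minimal collector written against this draft interface (the type parameters <T, R> follow the class-level documentation; the interface here differs from the Collector eventually released in JDK 8, so this is a sketch of this patch's API only):

    import java.util.EnumSet;
    import java.util.Set;
    import java.util.function.BiFunction;
    import java.util.function.BinaryOperator;
    import java.util.function.Supplier;

    // Accumulates CharSequences into a StringBuilder by strict mutation.
    class ToStringBuilder implements Collector<CharSequence, StringBuilder> {
        @Override public Supplier<StringBuilder> resultSupplier() {
            return StringBuilder::new;        // a fresh, empty result container
        }
        @Override public BiFunction<StringBuilder, CharSequence, StringBuilder> accumulator() {
            return StringBuilder::append;     // mutates and returns its first argument
        }
        @Override public BinaryOperator<StringBuilder> combiner() {
            return StringBuilder::append;     // folds the right result into the left
        }
        @Override public Set<Characteristics> characteristics() {
            return EnumSet.of(Characteristics.STRICTLY_MUTATIVE);
        }
    }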

+ * Libraries that implement reduction based on {@code Collector}, such as
+ * {@link Stream#collect(Collector)}, must adhere to the following constraints:
+ *   • The first argument passed to the accumulator function, and both
+ *     arguments passed to the combiner function, must be the result of a
+ *     previous invocation of {@link #resultSupplier()}, {@link #accumulator()},
+ *     or {@link #combiner()}.
+ *   • The implementation should not do anything with the result of any of
+ *     the result supplier, accumulator, or combiner functions other than to
+ *     pass them again to the accumulator or combiner functions, or return them
+ *     to the caller of the reduction operation.
+ *   • If a result is passed to the accumulator or combiner function, and
+ *     the same object is not returned from that function, it is never used
+ *     again.
+ *   • Once a result is passed to the combiner function, it is never passed
+ *     to the accumulator function again.
+ *   • For non-concurrent collectors, any result returned from the result
+ *     supplier, accumulator, or combiner functions must be serially
+ *     thread-confined. This enables collection to occur in parallel without
+ *     the {@code Collector} needing to implement any additional synchronization.
+ *     The reduction implementation must manage that the input is properly
+ *     partitioned, that partitions are processed in isolation, and combining
+ *     happens only after accumulation is complete.
+ *   • For concurrent collectors, an implementation is free to (but not
+ *     required to) implement reduction concurrently. A concurrent reduction
+ *     is one where the accumulator function is called concurrently from
+ *     multiple threads, using the same concurrently-modifiable result container,
+ *     rather than keeping the result isolated during accumulation.
+ *     A concurrent reduction should only be applied if the collector has the
+ *     {@link Characteristics#UNORDERED} characteristic or if the
+ *     originating data is unordered.
+ * + * @apiNote + *

+ * Performing a reduction operation with a {@code Collector} should produce a
+ * result equivalent to:
+ * {@code
+ *     BiFunction<R, T, R> accumulator = collector.accumulator();
+ *     R result = collector.resultSupplier().get();
+ *     for (T t : data)
+ *         result = accumulator.apply(result, t);
+ *     return result;
+ * }
+ * + * However, the library is free to partition the input, perform the reduction on + * the partitions, and then use the combiner function to combine the partial + * results to achieve a parallel reduction. Depending on the specific reduction + * operation, this may perform better or worse, depending on the relative cost + * of the accumulator and combiner functions. + * + *
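The partitioned evaluation described above might look like the following hypothetical helper, written against this draft Collector interface:

    import java.util.List;
    import java.util.function.BiFunction;

    class PartitionedReduce {
        // Evaluate the collector over two partitions, then merge the partial
        // results with the combiner, as the contract above permits.
        static <T, R> R reduceInTwoParts(List<T> left, List<T> right, Collector<T, R> collector) {
            BiFunction<R, T, R> accumulator = collector.accumulator();
            R r1 = collector.resultSupplier().get();
            for (T t : left)
                r1 = accumulator.apply(r1, t);
            R r2 = collector.resultSupplier().get();
            for (T t : right)
                r2 = accumulator.apply(r2, t);
            return collector.combiner().apply(r1, r2);
        }
    }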

An example of an operation that can be easily modeled by {@code Collector} + * is accumulating elements into a {@code TreeSet}. In this case, the {@code + * resultSupplier()} function is {@code () -> new Treeset()}, the + * {@code accumulator} function is + * {@code (set, element) -> { set.add(element); return set; }}, and the combiner + * function is {@code (left, right) -> { left.addAll(right); return left; }}. + * (This behavior is implemented by + * {@code Collectors.toCollection(TreeSet::new)}). + * + * TODO Associativity and commutativity + * + * @see Stream#collect(Collector) + * @see Collectors + * + * @param The type of input element to the collect operation + * @param The result type of the collect operation + * @since 1.8 + */ +public interface Collector { + /** + * A function that creates and returns a new result that represents + * "no values". If the accumulator or combiner functions may mutate their + * arguments, this must be a new, empty result container. + * + * @return A function which, when invoked, returns a result representing + * "no values" + */ + Supplier resultSupplier(); + + /** + * A function that folds a new value into a cumulative result. The result + * may be a mutable result container or a value. The accumulator function + * may modify a mutable container and return it, or create a new result and + * return that, but if it returns a new result object, it must not modify + * any of its arguments. + * + *

If the collector has the {@link Characteristics#STRICTLY_MUTATIVE} + * characteristic, then the accumulator function must always return + * its first argument, after possibly mutating its state. + * + * @return A function which folds a new value into a cumulative result + */ + BiFunction accumulator(); + + /** + * A function that accepts two partial results and merges them. The + * combiner function may fold state from one argument into the other and + * return that, or may return a new result object, but if it returns + * a new result object, it must not modify the state of either of its + * arguments. + * + *

If the collector has the {@link Characteristics#STRICTLY_MUTATIVE} + * characteristic, then the combiner function must always return + * its first argument, after possibly mutating its state. + * + * @return A function which combines two partial results into a cumulative + * result + */ + BinaryOperator combiner(); + + /** + * Returns a {@code Set} of {@code Collector.Characteristics} indicating + * the characteristics of this Collector. This set should be immutable. + * @return An immutable set of collector characteristics + */ + default Set characteristics() { + return Collections.emptySet(); + } + + /** + * Characteristics indicating properties of a {@code Collector}, which can + * be used to optimize reduction implementations. + */ + enum Characteristics { + /** + * Indicates that this collector is concurrent, meaning that + * the result container can support the accumulator function being + * called concurrently with the same result container from multiple + * threads. Concurrent collectors must also always have the + * {@code STRICTLY_MUTATIVE} characteristic. + * + *

If a {@code CONCURRENT} collector is not also {@code UNORDERED}, + * then it should only be evaluated concurrently if applied to an + * unordered data source. + */ + CONCURRENT, + /** + * Indicates that the result container has no intrinsic order, such as + * a {@link Set}. + */ + UNORDERED, + /** + * Indicates that this collector operates by strict mutation of its + * result container. This means that the {@link #accumulator()} and + * {@link #combiner()} functions will always modify the state of and + * return their first argument, rather than returning a different result + * container. + */ + STRICTLY_MUTATIVE + } +} diff --git a/src/share/classes/java/util/stream/DelegatingStream.java b/src/share/classes/java/util/stream/DelegatingStream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/DelegatingStream.java @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.util.stream; + +import java.util.Comparator; +import java.util.Iterator; +import java.util.Objects; +import java.util.Optional; +import java.util.Spliterator; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongFunction; + +/** + * A {@code Stream} implementation that delegates operations to another {@code + * Stream}. + * + * @since 1.8 + */ +public class DelegatingStream implements Stream { + final private Stream delegate; + + /** + * Construct a {@code Stream} that delegates operations to another {@code + * Stream}. 
+ * + * @param delegate The underlying {@link Stream} to which we delegate all + * {@code Stream} methods + * @throws NullPointerException if the delegate is null + */ + public DelegatingStream(Stream delegate) { + this.delegate = Objects.requireNonNull(delegate); + } + + // -- BaseStream methods -- + + @Override + public Spliterator spliterator() { + return delegate.spliterator(); + } + + @Override + public boolean isParallel() { + return delegate.isParallel(); + } + + @Override + public Iterator iterator() { + return delegate.iterator(); + } + + // -- Stream methods -- + + @Override + public Stream filter(Predicate predicate) { + return delegate.filter(predicate); + } + + @Override + public Stream map(Function mapper) { + return delegate.map(mapper); + } + + @Override + public IntStream mapToInt(ToIntFunction mapper) { + return delegate.mapToInt(mapper); + } + + @Override + public LongStream mapToLong(ToLongFunction mapper) { + return delegate.mapToLong(mapper); + } + + @Override + public DoubleStream mapToDouble(ToDoubleFunction mapper) { + return delegate.mapToDouble(mapper); + } + + @Override + public Stream flatMap(Function> mapper) { + return delegate.flatMap(mapper); + } + + @Override + public IntStream flatMapToInt(Function mapper) { + return delegate.flatMapToInt(mapper); + } + + @Override + public LongStream flatMapToLong(Function mapper) { + return delegate.flatMapToLong(mapper); + } + + @Override + public DoubleStream flatMapToDouble(Function mapper) { + return delegate.flatMapToDouble(mapper); + } + + @Override + public Stream distinct() { + return delegate.distinct(); + } + + @Override + public Stream sorted() { + return delegate.sorted(); + } + + @Override + public Stream sorted(Comparator comparator) { + return delegate.sorted(comparator); + } + + @Override + public void forEach(Consumer action) { + delegate.forEach(action); + } + + @Override + public void forEachOrdered(Consumer action) { + delegate.forEachOrdered(action); + } + + @Override + public Stream peek(Consumer consumer) { + return delegate.peek(consumer); + } + + @Override + public Stream limit(long maxSize) { + return delegate.limit(maxSize); + } + + @Override + public Stream substream(long startingOffset) { + return delegate.substream(startingOffset); + } + + @Override + public Stream substream(long startingOffset, long endingOffset) { + return delegate.substream(startingOffset, endingOffset); + } + + @Override + public A[] toArray(IntFunction generator) { + return delegate.toArray(generator); + } + + @Override + public Object[] toArray() { + return delegate.toArray(); + } + + @Override + public T reduce(T identity, BinaryOperator accumulator) { + return delegate.reduce(identity, accumulator); + } + + @Override + public Optional reduce(BinaryOperator accumulator) { + return delegate.reduce(accumulator); + } + + @Override + public U reduce(U identity, BiFunction accumulator, + BinaryOperator combiner) { + return delegate.reduce(identity, accumulator, combiner); + } + + @Override + public R collect(Supplier resultFactory, + BiConsumer accumulator, + BiConsumer combiner) { + return delegate.collect(resultFactory, accumulator, combiner); + } + + @Override + public R collect(Collector collector) { + return delegate.collect(collector); + } + + @Override + public Optional max(Comparator comparator) { + return delegate.max(comparator); + } + + @Override + public Optional min(Comparator comparator) { + return delegate.min(comparator); + } + + @Override + public long count() { + return delegate.count(); + } + + 
@Override + public boolean anyMatch(Predicate predicate) { + return delegate.anyMatch(predicate); + } + + @Override + public boolean allMatch(Predicate predicate) { + return delegate.allMatch(predicate); + } + + @Override + public boolean noneMatch(Predicate predicate) { + return delegate.noneMatch(predicate); + } + + @Override + public Optional findFirst() { + return delegate.findFirst(); + } + + @Override + public Optional findAny() { + return delegate.findAny(); + } + + @Override + public Stream unordered() { + return delegate.unordered(); + } + + @Override + public Stream sequential() { + return delegate.sequential(); + } + + @Override + public Stream parallel() { + return delegate.parallel(); + } +} diff --git a/src/share/classes/java/util/stream/DoubleStream.java b/src/share/classes/java/util/stream/DoubleStream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/DoubleStream.java @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.DoubleSummaryStatistics; +import java.util.OptionalDouble; +import java.util.PrimitiveIterator; +import java.util.Spliterator; +import java.util.function.BiConsumer; +import java.util.function.DoubleBinaryOperator; +import java.util.function.DoubleConsumer; +import java.util.function.DoubleFunction; +import java.util.function.DoublePredicate; +import java.util.function.DoubleToIntFunction; +import java.util.function.DoubleToLongFunction; +import java.util.function.DoubleUnaryOperator; +import java.util.function.Function; +import java.util.function.ObjDoubleConsumer; +import java.util.function.Supplier; + +/** + * A sequence of primitive double elements supporting sequential and parallel + * bulk operations. Streams support lazy transformative operations (transforming + * a stream to another stream) such as {@code filter} and {@code map}, and + * consuming operations, such as {@code forEach}, {@code findFirst}, and {@code + * iterator}. Once an operation has been performed on a stream, it + * is considered consumed and no longer usable for other operations. + * + *

For sequential stream pipelines, all operations are performed in the + * encounter order of the pipeline + * source, if the pipeline source has a defined encounter order. + * + *

For parallel stream pipelines, unless otherwise specified, intermediate + * stream operations preserve the + * encounter order of their source, and terminal operations + * respect the encounter order of their source, if the source + * has an encounter order. + * + *

Unless otherwise noted, passing a {@code null} argument to any stream + * method may result in a {@link NullPointerException}. + * + * @apiNote + * Streams are not data structures; they do not manage the storage for their + * elements, nor do they support access to individual elements. However, + * you can use the {@link #iterator()} or {@link #spliterator()} operations to + * perform a controlled traversal. + * + * @since 1.8 + * @see java.util.stream + */ +public interface DoubleStream extends BaseStream { + + /** + * Produces a stream consisting of the elements of this stream that match + * the given predicate. + * + *

This is an intermediate + * operation. + * + * @param predicate A + * non-interfering, stateless predicate to apply to + * each element to determine if it should be included + * @return the new stream + */ + DoubleStream filter(DoublePredicate predicate); + + /** + * Produces a stream consisting of the results of applying the given + * function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper a + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + DoubleStream map(DoubleUnaryOperator mapper); + + /** + * Produces an object-valued {@code Stream} consisting of the results of + * applying the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @param The element type of the new stream + * @return the new stream + */ + Stream mapToObj(DoubleFunction mapper); + + /** + * Produces an {@code IntStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + IntStream mapToInt(DoubleToIntFunction mapper); + + /** + * Produces a {@code LongStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + LongStream mapToLong(DoubleToLongFunction mapper); + + /** + * Produces a stream consisting of the results of replacing each + * element of this stream with the contents of the stream + * produced by applying the provided function to each element. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces a stream of new + * values + * @return the new stream + * @see Stream#flatMap(Function) + */ + DoubleStream flatMap(DoubleFunction mapper); + + /** + * Produces a stream consisting of the distinct elements of this stream. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + DoubleStream distinct(); + + /** + * Produces a stream consisting of the elements of this stream in sorted + * order. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + DoubleStream sorted(); + + /** + * Produces a stream consisting of the elements of this stream, additionally + * performing the provided action on each element as elements are consumed + * from the resulting stream. + * + *

This is an intermediate + * operation. + * + *

For parallel stream pipelines, the action may be called at + * whatever time and in whatever thread the element is made available by the + * upstream operation. If the action modifies shared state, + * it is responsible for providing the required synchronization. + * + * @apiNote This method exists mainly to support debugging, where you want + * to see the elements as they flow past a certain point in a pipeline: + *

{@code
+     *     list.stream()
+     *         .filter(filteringFunction)
+     *         .peek(e -> {System.out.println("Filtered value: " + e); })
+     *         .map(mappingFunction)
+     *         .peek(e -> {System.out.println("Mapped value: " + e); })
+     *         .collect(Collectors.toDoubleSummaryStatistics());
+     * }
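A self-contained variant of the idiom above, using only methods declared in this interface (the helper name is hypothetical, and the DoubleStream is assumed to be supplied by the caller):

    // requires: import java.util.stream.DoubleStream;
    // Keep the positive values, print each one as it passes, then reduce to a sum.
    static double sumOfPositives(DoubleStream values) {
        return values.filter(v -> v > 0.0)
                     .peek(v -> System.out.println("kept: " + v))
                     .sum();
    }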
+ * + * @param consumer A + * non-interfering action to perform on the elements as + * they are consumed from the stream + * @return the new stream + */ + DoubleStream peek(DoubleConsumer consumer); + + /** + * Produces a stream consisting of the elements of this stream, + * truncated to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param maxSize the number of elements the stream should be limited to + * @return the new stream + */ + DoubleStream limit(long maxSize); + + /** + * Produces a stream consisting of the remaining elements of this stream + * after discarding the first {@code startingOffset} elements (or all + * elements if the stream has fewer than {@code startingOffset} elements). + * + *

This is a stateful + * intermediate operation. + * + * @param startingOffset the number of leading elements to skip + * @return the new stream + */ + DoubleStream substream(long startingOffset); + + /** + * Produces a stream consisting of the elements of this stream after + * discarding the first {@code startingOffset} elements (or all elements + * if the stream has fewer than {@code startingOffset} elements), and + * truncating the remainder to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param startingOffset the starting position of the substream, inclusive + * @param endingOffset the ending position of the substream, exclusive + * @return the new stream + */ + DoubleStream substream(long startingOffset, long endingOffset); + + /** + * Performs an operation for each element of this stream. + * + *

This is a terminal + * operation. + * + *

For parallel stream pipelines, this operation does not + * guarantee to respect the encounter order of the stream, as doing so + * would sacrifice the benefit of parallelism. For any given element, the + * action may be performed at whatever time and in whatever thread the + * library chooses. If the operation accesses shared state, it is + * responsible for providing the required synchronization. + * + * @param consumer A + * non-interfering action to perform on the elements + */ + void forEach(DoubleConsumer consumer); + + /** + * Performs an operation for each element of this stream, guaranteeing that + * each element is processed in encounter order for streams that have a + * defined encounter order. + * + *

This is a terminal + * operation. + * + * @param consumer A + * non-interfering action to perform on the elements + * @see #forEach(DoubleConsumer) + */ + void forEachOrdered(DoubleConsumer consumer); + + /** + * Produces an array containing the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return an array containing the elements of this stream + */ + double[] toArray(); + + /** + * Performs a reduction on the + * elements of this stream, using the provided identity value and + * an associative + * accumulation function, and returns the reduced value. This is equivalent + * to: + *

{@code
+     *     double result = identity;
+     *     for (double element : this stream)
+     *         result = accumulator.apply(result, element);
+     *     return result;
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code identity} value must be an identity for the accumulator + * function. This means that for all {@code x}, + * {@code accumulator.apply(identity, x)} is equal to {@code x}. + * The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @apiNote Sum, min, max, and average are all special cases of reduction. + * Summing a stream of numbers can be expressed as: + * + *

{@code
+     *     double sum = numbers.reduce(0, (a, b) -> a+b);
+     * }
+ * + * or more compactly: + * + *
{@code
+     *     double sum = numbers.reduce(0, Double::sum);
+     * }
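Any associative operation with an identity fits the same pattern; for instance, a product (a sketch with a hypothetical helper name, assuming the caller supplies the DoubleStream):

    // requires: import java.util.stream.DoubleStream;
    // 1.0 is an identity for multiplication and (a, b) -> a * b is associative,
    // so the reduction may run sequentially or be split across threads.
    static double product(DoubleStream factors) {
        return factors.reduce(1.0, (a, b) -> a * b);
    }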
+ * + *

While this may seem a more roundabout way to perform an aggregation + * compared to simply mutating a running total in a loop, reduction + * operations parallelize more gracefully, without needing additional + * synchronization and with greatly reduced risk of data races. + * + * @param identity The identity value for the accumulating function + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #sum() + * @see #min() + * @see #max() + * @see #average() + */ + double reduce(double identity, DoubleBinaryOperator op); + + /** + * Performs a reduction on the + * elements of this stream, using an + * associative accumulation + * function, and returns an {@code OptionalDouble} describing the reduced value, + * if any. This is equivalent to: + *

{@code
+     *     boolean foundAny = false;
+     *     double result = 0;
+     *     for (double element : this stream) {
+     *         if (!foundAny) {
+     *             foundAny = true;
+     *             result = element;
+     *         }
+     *         else
+     *             result = accumulator.apply(result, element);
+     *     }
+     *     return foundAny ? OptionalDouble.of(result) : OptionalDouble.empty();
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code accumulator} function must be an + * associative function. + * + *
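For instance, Double.max is associative, so the maximum of a stream can be folded with this form (a sketch; the helper name is hypothetical):

    // requires: import java.util.OptionalDouble; import java.util.stream.DoubleStream;
    // An empty stream yields OptionalDouble.empty(), mirroring max() below.
    static OptionalDouble largest(DoubleStream values) {
        return values.reduce(Double::max);
    }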

This is a terminal + * operation. + * + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #reduce(double, DoubleBinaryOperator) + */ + OptionalDouble reduce(DoubleBinaryOperator op); + + /** + * Performs a mutable + * reduction operation on the elements of this stream. A mutable + * reduction is one in which the reduced value is a mutable value holder, + * such as an {@code ArrayList}, and elements are incorporated by updating + * the state of the result, rather than by replacing the result. This + * produces a result equivalent to: + *

{@code
+     *     R result = resultFactory.get();
+     *     for (double element : this stream)
+     *         accumulator.accept(result, element);
+     *     return result;
+     * }
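For example, a mutable reduction into a list (a sketch; the helper name and the choice of ArrayList are illustrative only):

    // requires: import java.util.ArrayList; import java.util.stream.DoubleStream;
    static ArrayList<Double> toList(DoubleStream values) {
        return values.collect(ArrayList::new,           // fresh container for each partial result
                              (list, v) -> list.add(v), // fold one double (boxed) into a container
                              ArrayList::addAll);       // merge two partial containers
    }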
+ * + * Like {@link #reduce(double, DoubleBinaryOperator)}, {@code collect} operations + * can be parallelized without requiring additional synchronization. + * + *

This is a terminal + * operation. + * + * @param resultFactory Function that creates a new result container. + * For a parallel execution, this function may be + * called multiple times and must return a fresh value + * each time. + * @param accumulator An associative + * non-interfering, + * stateless function for incorporating an additional + * element into a result + * @param combiner An associative + * non-interfering, + * stateless function for combining two values, which + * must be compatible with the accumulator function + * @param Type of the result + * @return The result of the reduction + * @see Stream#collect(Supplier, BiConsumer, BiConsumer) + */ + R collect(Supplier resultFactory, + ObjDoubleConsumer accumulator, + BiConsumer combiner); + + /** + * Returns the sum of elements in this stream. This is a special case + * of a reduction + * and is equivalent to: + *

{@code
+     *     return reduce(0, Double::sum);
+     * }
+ * @return The sum of elements in this stream + */ + double sum(); + + /** + * Returns an {@code OptionalDouble} describing the minimal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Double::min);
+     * }
+ * @return The minimal element of this stream, or an empty + * {@code OptionalDouble} + */ + OptionalDouble min(); + + /** + * Returns an {@code OptionalDouble} describing the maximal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Double::max);
+     * }
+ * @return The maximal element of this stream, or an empty + * {@code OptionalDouble} + */ + OptionalDouble max(); + + /** + * Returns the count of elements in this stream. This is a special case of + * a reduction and is + * equivalent to: + *
{@code
+     *     return mapToLong(e -> 1L).sum();
+     * }
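In combination with filter, the same reduction counts matching elements (a sketch, hypothetical helper):

    // requires: import java.util.stream.DoubleStream;
    static long countAbove(DoubleStream values, double threshold) {
        return values.filter(v -> v > threshold).count();
    }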
+ * @return The count of elements in this stream + */ + long count(); + + /** + * Returns an {@code OptionalDouble} describing the average of elements of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction. + * @return The average of elements in this stream, or an empty + * {@code OptionalDouble} + */ + OptionalDouble average(); + + /** + * Returns a {@code DoubleSummaryStatistics} describing various + * summary data about the elements of this stream. This is a special + * case of a reduction. + * @return A {@code DoubleSummaryStatistics} describing various + * summary data about the elements of this stream + */ + DoubleSummaryStatistics summaryStatistics(); + + /** + * Returns whether any elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this + * stream + * @return True if any elements of the stream match the provided predicate + */ + boolean anyMatch(DoublePredicate predicate); + + /** + * Returns whether all elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if all elements of the stream match the provided predicate + */ + boolean allMatch(DoublePredicate predicate); + + /** + * Returns whether no elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if no elements of the stream match the provided predicate + */ + boolean noneMatch(DoublePredicate predicate); + + /** + * Returns an {@link OptionalDouble} describing the first element of this stream + * (in the encounter order), or an empty {@code OptionalDouble} if the stream is + * empty. If the stream has no encounter order, then any element may be + * returned. + * + *

This is a short-circuiting + * terminal operation. + * + * @return An {@code OptionalDouble} describing the first element of this stream, + * or an empty {@code OptionalDouble} if the stream is empty + */ + OptionalDouble findFirst(); + + /** + * Returns an {@link OptionalDouble} describing some element of the stream, or an + * empty {@code OptionalDouble} if the stream is empty. + * + *

This is a short-circuiting + * terminal operation. + * + *

The behavior of this operation is explicitly nondeterministic; it is + * free to select any element in the stream. This is to allow for maximal + * performance in parallel operations; the cost is that multiple invocations + * on the same source may not return the same result. (If the first element + * in the encounter order is desired, use {@link #findFirst()} instead.) + * + * @return An {@code OptionalDouble} describing some element of this stream, or an + * empty {@code OptionalDouble} if the stream is empty + * @see #findFirst() + */ + OptionalDouble findAny(); + + /** + * Returns a {@code Stream} consisting of the elements of this stream, + * boxed to {@code Double}. + * @return A {@code Stream} consistent of the elements of this stream, + * boxed to {@code Double} + */ + Stream boxed(); + + @Override + DoubleStream sequential(); + + @Override + DoubleStream parallel(); + + @Override + PrimitiveIterator.OfDouble iterator(); + + @Override + Spliterator.OfDouble spliterator(); + +} diff --git a/src/share/classes/java/util/stream/IntStream.java b/src/share/classes/java/util/stream/IntStream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/IntStream.java @@ -0,0 +1,603 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.IntSummaryStatistics; +import java.util.OptionalDouble; +import java.util.OptionalInt; +import java.util.PrimitiveIterator; +import java.util.Spliterator; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.IntBinaryOperator; +import java.util.function.IntConsumer; +import java.util.function.IntFunction; +import java.util.function.IntPredicate; +import java.util.function.IntToDoubleFunction; +import java.util.function.IntToLongFunction; +import java.util.function.IntUnaryOperator; +import java.util.function.ObjIntConsumer; +import java.util.function.Supplier; + +/** + * A sequence of primitive integer elements supporting sequential and parallel + * bulk operations. Streams support lazy transformative operations (transforming + * a stream to another stream) such as {@code filter} and {@code map}, and + * consuming operations, such as {@code forEach}, {@code findFirst}, and {@code + * iterator}. 
Once an operation has been performed on a stream, it + * is considered consumed and no longer usable for other operations. + * + *

For sequential stream pipelines, all operations are performed in the + * encounter order of the pipeline + * source, if the pipeline source has a defined encounter order. + * + *

For parallel stream pipelines, unless otherwise specified, intermediate + * stream operations preserve the + * encounter order of their source, and terminal operations + * respect the encounter order of their source, if the source + * has an encounter order. + * + *

Unless otherwise noted, passing a {@code null} argument to any stream + * method may result in a {@link NullPointerException}. + * + * @apiNote + * Streams are not data structures; they do not manage the storage for their + * elements, nor do they support access to individual elements. However, + * you can use the {@link #iterator()} or {@link #spliterator()} operations to + * perform a controlled traversal. + * + * @since 1.8 + * @see java.util.stream + */ +public interface IntStream extends BaseStream { + + /** + * Produces a stream consisting of the elements of this stream that match + * the given predicate. + * + *

This is an intermediate + * operation. + * + * @param predicate A + * non-interfering, stateless predicate to apply to + * each element to determine if it should be included + * @return the new stream + */ + IntStream filter(IntPredicate predicate); + + /** + * Produces a stream consisting of the results of applying the given + * function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper a + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + IntStream map(IntUnaryOperator mapper); + + /** + * Produces an object-valued {@code Stream} consisting of the results of + * applying the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @param The element type of the new stream + * @return the new stream + */ + Stream mapToObj(IntFunction mapper); + + /** + * Produces a {@code LongStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + LongStream mapToLong(IntToLongFunction mapper); + + /** + * Produces a {@code DoubleStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + DoubleStream mapToDouble(IntToDoubleFunction mapper); + + /** + * Produces a stream consisting of the results of replacing each + * element of this stream with the contents of the stream + * produced by applying the provided function to each element. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces an {@code IntStream} of new + * values + * @return the new stream + * @see Stream#flatMap(Function) + */ + IntStream flatMap(IntFunction mapper); + + /** + * Produces a stream consisting of the distinct elements of this stream. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + IntStream distinct(); + + /** + * Produces a stream consisting of the elements of this stream in sorted + * order. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + IntStream sorted(); + + /** + * Produces a stream consisting of the elements of this stream, additionally + * performing the provided action on each element as elements are consumed + * from the resulting stream. + * + *

This is an intermediate + * operation. + * + *

For parallel stream pipelines, the action may be called at + * whatever time and in whatever thread the element is made available by the + * upstream operation. If the action modifies shared state, + * it is responsible for providing the required synchronization. + * + * @apiNote This method exists mainly to support debugging, where you want + * to see the elements as they flow past a certain point in a pipeline: + *

{@code
+     *     list.stream()
+     *         .filter(filteringFunction)
+     *         .peek(e -> {System.out.println("Filtered value: " + e); })
+     *         .map(mappingFunction)
+     *         .peek(e -> {System.out.println("Mapped value: " + e); })
+     *         .collect(Collectors.toIntSummaryStatistics());
+     * }
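A compilable sketch of a small pipeline in the same spirit, using only methods declared in this interface (hypothetical helper; the IntStream comes from the caller):

    // requires: import java.util.stream.IntStream;
    static int[] firstSquares(IntStream values, long n) {
        return values.peek(v -> System.out.println("input: " + v)) // observe raw values
                     .map(v -> v * v)                               // square each element
                     .limit(n)                                      // short-circuit after n elements
                     .toArray();                                    // terminal operation
    }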
+ * + * @param consumer A + * non-interfering action to perform on the elements as + * they are consumed from the stream + * @return the new stream + */ + IntStream peek(IntConsumer consumer); + + /** + * Produces a stream consisting of the elements of this stream, + * truncated to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param maxSize the number of elements the stream should be limited to + * @return the new stream + */ + IntStream limit(long maxSize); + + /** + * Produces a stream consisting of the remaining elements of this stream + * after discarding the first {@code startingOffset} elements (or all + * elements if the stream has fewer than {@code startingOffset} elements). + * + *

This is a stateful + * intermediate operation. + * + * @param startingOffset the number of leading elements to skip + * @return the new stream + */ + IntStream substream(long startingOffset); + + /** + * Produces a stream consisting of the elements of this stream after + * discarding the first {@code startingOffset} elements (or all elements + * if the stream has fewer than {@code startingOffset} elements), and + * truncating the remainder to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param startingOffset the starting position of the substream, inclusive + * @param endingOffset the ending position of the substream, exclusive + * @return the new stream + */ + IntStream substream(long startingOffset, long endingOffset); + + /** + * Performs an action for each element of this stream. + * + *

This is a terminal + * operation. + * + *

For parallel stream pipelines, this operation does not + * guarantee to respect the encounter order of the stream, as doing so + * would sacrifice the benefit of parallelism. For any given element, the + * action may be performed at whatever time and in whatever thread the + * library chooses. If the action accesses shared state, it is + * responsible for providing the required synchronization. + * + * @param action A + * non-interfering action to perform on the elements + */ + void forEach(IntConsumer action); + + /** + * Performs an operation for each element of this stream, guaranteeing that + * each element is processed in encounter order for streams that have a + * defined encounter order. + * + *

This is a terminal + * operation. + * + * @param action A + * non-interfering action to perform on the elements + * @see #forEach(IntConsumer) + */ + void forEachOrdered(IntConsumer action); + + /** + * Produces an array containing the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return an array containing the elements of this stream + */ + int[] toArray(); + + /** + * Performs a reduction on the + * elements of this stream, using the provided identity value and + * an associative + * accumulation function, and returns the reduced value. This is equivalent + * to: + *

{@code
+     *     int result = identity;
+     *     for (int element : this stream)
+     *         result = accumulator.apply(result, element);
+     *     return result;
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code identity} value must be an identity for the accumulator + * function. This means that for all {@code x}, + * {@code accumulator.apply(identity, x)} is equal to {@code x}. + * The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @apiNote Sum, min, max, and average are all special cases of reduction. + * Summing a stream of numbers can be expressed as: + * + *

{@code
+     *     int sum = integers.reduce(0, (a, b) -> a+b);
+     * }
+ * + * or more compactly: + * + *
{@code
+     *     int sum = integers.reduce(0, Integer::sum);
+     * }
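Other monoids work the same way; for example, folding flag words with bitwise OR (a sketch, hypothetical helper):

    // requires: import java.util.stream.IntStream;
    // 0 is the identity for |, and | is associative.
    static int unionOfFlags(IntStream flagWords) {
        return flagWords.reduce(0, (a, b) -> a | b);
    }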
+ * + *

While this may seem a more roundabout way to perform an aggregation + * compared to simply mutating a running total in a loop, reduction + * operations parallelize more gracefully, without needing additional + * synchronization and with greatly reduced risk of data races. + * + * @param identity The identity value for the accumulating function + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #sum() + * @see #min() + * @see #max() + * @see #average() + */ + int reduce(int identity, IntBinaryOperator op); + + /** + * Performs a reduction on the + * elements of this stream, using an + * associative accumulation + * function, and returns an {@code OptionalInt} describing the reduced value, + * if any. This is equivalent to: + *

{@code
+     *     boolean foundAny = false;
+     *     int result = 0;
+     *     for (int element : this stream) {
+     *         if (!foundAny) {
+     *             foundAny = true;
+     *             result = element;
+     *         }
+     *         else
+     *             result = accumulator.apply(result, element);
+     *     }
+     *     return foundAny ? OptionalInt.of(result) : OptionalInt.empty();
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code accumulator} function must be an + * associative function. + * + *
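For instance, the greatest common divisor is associative, so it can be folded with this form (a sketch; both helper names are hypothetical):

    // requires: import java.util.OptionalInt; import java.util.stream.IntStream;
    static OptionalInt gcdOfAll(IntStream values) {
        return values.reduce((a, b) -> gcd(a, b)); // empty stream -> OptionalInt.empty()
    }

    static int gcd(int a, int b) {
        return b == 0 ? Math.abs(a) : gcd(b, a % b);
    }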

This is a terminal + * operation. + * + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #reduce(int, IntBinaryOperator) + */ + OptionalInt reduce(IntBinaryOperator op); + + /** + * Performs a mutable + * reduction operation on the elements of this stream. A mutable + * reduction is one in which the reduced value is a mutable value holder, + * such as an {@code ArrayList}, and elements are incorporated by updating + * the state of the result, rather than by replacing the result. This + * produces a result equivalent to: + *

{@code
+     *     R result = resultFactory.get();
+     *     for (int element : this stream)
+     *         accumulator.accept(result, element);
+     *     return result;
+     * }
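For example, accumulating non-negative int values into a BitSet (a sketch; the helper name is hypothetical, and BitSet.set rejects negative indices):

    // requires: import java.util.BitSet; import java.util.stream.IntStream;
    static BitSet toBitSet(IntStream values) {
        return values.collect(BitSet::new,  // resultFactory: fresh set per partial result
                              BitSet::set,  // accumulator: (bits, i) -> bits.set(i)
                              BitSet::or);  // combiner: merge two partial sets
    }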
+ * + * Like {@link #reduce(int, IntBinaryOperator)}, {@code collect} operations + * can be parallelized without requiring additional synchronization. + * + *

This is a terminal + * operation. + * + * @param resultFactory Function that creates a new result container. + * For a parallel execution, this function may be + * called multiple times and must return a fresh value + * each time. + * @param accumulator An associative + * non-interfering, + * stateless function for incorporating an additional + * element into a result + * @param combiner An associative + * non-interfering, + * stateless function for combining two values, which + * must be compatible with the accumulator function + * @param Type of the result + * @return The result of the reduction + * @see Stream#collect(Supplier, BiConsumer, BiConsumer) + */ + R collect(Supplier resultFactory, + ObjIntConsumer accumulator, + BiConsumer combiner); + + /** + * Returns the sum of elements in this stream. This is a special case + * of a reduction + * and is equivalent to: + *

{@code
+     *     return reduce(0, Integer::sum);
+     * }
+ * @return The sum of elements in this stream + */ + int sum(); + + /** + * Returns an {@code OptionalInt} describing the minimal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Integer::min);
+     * }
+ * @return The minimal element of this stream, or an empty + * {@code OptionalInt} + */ + OptionalInt min(); + + /** + * Returns an {@code OptionalInt} describing the maximal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Integer::max);
+     * }
+ * @return The maximal element of this stream, or an empty + * {@code OptionalInt} + */ + OptionalInt max(); + + /** + * Returns the count of elements in this stream. This is a special case of + * a reduction and is + * equivalent to: + *
{@code
+     *     return mapToLong(e -> 1L).sum();
+     * }
+ * @return The count of elements in this stream + */ + long count(); + + /** + * Returns an {@code OptionalDouble} describing the average of elements of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction. + * @return The average of elements in this stream, or an empty + * {@code OptionalDouble} + */ + OptionalDouble average(); + + /** + * Returns an {@code IntSummaryStatistics} describing various + * summary data about the elements of this stream. This is a special + * case of a reduction. + * @return An {@code IntSummaryStatistics} describing various + * summary data about the elements of this stream + */ + IntSummaryStatistics summaryStatistics(); + + /** + * Returns whether any elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this + * stream + * @return True if any elements of the stream match the provided predicate + */ + boolean anyMatch(IntPredicate predicate); + + /** + * Returns whether all elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if all elements of the stream match the provided predicate + */ + boolean allMatch(IntPredicate predicate); + + /** + * Returns whether no elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if no elements of the stream match the provided predicate + */ + boolean noneMatch(IntPredicate predicate); + + /** + * Returns an {@link OptionalInt} describing the first element of this stream + * (in the encounter order), or an empty {@code OptionalInt} if the stream is + * empty. If the stream has no encounter order, then any element may be + * returned. + * + *

This is a short-circuiting + * terminal operation. + * + * @return An {@code OptionalInt} describing the first element of this stream, + * or an empty {@code OptionalInt} if the stream is empty + */ + OptionalInt findFirst(); + + /** + * Returns an {@link OptionalInt} describing some element of the stream, or an + * empty {@code OptionalInt} if the stream is empty. + * + *

This is a short-circuiting + * terminal operation. + * + *

The behavior of this operation is explicitly nondeterministic; it is + * free to select any element in the stream. This is to allow for maximal + * performance in parallel operations; the cost is that multiple invocations + * on the same source may not return the same result. (If the first element + * in the encounter order is desired, use {@link #findFirst()} instead.) + * + * @return An {@code OptionalInt} describing some element of this stream, or an + * empty {@code OptionalInt} if the stream is empty + * @see #findFirst() + */ + OptionalInt findAny(); + + /** + * Returns a {@code LongStream} consisting of the elements of this stream, + * converted to {@code long}. + * @return A {@code LongStream} consisting of the elements of this stream, + * converted to {@code long} + */ + LongStream longs(); + + /** + * Returns a {@code DoubleStream} consisting of the elements of this stream, + * converted to {@code double}. + * @return A {@code DoubleStream} consisting of the elements of this stream, + * converted to {@code double} + */ + DoubleStream doubles(); + + /** + * Returns a {@code Stream} consisting of the elements of this stream, + * boxed to {@code Integer}. + * @return A {@code Stream} consistent of the elements of this stream, + * boxed to {@code Integer} + */ + Stream boxed(); + + @Override + IntStream sequential(); + + @Override + IntStream parallel(); + + @Override + PrimitiveIterator.OfInt iterator(); + + @Override + Spliterator.OfInt spliterator(); +} diff --git a/src/share/classes/java/util/stream/LongStream.java b/src/share/classes/java/util/stream/LongStream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/LongStream.java @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +import java.util.LongSummaryStatistics; +import java.util.OptionalDouble; +import java.util.OptionalLong; +import java.util.PrimitiveIterator; +import java.util.Spliterator; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.LongBinaryOperator; +import java.util.function.LongConsumer; +import java.util.function.LongFunction; +import java.util.function.LongPredicate; +import java.util.function.LongToDoubleFunction; +import java.util.function.LongToIntFunction; +import java.util.function.LongUnaryOperator; +import java.util.function.ObjLongConsumer; +import java.util.function.Supplier; + +/** + * A sequence of primitive long elements supporting sequential and parallel + * bulk operations. Streams support lazy transformative operations (transforming + * a stream to another stream) such as {@code filter} and {@code map}, and + * consuming operations, such as {@code forEach}, {@code findFirst}, and {@code + * iterator}. Once an operation has been performed on a stream, it + * is considered consumed and no longer usable for other operations. + * + *

For sequential stream pipelines, all operations are performed in the + * encounter order of the pipeline + * source, if the pipeline source has a defined encounter order. + * + *

For parallel stream pipelines, unless otherwise specified, intermediate + * stream operations preserve the + * encounter order of their source, and terminal operations + * respect the encounter order of their source, if the source + * has an encounter order. + * + *

Unless otherwise noted, passing a {@code null} argument to any stream + * method may result in a {@link NullPointerException}. + * + * @apiNote + * Streams are not data structures; they do not manage the storage for their + * elements, nor do they support access to individual elements. However, + * you can use the {@link #iterator()} or {@link #spliterator()} operations to + * perform a controlled traversal. + * + * @since 1.8 + * @see java.util.stream + */ +public interface LongStream extends BaseStream { + + /** + * Produces a stream consisting of the elements of this stream that match + * the given predicate. + * + *

This is an intermediate + * operation. + * + * @param predicate A + * non-interfering, stateless predicate to apply to + * each element to determine if it should be included + * @return the new stream + */ + LongStream filter(LongPredicate predicate); + + /** + * Produces a stream consisting of the results of applying the given + * function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper a + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + LongStream map(LongUnaryOperator mapper); + + /** + * Produces an object-valued {@code Stream} consisting of the results of + * applying the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @param The element type of the new stream + * @return the new stream + */ + Stream mapToObj(LongFunction mapper); + + /** + * Produces an {@code IntStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + IntStream mapToInt(LongToIntFunction mapper); + + /** + * Produces a {@code DoubleStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + DoubleStream mapToDouble(LongToDoubleFunction mapper); + + /** + * Produces a stream consisting of the results of replacing each + * element of this stream with the contents of the stream + * produced by applying the provided function to each element. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces an stream of new + * values + * @return the new stream + * @see Stream#flatMap(Function) + */ + LongStream flatMap(LongFunction mapper); + + /** + * Produces a stream consisting of the distinct elements of this stream. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + LongStream distinct(); + + /** + * Produces a stream consisting of the elements of this stream in sorted + * order. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + LongStream sorted(); + + /** + * Produces a stream consisting of the elements of this stream, additionally + * performing the provided action on each element as elements are consumed + * from the resulting stream. + * + *

This is an intermediate + * operation. + * + *

For parallel stream pipelines, the action may be called at + * whatever time and in whatever thread the element is made available by the + * upstream operation. If the action modifies shared state, + * it is responsible for providing the required synchronization. + * + * @apiNote This method exists mainly to support debugging, where you want + * to see the elements as they flow past a certain point in a pipeline: + *

{@code
+     *     list.stream()
+     *         .filter(filteringFunction)
+     *         .peek(e -> {System.out.println("Filtered value: " + e); })
+     *         .map(mappingFunction)
+     *         .peek(e -> {System.out.println("Mapped value: " + e); })
+     *         .collect(Collectors.toLongSummaryStatistics());
+     * }
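A compilable sketch of the same idea, pairing peek with summaryStatistics() (hypothetical helper; the LongStream comes from the caller):

    // requires: import java.util.LongSummaryStatistics; import java.util.stream.LongStream;
    static void describe(LongStream values) {
        LongSummaryStatistics stats = values.peek(v -> System.out.println("seen: " + v))
                                            .summaryStatistics();
        System.out.println(stats.getCount() + " values, max " + stats.getMax());
    }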
+ * + * @param consumer A + * non-interfering action to perform on the elements as + * they are consumed from the stream + * @return the new stream + */ + LongStream peek(LongConsumer consumer); + + /** + * Produces a stream consisting of the elements of this stream, + * truncated to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param maxSize the number of elements the stream should be limited to + * @return the new stream + */ + LongStream limit(long maxSize); + + /** + * Produces a stream consisting of the remaining elements of this stream + * after discarding the first {@code startingOffset} elements (or all + * elements if the stream has fewer than {@code startingOffset} elements). + * + *

This is a stateful + * intermediate operation. + * + * @param startingOffset the number of leading elements to skip + * @return the new stream + */ + LongStream substream(long startingOffset); + + /** + * Produces a stream consisting of the elements of this stream after + * discarding the first {@code startingOffset} elements (or all elements + * if the stream has fewer than {@code startingOffset} elements), and + * truncating the remainder to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param startingOffset the starting position of the substream, inclusive + * @param endingOffset the ending position of the substream, exclusive + * @return the new stream + */ + LongStream substream(long startingOffset, long endingOffset); + + /** + * Performs an action for each element of this stream. + * + *

This is a terminal + * operation. + * + *

For parallel stream pipelines, this operation does not + * guarantee to respect the encounter order of the stream, as doing so + * would sacrifice the benefit of parallelism. For any given element, the + * action may be performed at whatever time and in whatever thread the + * library chooses. If the action accesses shared state, it is + * responsible for providing the required synchronization. + * + * @param action A + * non-interfering action to perform on the elements + */ + void forEach(LongConsumer action); + + /** + * Performs an action for each element of this stream, guaranteeing that + * each element is processed in encounter order for streams that have a + * defined encounter order. + * + *

This is a terminal + * operation. + * + * @param action A + * non-interfering action to perform on the elements + * @see #forEach(LongConsumer) + */ + void forEachOrdered(LongConsumer action); + + /** + * Produces an array containing the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return an array containing the elements of this stream + */ + long[] toArray(); + + /** + * Performs a reduction on the + * elements of this stream, using the provided identity value and + * an associative + * accumulation function, and returns the reduced value. This is equivalent + * to: + *

{@code
+     *     long result = identity;
+     *     for (long element : this stream)
+     *         result = accumulator.apply(result, element);
+     *     return result;
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code identity} value must be an identity for the accumulator + * function. This means that for all {@code x}, + * {@code accumulator.apply(identity, x)} is equal to {@code x}. + * The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @apiNote Sum, min, max, and average are all special cases of reduction. + * Summing a stream of numbers can be expressed as: + * + *

{@code
+     *     long sum = integers.reduce(0, (a, b) -> a+b);
+     * }
+ * + * or more compactly: + * + *
{@code
+     *     long sum = integers.reduce(0, Long::sum);
+     * }
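The identity requirement admits other operations as well; for example, Long.MIN_VALUE is an identity for Math.max over longs (a sketch, hypothetical helper; note that an empty stream yields Long.MIN_VALUE):

    // requires: import java.util.stream.LongStream;
    // Math.max is associative, and max(Long.MIN_VALUE, x) == x for every long x.
    static long latestTimestamp(LongStream timestamps) {
        return timestamps.reduce(Long.MIN_VALUE, Math::max);
    }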
+ * + *

While this may seem a more roundabout way to perform an aggregation + * compared to simply mutating a running total in a loop, reduction + * operations parallelize more gracefully, without needing additional + * synchronization and with greatly reduced risk of data races. + * + * @param identity The identity value for the accumulating function + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #sum() + * @see #min() + * @see #max() + * @see #average() + */ + long reduce(long identity, LongBinaryOperator op); + + /** + * Performs a reduction on the + * elements of this stream, using an + * associative accumulation + * function, and returns an {@code OptionalLong} describing the reduced value, + * if any. This is equivalent to: + *

{@code
+     *     boolean foundAny = false;
+     *     long result = 0;
+     *     for (long element : this stream) {
+     *         if (!foundAny) {
+     *             foundAny = true;
+     *             result = element;
+     *         }
+     *         else
+     *             result = accumulator.apply(result, element);
+     *     }
+     *     return foundAny ? OptionalLong.of(result) : OptionalLong.empty();
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @param op An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #reduce(long, LongBinaryOperator) + */ + OptionalLong reduce(LongBinaryOperator op); + + /** + * Performs a mutable + * reduction operation on the elements of this stream. A mutable + * reduction is one in which the reduced value is a mutable value holder, + * such as an {@code ArrayList}, and elements are incorporated by updating + * the state of the result, rather than by replacing the result. This + * produces a result equivalent to: + *

{@code
+     *     R result = resultFactory.get();
+     *     for (long element : this stream)
+     *         accumulator.accept(result, element);
+     *     return result;
+     * }
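For example, the three-argument form maps directly onto LongSummaryStatistics, whose accept and combine methods have exactly the required shapes (a sketch; the helper name is hypothetical):

    // requires: import java.util.LongSummaryStatistics; import java.util.stream.LongStream;
    static LongSummaryStatistics stats(LongStream values) {
        return values.collect(LongSummaryStatistics::new,     // resultFactory
                              LongSummaryStatistics::accept,   // accumulator
                              LongSummaryStatistics::combine); // combiner
    }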
+ * + * Like {@link #reduce(long, LongBinaryOperator)}, {@code collect} operations + * can be parallelized without requiring additional synchronization. + * + *

This is a terminal + * operation. + * + * @param resultFactory Function that creates a new result container. + * For a parallel execution, this function may be + * called multiple times and must return a fresh value + * each time. + * @param accumulator An associative + * non-interfering, + * stateless function for incorporating an additional + * element into a result + * @param combiner An associative + * non-interfering, + * stateless function for combining two values, which + * must be compatible with the accumulator function + * @param Type of the result + * @return The result of the reduction + * @see Stream#collect(Supplier, BiConsumer, BiConsumer) + */ + R collect(Supplier resultFactory, + ObjLongConsumer accumulator, + BiConsumer combiner); + + /** + * Returns the sum of elements in this stream. This is a special case + * of a reduction + * and is equivalent to: + *

{@code
+     *     return reduce(0, Long::sum);
+     * }
+ * @return The sum of elements in this stream + */ + long sum(); + + /** + * Returns an {@code OptionalLong} describing the minimal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Long::min);
+     * }
+ * @return The minimal element of this stream, or an empty + * {@code OptionalLong} + */ + OptionalLong min(); + + /** + * Returns an {@code OptionalLong} describing the maximal element of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction + * and is equivalent to: + *
{@code
+     *     return reduce(Long::max);
+     * }
+ * @return The maximal element of this stream, or an empty + * {@code OptionalLong} + */ + OptionalLong max(); + + /** + * Returns the count of elements in this stream. This is a special case of + * a reduction and is + * equivalent to: + *
{@code
+     *     return map(e -> 1L).sum();
+     * }
+ * @return The count of elements in this stream + */ + long count(); + + /** + * Returns an {@code OptionalDouble} describing the average of elements of this + * stream, or an empty optional if this stream is empty. This is a special + * case of a reduction. + * @return The average of elements in this stream, or an empty + * {@code OptionalDouble} + */ + OptionalDouble average(); + + /** + * Returns a {@code LongSummaryStatistics} describing various + * summary data about the elements of this stream. This is a special + * case of a reduction. + * @return A {@code LongSummaryStatistics} describing various + * summary data about the elements of this stream + */ + LongSummaryStatistics summaryStatistics(); + + /** + * Returns whether any elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this + * stream + * @return True if any elements of the stream match the provided predicate + */ + boolean anyMatch(LongPredicate predicate); + + /** + * Returns whether all elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if all elements of the stream match the provided predicate + */ + boolean allMatch(LongPredicate predicate); + + /** + * Returns whether no elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if no elements of the stream match the provided predicate + */ + boolean noneMatch(LongPredicate predicate); + + /** + * Returns an {@link OptionalLong} describing the first element of this stream + * (in the encounter order), or an empty {@code OptionalLong} if the stream is + * empty. If the stream has no encounter order, then any element may be + * returned. + * + *

This is a short-circuiting + * terminal operation. + * + * @return An {@code OptionalLong} describing the first element of this stream, + * or an empty {@code OptionalLong} if the stream is empty + */ + OptionalLong findFirst(); + + /** + * Returns an {@link OptionalLong} describing some element of the stream, or an + * empty {@code OptionalLong} if the stream is empty. + * + *

This is a short-circuiting + * terminal operation. + * + *

The behavior of this operation is explicitly nondeterministic; it is + * free to select any element in the stream. This is to allow for maximal + * performance in parallel operations; the cost is that multiple invocations + * on the same source may not return the same result. (If the first element + * in the encounter order is desired, use {@link #findFirst()} instead.) + * + * @return An {@code OptionalLong} describing some element of this stream, or an + * empty {@code OptionalLong} if the stream is empty + * @see #findFirst() + */ + OptionalLong findAny(); + + /** + * Returns a {@code DoubleStream} consisting of the elements of this stream, + * converted to {@code double}. + * @return A {@code DoubleStream} consisting of the elements of this stream, + * converted to {@code double} + */ + DoubleStream doubles(); + + /** + * Returns a {@code Stream} consisting of the elements of this stream, + * boxed to {@code Long}. + * @return A {@code Stream} consistent of the elements of this stream, + * boxed to {@code Long} + */ + Stream boxed(); + + @Override + LongStream sequential(); + + @Override + LongStream parallel(); + + @Override + PrimitiveIterator.OfLong iterator(); + + @Override + Spliterator.OfLong spliterator(); +} diff --git a/src/share/classes/java/util/stream/Stream.java b/src/share/classes/java/util/stream/Stream.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Stream.java @@ -0,0 +1,759 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.util.stream; + +import java.util.Comparator; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongFunction; + +// @@@ Specification to-do list @@@ +// - Describe the difference between sequential and parallel streams +// - More general information about reduce, better definitions for associativity, more description of +// how reduce employs parallelism, more examples +// - Role of stream flags in various operations, specifically ordering +// - Whether each op preserves encounter order +// @@@ Specification to-do list @@@ + +/** + * A sequence of elements supporting sequential and parallel bulk operations. + * Streams support lazy intermediate operations (transforming a stream to + * another stream) such as {@code filter} and {@code map}, and terminal + * operations (consuming the contents of a stream to produce a result or + * side-effect), such as {@code forEach}, {@code findFirst}, and {@code + * iterator}. Once an operation has been performed on a stream, it + * is considered consumed and no longer usable for other operations. + * + *

For sequential stream pipelines, all operations are performed in the + * encounter order of the pipeline + * source, if the pipeline source has a defined encounter order. + * + *

For parallel stream pipelines, unless otherwise specified, intermediate + * stream operations preserve the + * encounter order of their source, and terminal operations + * respect the encounter order of their source, if the source + * has an encounter order. Provided that the parameters to stream operations + * satisfy the non-interference + * requirements, and excepting differences arising from the absence of + * a defined encounter order, the result of a stream pipeline should be + * stable across multiple executions of the same operations on the same source. + * However, the timing and thread in which side-effects occur (for those + * operations which are allowed to produce side-effects, such as + * {@link #forEach(Consumer)}), are explicitly nondeterministic for parallel + * execution of stream pipelines. + * + *

Unless otherwise noted, passing a {@code null} argument to any stream + * method may result in a {@link NullPointerException}. + * + * @apiNote + * Streams are not data structures; they do not manage the storage for their + * elements, nor do they support access to individual elements. However, + * you can use the {@link #iterator()} or {@link #spliterator()} operations to + * perform a controlled traversal. + * + * @param <T> Type of elements. + * @since 1.8 + * @see java.util.stream + */ +public interface Stream<T> extends BaseStream<T, Stream<T>> { + + /** + * Produces a stream consisting of the elements of this stream that match + * the given predicate. + * + *

This is an intermediate + * operation. + * + * @param predicate A + * non-interfering, stateless predicate to apply to + * each element to determine if it should be included + * @return the new stream + */ + Stream filter(Predicate predicate); + + /** + * Produces a stream consisting of the results of applying the given + * function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper a + * non-interfering, stateless function to apply to + * each element + * @param The element type of the new stream + * @return the new stream + */ + Stream map(Function mapper); + + /** + * Produces an {@code IntStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an + * intermediate operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + IntStream mapToInt(ToIntFunction mapper); + + /** + * Produces a {@code LongStream} consisting of the results of applying the + * given function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + LongStream mapToLong(ToLongFunction mapper); + + /** + * Produces a {@code DoubleStream} consisting of the results of applying + * the given function to the elements of this stream. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element + * @return the new stream + */ + DoubleStream mapToDouble(ToDoubleFunction mapper); + + /** + * Produces a stream consisting of the results of replacing each + * element of this stream with the contents of the stream produced + * by applying the provided mapping function to each element. If the result + * of the mapping function is {@code null}, this is treated as if the + * result is an empty stream. + * + *

This is an intermediate + * operation. + * + * @apiNote + * The {@code flatMap()} operation has the effect of applying a one-to-many + * transformation to the elements of the stream, and then flattening the + * resulting elements into a new stream. For example, if {@code orders} + * is a stream of purchase orders, and each purchase order contains a + * collection of line items, then the following produces a stream of line + * items: + *

{@code
+     *     orderStream.flatMap(order -> order.getLineItems().stream())...
+     * }
+ * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces a stream of new values + * @param The element type of the new stream + * @return the new stream + */ + Stream flatMap(Function> mapper); + + /** + * Produces an {@code IntStream} consisting of the results of replacing each + * element of this stream with the contents of the stream produced + * by applying the provided mapping function to each element. If the result + * of the mapping function is {@code null}, this is treated as if the + * result is an empty stream. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces a stream of new values + * @return the new stream + */ + IntStream flatMapToInt(Function mapper); + + /** + * Produces a {@code LongStream} consisting of the results of replacing each + * element of this stream with the contents of the stream produced + * by applying the provided mapping function to each element. If the result + * of the mapping function is {@code null}, this is treated as if the + * result is an empty stream. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces a stream of new values + * @return the new stream + */ + LongStream flatMapToLong(Function mapper); + + /** + * Produces a {@code DoubleStream} consisting of the results of replacing each + * element of this stream with the contents of the stream produced + * by applying the provided mapping function to each element. If the result + * of the mapping function is {@code null}, this is treated as if the + * result is an empty stream. + * + *

This is an intermediate + * operation. + * + * @param mapper A + * non-interfering, stateless function to apply to + * each element which produces a stream of new values + * @return the new stream + */ + DoubleStream flatMapToDouble(Function mapper); + + /** + * Produces a stream consisting of the distinct elements (according to + * {@link Object#equals(Object)}) of this stream. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + Stream distinct(); + + /** + * Produces a stream consisting of the elements of this stream, sorted + * according to natural order. If the elements of this stream are not + * {@code Comparable}, a {@code java.lang.ClassCastException} + * may be thrown when the stream pipeline is executed. + * + *

This is a stateful + * intermediate operation. + * @return the new stream + */ + Stream sorted(); + + /** + * Produces a stream consisting of the elements of this stream, sorted + * according to the provided {@code Comparator}. + * + *

This is a stateful + * intermediate operation. + * @param comparator A + * non-interfering, stateless {@code Comparator} to + * be used to compare stream elements + * @return the new stream + */ + Stream sorted(Comparator comparator); + + /** + * Produces a stream consisting of the elements of this stream, additionally + * performing the provided action on each element as elements are consumed + * from the resulting stream. + * + *

This is an intermediate + * operation. + * + *

For parallel stream pipelines, the action may be called at + * whatever time and in whatever thread the element is made available by the + * upstream operation. If the action modifies shared state, + * it is responsible for providing the required synchronization. + * + * @apiNote This method exists mainly to support debugging, where you want + * to see the elements as they flow past a certain point in a pipeline: + *

{@code
+     *     list.stream()
+     *         .filter(filteringFunction)
+     *         .peek(e -> {System.out.println("Filtered value: " + e); });
+     *         .map(mappingFunction)
+     *         .peek(e -> {System.out.println("Mapped value: " + e); });
+     *         .collect(Collectors.intoList());
+     * }
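[Editor's note: a compilable, self-contained variant of the debugging pipeline above, not part of this changeset; the list contents and the filter/map functions are invented purely for illustration, and {@code Collectors.toList} is the final Java 8 name.]

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class PeekSketch {
        public static void main(String[] args) {
            List<String> result = Arrays.asList("a", "bb", "ccc").stream()
                    .filter(s -> s.length() > 1)
                    .peek(e -> System.out.println("Filtered value: " + e))
                    .map(String::toUpperCase)
                    .peek(e -> System.out.println("Mapped value: " + e))
                    .collect(Collectors.toList());
            System.out.println(result);  // [BB, CCC]
        }
    }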
+ * + * @param consumer A + * non-interfering action to perform on the elements as + * they are consumed from the stream + * @return the new stream + */ + Stream peek(Consumer consumer); + + /** + * Produces a stream consisting of the elements of this stream, + * truncated to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param maxSize the number of elements the stream should be limited to + * @return the new stream + */ + Stream limit(long maxSize); + + /** + * Produces a stream consisting of the remaining elements of this stream + * after discarding the first {@code startingOffset} elements (or all + * elements if the stream has fewer than {@code startingOffset} elements). + * + *

This is a stateful + * intermediate operation. + * + * @param startingOffset the number of leading elements to skip + * @return the new stream + */ + Stream substream(long startingOffset); + + /** + * Produces a stream consisting of the elements of this stream after + * discarding the first {@code startingOffset} elements (or all elements + * if the stream has fewer than {@code startingOffset} elements), and + * truncating the remainder to be no longer than {@code maxSize} in length. + * + *

This is a short-circuiting + * stateful intermediate operation. + * + * @param startingOffset the starting position of the substream, inclusive + * @param endingOffset the ending position of the substream, exclusive + * @return the new stream + */ + Stream substream(long startingOffset, long endingOffset); + + /** + * Performs an action for each element of this stream. + * + *

This is a terminal + * operation. + * + *

For parallel stream pipelines, this operation does not + * guarantee to respect the encounter order of the stream, as doing so + * would sacrifice the benefit of parallelism. For any given element, the + * action may be performed at whatever time and in whatever thread the + * library chooses. If the action accesses shared state, it is + * responsible for providing the required synchronization. + * + * @param action A + * non-interfering action to perform on the elements + */ + void forEach(Consumer action); + + /** + * Performs an action for each element of this stream, guaranteeing that + * each element is processed in encounter order for streams that have a + * defined encounter order. + * + *

This is a terminal + * operation. + * + * @param action A + * non-interfering action to perform on the elements + * @see #forEach(Consumer) + */ + void forEachOrdered(Consumer action); + + /** + * Produces an array containing the elements of this stream. + * + *

This is a terminal + * operation. + * + * @return an array containing the elements of this stream + */ + Object[] toArray(); + + /** + * Produces an array containing the elements of this stream, using the + * provided {@code generator} function to allocate the returned array. + * + *

This is a terminal + * operation. + * + * @param generator a function which produces a new array of the desired + * type and the provided length + * @param the element type of the resulting array + * @return an array containing the elements in this stream + * @throws ArrayStoreException if the runtime type of the array returned + * from the array generator is not a supertype of the runtime type of every + * element in this stream + */ + A[] toArray(IntFunction generator); + + /** + * Performs a reduction on the + * elements of this stream, using the provided identity value and + * an associative + * accumulation function, and returns the reduced value. This is equivalent + * to: + *

{@code
+     *     T result = identity;
+     *     for (T element : this stream)
+     *         result = accumulator.apply(result, element)
+     *     return result;
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code identity} value must be an identity for the accumulator + * function. This means that for all {@code t}, + * {@code accumulator.apply(identity, t)} is equal to {@code t}. + * The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @apiNote Sum, min, max, average, and string concatenation are all special + * cases of reduction. Summing a stream of numbers can be expressed as: + * + *

{@code
+     *     Integer sum = integers.reduce(0, (a, b) -> a+b);
+     * }
+ * + * or more compactly: + * + *
{@code
+     *     Integer sum = integers.reduce(0, Integer::sum);
+     * }
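[Editor's note: an illustrative sketch, not part of this changeset, of why the accumulator must be associative; {@code Arrays.asList} and {@code parallelStream} are from the final Java 8 API.]

    import java.util.Arrays;
    import java.util.List;

    public class AssociativitySketch {
        public static void main(String[] args) {
            List<Integer> integers = Arrays.asList(1, 2, 3, 4, 5);

            // Integer::sum is associative, so sequential and parallel reduction agree.
            int sum = integers.parallelStream().reduce(0, Integer::sum);   // always 15

            // Subtraction is NOT associative; a parallel reduction with it may
            // give different answers on different runs, so it must be avoided.
            int bogus = integers.parallelStream().reduce(0, (a, b) -> a - b);

            System.out.println(sum + " " + bogus);
        }
    }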
+ * + *

While this may seem a more roundabout way to perform an aggregation + * compared to simply mutating a running total in a loop, reduction + * operations parallelize more gracefully, without needing additional + * synchronization and with greatly reduced risk of data races. + * + * @param identity The identity value for the accumulating function + * @param accumulator An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + */ + T reduce(T identity, BinaryOperator accumulator); + + /** + * Performs a reduction on the + * elements of this stream, using an + * associative accumulation + * function, and returns an {@code Optional} describing the reduced value, + * if any. This is equivalent to: + *

{@code
+     *     boolean foundAny = false;
+     *     T result = null;
+     *     for (T element : this stream) {
+     *         if (!foundAny) {
+     *             foundAny = true;
+     *             result = element;
+     *         }
+     *         else
+     *             result = accumulator.apply(result, element);
+     *     }
+     *     return foundAny ? Optional.of(result) : Optional.empty();
+     * }
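[Editor's note: a short usage sketch, not part of this changeset, of the {@code Optional}-returning reduce described above; the input strings are invented for illustration.]

    import java.util.Arrays;
    import java.util.Optional;

    public class OptionalReduceSketch {
        public static void main(String[] args) {
            // Pick the longest string; an empty stream would yield an empty Optional.
            Optional<String> longest = Arrays.asList("cat", "gerbil", "ox").stream()
                    .reduce((a, b) -> a.length() >= b.length() ? a : b);
            System.out.println(longest.orElse("<empty>"));  // gerbil
        }
    }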
+ * + * but is not constrained to execute sequentially. + * + *

The {@code accumulator} function must be an + * associative function. + * + *

This is a terminal + * operation. + * + * @param accumulator An associative + * non-interfering, + * stateless function for combining two values + * @return The result of the reduction + * @see #reduce(Object, BinaryOperator) + */ + Optional reduce(BinaryOperator accumulator); + + /** + * Performs a reduction on the + * elements of this stream, using the provided identity, accumulation + * function, and a combining functions. This is equivalent to: + *

{@code
+     *     U result = identity;
+     *     for (T element : this stream)
+     *         result = accumulator.apply(result, element)
+     *     return result;
+     * }
+ * + * but is not constrained to execute sequentially. + * + *

The {@code identity} value must be an identity for the combiner + * function. This means that for all {@code u}, {@code combiner(identity, u)} + * is equal to {@code u}. Additionally, the {@code combiner} function + * must be compatible with the {@code accumulator} function; for all + * {@code u} and {@code t}, the following must hold: + *

{@code
+     *     combiner.apply(u, accumulator.apply(identity, t)) == accumulator.apply(u, t)
+     * }
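[Editor's note: a minimal sketch, not part of this changeset, of the three-argument reduce form whose compatibility condition is stated above; it fuses a map step (string length) into the accumulator, with {@code Integer::sum} as the combiner.]

    import java.util.Arrays;
    import java.util.List;

    public class FusedReduceSketch {
        public static void main(String[] args) {
            List<String> words = Arrays.asList("alpha", "beta", "gamma");
            // identity = 0; the accumulator folds a String into an int;
            // combiner(u, accumulator(identity, t)) == accumulator(u, t) holds here.
            int totalLength = words.parallelStream()
                                   .reduce(0, (len, s) -> len + s.length(), Integer::sum);
            System.out.println(totalLength);  // 14
        }
    }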
+ * + *

This is a terminal operation. + * + * @apiNote Many reductions using this form can be represented more simply + * by an explicit combination of {@code map} and {@code reduce} operations. + * The {@code accumulator} function acts as a fused mapper and accumulator, + * which can sometimes be more efficient than separate mapping and reduction, + * such as in the case where knowing the previously reduced value allows you + * to avoid some computation. + * + * @param identity The identity value for the combiner function + * @param accumulator An associative + * non-interfering, + * stateless function for incorporating an additional + * element into a result + * @param combiner An associative + * non-interfering, + * stateless function for combining two values, which + * must be compatible with the accumulator function + * @param The type of the result + * @return The result of the reduction + * @see #reduce(BinaryOperator) + * @see #reduce(Object, BinaryOperator) + */ + U reduce(U identity, + BiFunction accumulator, + BinaryOperator combiner); + + /** + * Performs a mutable + * reduction operation on the elements of this stream. A mutable + * reduction is one in which the reduced value is a mutable value holder, + * such as an {@code ArrayList}, and elements are incorporated by updating + * the state of the result, rather than by replacing the result. This + * produces a result equivalent to: + *

{@code
+     *     R result = resultFactory.get();
+     *     for (T element : this stream)
+     *         accumulator.accept(result, element);
+     *     return result;
+     * }
+ * + * Like {@link #reduce(Object, BinaryOperator)}, {@code collect} operations + * can be parallelized without requiring additional synchronization. + * + *

This is a terminal + * operation. + * + * @apiNote There are many existing classes in the JDK whose signatures are + * a good match for use as arguments to {@code collect()}. For example, + * the following will accumulate strings into an ArrayList: + *

{@code
+     *     List<String> asList = stringStream.collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+     * }
+ * + * The following will take a stream of strings and concatenate them into a + * single string: + *
{@code
+     *     String concat = stringStream.collect(StringBuilder::new, StringBuilder::append,
+     *                                          StringBuilder::append)
+     *                                 .toString();
+     * }
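[Editor's note: a runnable, self-contained version of the two {@code collect} examples above, not part of this changeset; the source list is invented for illustration.]

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class CollectSketch {
        public static void main(String[] args) {
            List<String> strings = Arrays.asList("x", "y", "z");

            // Accumulate into an ArrayList using supplier / accumulator / combiner.
            List<String> asList = strings.stream()
                    .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);

            // Concatenate into a single String via a StringBuilder container.
            String concat = strings.stream()
                    .collect(StringBuilder::new, StringBuilder::append, StringBuilder::append)
                    .toString();

            System.out.println(asList + " " + concat);  // [x, y, z] xyz
        }
    }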
+ * + * @param resultFactory Function that creates a new result container. + * For a parallel execution, this function may be + * called multiple times and must return a fresh value + * each time. + * @param accumulator An associative + * non-interfering, + * stateless function for incorporating an additional + * element into a result + * @param combiner An associative + * non-interfering, + * stateless function for combining two values, which + * must be compatible with the accumulator function + * @param Type of the result + * @return The result of the reduction + */ + R collect(Supplier resultFactory, + BiConsumer accumulator, + BiConsumer combiner); + + /** Performs a mutable + * reduction operation on the elements of this stream using a + * {@code Collector} object to describe the reduction. A {@code Collector} + * encapsulates the functions used as arguments to + * {@link #collect(Supplier, BiConsumer, BiConsumer)}, allowing for reuse of + * collection strategies, and composition of collect operations such as + * multiple-level grouping or partitioning. + * + *

This is a terminal + * operation. + * + *

When executed in parallel, multiple intermediate results may be + * instantiated, populated, and merged, so as to maintain isolation of + * mutable data structures. Therefore, even when executed in parallel + * with non-thread-safe data structures (such as {@code ArrayList}), no + * additional synchronization is needed for a parallel reduction. + * + * @apiNote + * The following will accumulate strings into an ArrayList: + *

{@code
+     *     List<String> asList = stringStream.collect(Collectors.toList());
+     * }
+ * + * The following will classify {@code Person} objects by city: + *
{@code
+     *     Map<String, List<Person>> peopleByCity
+     *         = personStream.collect(Collectors.groupingBy(Person::getCity));
+     * }
+ * + * The following will classify {@code Person} objects by state and city, + * cascading two {@code Collector}s together: + *
{@code
+     *     Map<String, Map<String, List<Person>>> peopleByStateAndCity
+     *         = personStream.collect(Collectors.groupingBy(Person::getState,
+     *                                                      Collectors.groupingBy(Person::getCity)));
+     * }
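[Editor's note: a self-contained grouping sketch, not part of this changeset. The {@code Person} type here is hypothetical, and {@code Collectors.groupingBy} is the name that shipped in the final Java 8 API.]

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class GroupingSketch {
        // Hypothetical Person type, only for illustration.
        static final class Person {
            final String city;
            Person(String city) { this.city = city; }
            String getCity() { return city; }
        }

        public static void main(String[] args) {
            List<Person> people = Arrays.asList(
                    new Person("Oslo"), new Person("Lima"), new Person("Oslo"));
            Map<String, List<Person>> peopleByCity =
                    people.stream().collect(Collectors.groupingBy(Person::getCity));
            System.out.println(peopleByCity.keySet());  // e.g. [Oslo, Lima]
        }
    }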
+ * + * @param collector The {@code Collector} describing the reduction + * @param The type of the result + * @return The result of the reduction + * @see #collect(Supplier, BiConsumer, BiConsumer) + * @see Collectors + */ + R collect(Collector collector); + + /** + * Returns the maximal element of this stream according to the provided + * {@code Comparator}. This is a special case of a + * reduction. + * + *

This is a terminal + * operation. + * + * @param comparator A non-interfering, + * stateless {@code Comparator} to use to compare + * elements of this stream + * @return An {@code Optional} describing the maximal element of this stream, + * or an empty {@code Optional} if the stream is empty + */ + Optional max(Comparator comparator); + + /** + * Returns the minimal element of this stream according to the provided + * {@code Comparator}. This is a special case of a + * reduction. + * + *

This is a terminal operation. + * + * @param comparator A non-interfering, + * stateless {@code Comparator} to use to compare + * elements of this stream + * @return An {@code Optional} describing the minimal element of this stream, + * or an empty {@code Optional} if the stream is empty + */ + Optional min(Comparator comparator); + + /** + * Returns the count of elements in this stream. This is a special case of a + * reduction. + * + *

This is a terminal operation. + * + * @return The count of elements in this stream. + */ + long count(); + + /** + * Returns whether any elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if not + * necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this + * stream + * @return True if any elements of the stream match the provided predicate + */ + boolean anyMatch(Predicate predicate); + + /** + * Returns whether all elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if all elements of the stream match the provided predicate + */ + boolean allMatch(Predicate predicate); + + /** + * Returns whether no elements of this stream match the provided + * predicate. May not evaluate the predicate on all elements if + * not necessary for determining the result. + * + *

This is a short-circuiting + * terminal operation. + * + * @param predicate A non-interfering, + * stateless predicate to apply to elements of this stream + * @return True if no elements of the stream match the provided predicate + */ + boolean noneMatch(Predicate predicate); + + /** + * Returns an {@link Optional} describing the first element of this stream + * (in the encounter order), or an empty {@code Optional} if the stream is + * empty. If the stream has no encounter order, then any element may be + * returned. + * + *

This is a short-circuiting + * terminal operation. + * + * @return An {@code Optional} describing the first element of this stream, + * or an empty {@code Optional} if the stream is empty + * @throws NullPointerException if the element selected is null + */ + Optional findFirst(); + + /** + * Returns an {@link Optional} describing some element of the stream, or an + * empty {@code Optional} if the stream is empty. + * + *

This is a short-circuiting + * terminal operation. + * + *

The behavior of this operation is explicitly nondeterministic; it is + * free to select any element in the stream. This is to allow for maximal + * performance in parallel operations; the cost is that multiple invocations + * on the same source may not return the same result. (If the first element + * in the encounter order is desired, use {@link #findFirst()} instead.) + * + * @return An {@code Optional} describing some element of this stream, or an + * empty {@code Optional} if the stream is empty + * @throws NullPointerException if the element selected is null + * @see #findFirst() + */ + Optional findAny(); +} diff --git a/src/share/classes/java/util/stream/package-info.java b/src/share/classes/java/util/stream/package-info.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/package-info.java @@ -0,0 +1,565 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + *

java.util.stream

+ * + * Classes to support functional-style operations on streams of values, as in the following: + * + *
{@code
+ *     int sumOfWeights = blocks.stream().filter(b -> b.getColor() == RED)
+ *                                       .mapToInt(b -> b.getWeight())
+ *                                       .sum();
+ * }
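[Editor's note: a self-contained version of the filter-map-sum example above, not part of this changeset; the {@code Block} type and its color/weight values are invented purely for illustration.]

    import java.util.Arrays;
    import java.util.List;

    public class BlockSumSketch {
        enum Color { RED, BLUE }

        // Hypothetical Block type used only for this sketch.
        static final class Block {
            final Color color; final int weight;
            Block(Color color, int weight) { this.color = color; this.weight = weight; }
            Color getColor() { return color; }
            int getWeight()  { return weight; }
        }

        public static void main(String[] args) {
            List<Block> blocks = Arrays.asList(
                    new Block(Color.RED, 2), new Block(Color.BLUE, 5), new Block(Color.RED, 7));
            int sumOfWeights = blocks.stream()
                                     .filter(b -> b.getColor() == Color.RED)
                                     .mapToInt(Block::getWeight)
                                     .sum();
            System.out.println(sumOfWeights);  // 9
        }
    }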
+ * + *

Here we use {@code blocks}, which might be a {@code Collection}, as a source for a stream, + * and then perform a filter-map-reduce ({@code sum()} is an example of a reduction + * operation) on the stream to obtain the sum of the weights of the red blocks. + * + *

The key abstraction used in this approach is {@link java.util.stream.Stream}, as well as its primitive + * specializations {@link java.util.stream.IntStream}, {@link java.util.stream.LongStream}, + * and {@link java.util.stream.DoubleStream}. Streams differ from Collections in several ways: + * + *

    + *
  • No storage. A stream is not a data structure that stores elements; instead, it + * carries values from a source (which could be a data structure, a generator, an IO channel, etc.) + * through a pipeline of computational operations.
  • Functional in nature. An operation on a stream produces a result, but does not modify + * its underlying data source. For example, filtering a {@code Stream} produces a new {@code Stream}, + * rather than removing elements from the underlying source.
  • Laziness-seeking. Many stream operations, such as filtering, mapping, or duplicate removal, + * can be implemented lazily, exposing opportunities for optimization. (For example, "find the first + * {@code String} matching a pattern" need not examine all the input strings.) Stream operations + * are divided into intermediate ({@code Stream}-producing) operations and terminal (value-producing) + * operations; all intermediate operations are lazy.
  • Possibly unbounded. While collections have a finite size, streams need not. Operations + * such as {@code limit(n)} or {@code findFirst()} can allow computations on infinite streams + * to complete in finite time.
+ * + *

Stream pipelines

+ * + *

Streams are used to create pipelines of operations. A + * complete stream pipeline has several components: a source (which may be a {@code Collection}, + * an array, a generator function, or an IO channel); zero or more intermediate operations + * such as {@code Stream#filter} or {@code Stream#map}; and a terminal operation such + * as {@code Stream#forEach} or {@code Stream#reduce}. Stream operations may take as parameters + * function values (which are often lambda expressions, but could be method references + * or objects) which parameterize the behavior of the operation, such as a {@code Predicate} + * passed to the {@code Stream#filter} method. + * + *

Intermediate operations return a new {@code Stream}. They are lazy; executing an + * intermediate operation such as {@code Stream#filter} does not actually perform any filtering, + * instead creating a new {@code Stream} that, when traversed, contains the elements of the + * initial {@code Stream} that match the given {@code Predicate}. Consuming elements from the + * stream source does not begin until the terminal operation is executed. + * + *

Terminal operations consume the {@code Stream} and produce a result or a side-effect. + * After a terminal operation is performed, the stream can no longer be used and you must + * return to the data source, or select a new data source, to get a new stream. For example, + * obtaining the sum of weights of all red blocks, and then of all blue blocks, requires a + * filter-map-reduce on two different streams: + *

{@code
+ *     int sumOfRedWeights  = blocks.stream().filter(b -> b.getColor() == RED)
+ *                                           .mapToInt(b -> b.getWeight())
+ *                                           .sum();
+ *     int sumOfBlueWeights = blocks.stream().filter(b -> b.getColor() == BLUE)
+ *                                           .mapToInt(b -> b.getWeight())
+ *                                           .sum();
+ * }
+ * + *

However, there are other techniques that allow you to obtain both results in a single + * pass if multiple traversal is impractical or inefficient. TODO provide link + * + *

Stream operations

+ * + *

Intermediate stream operations (such as {@code filter} or {@code sorted}) always produce a + * new {@code Stream}, and are always lazy. Executing a lazy operation does not + * trigger processing of the stream contents; all processing is deferred until the terminal + * operation commences. Processing streams lazily allows for significant efficiencies; in a + * pipeline such as the filter-map-sum example above, filtering, mapping, and addition can be + * fused into a single pass, with minimal intermediate state. Laziness also enables us to avoid + * examining all the data when it is not necessary; for operations such as "find the first + * string longer than 1000 characters", one need not examine all the input strings, just enough + * to find one that has the desired characteristics. (This behavior becomes even more important + * when the input stream is infinite and not merely large.) + * + *

Intermediate operations are further divided into stateless and stateful + * operations. Stateless operations retain no state from previously seen values when processing + * a new value; examples of stateless intermediate operations include {@code filter} and + * {@code map}. Stateful operations may incorporate state from previously seen elements in + * processing new values; examples of stateful intermediate operations include {@code distinct} + * and {@code sorted}. Stateful operations may need to process the entire input before + * producing a result; for example, one cannot produce any results from sorting a stream until + * one has seen all elements of the stream. As a result, under parallel computation, some + * pipelines containing stateful intermediate operations have to be executed in multiple passes. + * Pipelines containing exclusively stateless intermediate operations can be processed in a + * single pass, whether sequential or parallel. + * + *

Further, some operations are deemed short-circuiting operations. An intermediate + * operation is short-circuiting if, when presented with infinite input, it may produce a + * finite stream as a result. A terminal operation is short-circuiting if, when presented with + * infinite input, it may terminate in finite time. (Having a short-circuiting operation is a + * necessary, but not sufficient, condition for the processing of an infinite stream to + * terminate normally in finite time.) + * + * Terminal operations (such as {@code forEach} or {@code findFirst}) are always eager + * (they execute completely before returning), and produce a non-{@code Stream} result, such + * as a primitive value or a {@code Collection}, or have side-effects. + * + *

Parallelism

+ * + *

By recasting aggregate operations as a pipeline of operations on a stream of values, many + * aggregate operations can be more easily parallelized. A {@code Stream} can execute either + * in serial or in parallel. When streams are created, they are either created as sequential + * or parallel streams; the parallel-ness of streams can also be switched by the + * {@link java.util.stream.Stream#sequential()} and {@link java.util.stream.Stream#parallel()} + * operations. The {@code Stream} implementations in the JDK create serial streams unless + * parallelism is explicitly requested. For example, {@code Collection} has methods + * {@link java.util.Collection#stream} and {@link java.util.Collection#parallelStream}, + * which produce sequential and parallel streams respectively; other stream-bearing methods + * such as {@link java.util.stream.Streams#intRange(int, int)} produce sequential + * streams but these can be efficiently parallelized by calling {@code parallel()} on the + * result. The set of operations on serial and parallel streams is identical. To execute the + * "sum of weights of blocks" query in parallel, we would do: + * + *

{@code
+ *     int sumOfWeights = blocks.parallelStream().filter(b -> b.getColor() == RED)
+ *                                               .mapToInt(b -> b.getWeight())
+ *                                               .sum();
+ * }
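[Editor's note: a small sketch, not part of this changeset, of switching a pipeline between sequential and parallel execution as described above. It uses {@code IntStream.range} and {@code isParallel()} from the final Java 8 API; the patch text itself refers to a {@code Streams.intRange} factory.]

    import java.util.stream.IntStream;

    public class ParallelToggleSketch {
        public static void main(String[] args) {
            // A sequential source can be switched to parallel execution (and back)
            // before the terminal operation runs; isParallel() reports the mode.
            IntStream range = IntStream.range(0, 1_000).parallel();
            System.out.println(range.isParallel());                                   // true
            System.out.println(IntStream.range(0, 1_000).parallel().sequential().sum());  // 499500
        }
    }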
+ * + *

The only difference between the serial and parallel versions of this example code is + * the creation of the initial {@code Stream}. Whether a {@code Stream} will execute in serial + * or parallel can be determined by the {@code Stream#isParallel} method. When the terminal + * operation is initiated, the entire stream pipeline is either executed sequentially or in + * parallel, determined by the last operation that affected the stream's serial-parallel + * orientation (which could be the stream source, or the {@code sequential()} or + * {@code parallel()} methods.) + * + *

In order for the results of parallel operations to be deterministic and consistent with + * their serial equivalent, the function values passed into the various stream operations should + * be stateless. + * + *

Ordering

+ * + *

Streams may or may not have an encounter order. Whether or not + * there is an encounter order depends on the source, the intermediate + * operations, and the terminal operation. Certain stream sources (such as + * {@code List} or arrays) are intrinsically ordered, whereas others (such as + * {@code HashSet}) are not. Some intermediate operations may impose an + * encounter order on an otherwise unordered stream, such as + * {@link java.util.stream.Stream#sorted()}, and others may render an ordered + * stream unordered (such as {@link java.util.stream.Stream#unordered()}). + * Some terminal operations may ignore encounter order, such as + * {@link java.util.stream.Stream#forEach}. + * + *

If a Stream is ordered, most operations are constrained to operate on the + * elements in their encounter order; if the source of a stream is a {@code List} + * containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)} + * must be {@code [2, 4, 6]}. However, if the source has no defined encounter + * order, then any permutation of the values {@code [2, 4, 6]} would be a valid + * result. Many operations can still be efficiently parallelized even under + * ordering constraints. + * + *

For sequential streams, ordering is only relevant to the determinism + * of operations performed repeatedly on the same source. (An {@code ArrayList} + * is constrained to iterate elements in order; a {@code HashSet} is not, and + * repeated iteration might produce a different order.) + * + *

For parallel streams, relaxing the ordering constraint can enable + * optimized implementation for some operations. For example, duplicate + * filtration on an ordered stream must completely process the first partition + * before it can return any elements from a subsequent partition, even if those + * elements are available earlier. On the other hand, without the constraint of + * ordering, duplicate filtration can be done more efficiently by using + * a shared {@code ConcurrentHashSet}. There will be cases where the stream + * is structurally ordered (the source is ordered and the intermediate + * operations are order-preserving), but the user does not particularly care + * about the encounter order. In some cases, explicitly de-ordering the stream + * with the {@link java.util.stream.Stream#unordered()} method may result in + * improved parallel performance for some stateful or terminal operations. + * + *

Non-interference

+ * + * The {@code java.util.stream} package enables you to execute possibly-parallel + * bulk-data operations over a variety of data sources, including even non-thread-safe + * collections such as {@code ArrayList}. This is possible only if we can + * prevent interference with the data source during the execution of a + * stream pipeline. (Execution begins when the terminal operation is invoked, and ends + * when the terminal operation completes.) For most data sources, preventing interference + * means ensuring that the data source is not modified at all during the execution + * of the stream pipeline. (Some data sources, such as concurrent collections, are + * specifically designed to handle concurrent modification.) + * + *

Accordingly, lambda expressions (or other objects implementing the appropriate functional + * interface) passed to stream methods should never modify the stream's data source. An + * implementation is said to interfere with the data source if it modifies, or causes + * to be modified, the stream's data source. The need for non-interference applies to all + * pipelines, not just parallel ones. Unless the stream source is concurrent, modifying a + * stream's data source during execution of a stream pipeline can cause exceptions, incorrect + * answers, or nonconformant results. + * + *

Further, results may be nondeterministic or incorrect if the lambda expressions passed to + * stream operations are stateful. A stateful lambda (or other object implementing the + * appropriate functional interface) is one whose result depends on any state which might change + * during the execution of the stream pipeline. An example of a stateful lambda is: + *

{@code
+ *     Set<Integer> seen = Collections.synchronizedSet(new HashSet<>());
+ *     stream.parallel().map(e -> { if (seen.add(e)) return 0; else return e; })...
+ * }
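[Editor's note: an illustrative sketch, not part of this changeset, of the stateless alternative to the stateful lambda above: letting the library perform duplicate handling instead of tracking "seen" elements in shared mutable state.]

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class StatelessSketch {
        public static void main(String[] args) {
            List<Integer> input = Arrays.asList(1, 2, 2, 3, 3, 3);
            // distinct() keeps the pipeline free of caller-visible mutable state,
            // so sequential and parallel runs produce the same result.
            List<Integer> unique = input.parallelStream()
                                        .distinct()
                                        .collect(Collectors.toList());
            System.out.println(unique);  // [1, 2, 3]
        }
    }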
+ * Here, if the mapping operation is performed in parallel, the results for the same input + * could vary from run to run, due to thread scheduling differences, whereas with a stateless + * lambda expression the results would always be the same. + * + *

Side-effects

+ * + *

Reduction operations

+ * + * A reduction operation takes a stream of elements and processes them in a way + * that reduces to a single value or summary description, such as finding the sum or maximum + * of a set of numbers. (In more complex scenarios, the reduction operation might need to + * extract data from the elements before reducing that data to a single value, such as + * finding the sum of weights of a set of blocks. This would require extracting the weight + * from each block before summing up the weights.) + * + *

Of course, such operations can be readily implemented as simple sequential loops, as in: + *

{@code
+ *    int sum = 0;
+ *    for (int x : numbers) {
+ *       sum += x;
+ *    }
+ * }
+ * However, there may be a significant advantage to preferring a {@link Stream#reduce reduce operation} + * over a mutative accumulation such as the above -- a properly constructed reduce operation is + * inherently parallelizable, so long as the {@link java.util.function.BinaryOperator} has the right characteristics, + * specifically that it is associative. For example, given a + * stream of numbers for which we want to find the sum, we can write: + *
{@code
+ *    int sum = numbers.reduce(0, (x,y) -> x+y);
+ * }
+ * or more succinctly: + *
{@code
+ *    int sum = numbers.reduce(0, Integer::sum);
+ * }
+ * + *

(The primitive specializations of {@link java.util.stream.Stream}, such as + * {@link java.util.stream.IntStream}, even have convenience methods for common reductions, + * such as {@link java.util.stream.IntStream#sum()} or {@link java.util.stream.IntStream#max()}, + * which are implemented as simple wrappers around reduce.) + * + *

Reduction parallelizes well since the implementation of {@code reduce} can operate on + * subsets of the stream in parallel, and then combine the intermediate results to get the final + * correct answer. Even if you were to use a parallelizable form of the {@link java.util.stream.Stream#forEach(Consumer) forEach()} method + * in place of the original for-each loop above, you would still have to provide thread-safe + * updates to the shared accumulating variable {@code sum}, and the required synchronization + * would likely eliminate any performance gain from parallelism. Using a {@code reduce} method + * instead removes all of the burden of parallelizing the reduction operation, and the library + * can provide an efficient parallel implementation with no additional synchronization needed. + * + *

The "blocks" examples shown earlier shows how reduction combines with other operations + * to replace for loops with bulk operations. If {@code blocks} is a collection of {@code Block} + * objects, which have a {@code getWeight} method, we can find the heaviest block with: + *

{@code
+ *     OptionalInt heaviest = blocks.stream()
+ *                                  .mapToInt(Block::getWeight)
+ *                                  .reduce(Integer::max);
+ * }
+ * + *

In its more general form, a {@code reduce} operation on elements of type {@code T} + * yielding a result of type {@code U} requires three parameters: + *

{@code
+ * <U> U reduce(U identity,
+ *              BiFunction<U, ? super T, U> accumulator,
+ *              BinaryOperator<U> combiner);
+ * }
+ * Here, the identity element is both an initial seed for the reduction, and a default + * result if there are no elements. The accumulator function takes a partial result and + * the next element, and produces a new partial result. The combiner function combines + * the partial results of two accumulators to produce a new partial result, and eventually the + * final result. + * + *

This form is a generalization of the two-argument form, and is also a generalization of + * the map-reduce construct illustrated above. If we wanted to re-cast the simple {@code sum} + * example using the more general form, {@code 0} would be the identity element, while + * {@code Integer::sum} would be both the accumulator and combiner. For the sum-of-weights + * example, this could be re-cast as: + *

{@code
+ *     int sumOfWeights = blocks.stream().reduce(0,
+ *                                               (sum, b) -> sum + b.getWeight(),
+ *                                               Integer::sum);
+ * }
+ * though the map-reduce form is more readable and generally preferable. The generalized form + * is provided for cases where significant work can be optimized away by combining mapping and + * reducing into a single function. + * + *

More formally, the {@code identity} value must be an identity for the combiner + * function. This means that for all {@code u}, {@code combiner.apply(identity, u)} is equal + * to {@code u}. Additionally, the {@code combiner} function must be + * associative and must be compatible with the {@code accumulator} + * function; for all {@code u} and {@code t}, the following must hold: + *

{@code
+ *     combiner.apply(u, accumulator.apply(identity, t)) == accumulator.apply(u, t)
+ * }
+ * + *

Mutable Reduction

+ * + * A mutable reduction operation is similar to an ordinary reduction, in that it reduces + * a stream of values to a single value, but instead of producing a distinct single-valued result, it + * mutates a general result container, such as a {@code Collection} or {@code StringBuilder}, + * as it processes the elements in the stream. + * + *

For example, if we wanted to take a stream of strings and concatenate them into a single + * long string, we could achieve this with ordinary reduction: + *

{@code
+ *     String concatenated = strings.reduce("", String::concat);
+ * }
+ * + * We would get the desired result, and it would even work in parallel. However, we might not + * be happy about the performance! Such an implementation would do a great deal of string + * copying, and the run time would be O(n^2) in the number of elements. A more + * performant approach would be to accumulate the results into a {@link StringBuilder}, which + * is a mutable container for accumulating strings. We can use the same technique to + * parallelize mutable reduction as we do with ordinary reduction. + * + *

The mutable reduction operation is called {@link java.util.stream.Stream#collect(Collector) collect()}, as it + * collects together the desired results into a result container such as {@code StringBuilder}. + * A {@code collect} operation requires three things: a factory function which will construct + * new instances of the result container, an accumulating function that will update a result + * container by incorporating a new element, and a combining function that can take two + * result containers and merge their contents. The form of this is very similar to the general + * form of ordinary reduction: + *

{@code
+ * <R> R collect(Supplier<R> resultFactory,
+ *               BiConsumer<R, ? super T> accumulator,
+ *               BiConsumer<R, R> combiner);
+ * }
+ * As with {@code reduce()}, the benefit of expressing {@code collect} in this abstract way is + * that it is directly amenable to parallelization: we can accumulate partial results in parallel + * and then combine them. For example, to collect the string representations of the elements + * in a stream into an {@code ArrayList}, we could write the obvious sequential for-each form: + *
{@code
+ *     ArrayList<String> strings = new ArrayList<>();
+ *     for (T element : stream) {
+ *         strings.add(element.toString());
+ *     }
+ * }
+ * Or we could use a parallelizable collect form: + *
{@code
+ *     ArrayList<String> strings = stream.collect(() -> new ArrayList<>(),
+ *                                                (c, e) -> c.add(e.toString()),
+ *                                                (c1, c2) -> c1.addAll(c2));
+ * }
+ * or, noting that we have buried a mapping operation inside the accumulator function, more + * succinctly as: + *
{@code
+ *     ArrayList strings = stream.map(Object::toString)
+ *                                       .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+ * }
+ * Here, our supplier is just the {@link java.util.ArrayList#ArrayList() ArrayList constructor}, the + * accumulator adds the stringified element to an {@code ArrayList}, and the combiner simply + * uses {@link java.util.ArrayList#addAll addAll} to copy the strings from one container into the other. + * + *

As with the regular reduction operation, the ability to parallelize only comes if an + * associativity condition is met. The {@code combiner} is associative + * if for result containers {@code r1}, {@code r2}, and {@code r3}: + *

{@code
+ *    combiner.accept(r1, r2);
+ *    combiner.accept(r1, r3);
+ * }
+ * is equivalent to + *
{@code
+ *    combiner.accept(r2, r3);
+ *    combiner.accept(r1, r2);
+ * }
+ * where equivalence means that {@code r1} is left in the same state (according to the meaning + * of {@link Object#equals equals} for the element types). Similarly, the {@code resultFactory} + * must act as an identity with respect to the {@code combiner} so that for any result + * container {@code r}: + *
{@code
+ *     combiner.accept(r, resultFactory.get());
+ * }
+ * does not modify the state of {@code r} (again according to the meaning of + * {@link Object#equals equals}). Finally, the {@code accumulator} and {@code combiner} must be + * compatible such that for a result container {@code r} and element {@code t}: + *
{@code
+ *    r2 = resultFactory.get();
+ *    accumulator.accept(r2, t);
+ *    combiner.accept(r, r2);
+ * }
+ * is equivalent to: + *
{@code
+ *    accumulator.accept(r,t);
+ * }
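[Editor's note: a concrete check, not part of this changeset, of the accumulator/combiner compatibility condition stated above, using the ArrayList supplier, accumulator, and combiner from the earlier examples.]

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.BiConsumer;
    import java.util.function.Supplier;

    public class CollectLawsSketch {
        public static void main(String[] args) {
            Supplier<List<String>> resultFactory = ArrayList::new;
            BiConsumer<List<String>, String> accumulator = List::add;
            BiConsumer<List<String>, List<String>> combiner = List::addAll;

            List<String> r = resultFactory.get();
            r.add("a");

            // Path 1: accumulate t into a fresh container, then combine into a copy of r.
            List<String> r2 = resultFactory.get();
            accumulator.accept(r2, "b");
            List<String> viaCombine = new ArrayList<>(r);
            combiner.accept(viaCombine, r2);

            // Path 2: accumulate t directly into a copy of r.
            List<String> direct = new ArrayList<>(r);
            accumulator.accept(direct, "b");

            System.out.println(viaCombine.equals(direct));  // true
        }
    }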
+ * where equivalence means that {@code r} is left in the same state (again according to the + * meaning of {@link Object#equals equals}). + * + *

The three aspects of {@code collect}: supplier, accumulator, and combiner, are often very + * tightly coupled, and it is convenient to introduce the notion of a {@link Collector} as + * being an object that embodies all three aspects. There is a {@link Stream#collect(Collector) collect} + * method that simply takes a {@code Collector} and returns the resulting container. + * The above example for collecting strings into a {@code List} can be rewritten using a + * standard {@code Collector} as: + *

{@code
+ *     ArrayList<String> strings = stream.map(Object::toString)
+ *                                       .collect(Collectors.toList());
+ * }
+ * + *

Reduction, Concurrency, and Ordering

+ * + * With some complex reduction operations, for example a collect that produces a + * {@code Map}, such as: + *
{@code
+ *     Map<Buyer, List<Transaction>> salesByBuyer
+ *         = txns.parallelStream()
+ *               .collect(Collectors.groupingBy(Transaction::getBuyer));
+ * }
+ * (where {@link java.util.stream.Collectors#groupingBy} is a utility function + * that returns a {@link Collector} for grouping sets of elements based on some key) + * it may actually be counterproductive to perform the operation in parallel. + * This is because the combining step (merging one {@code Map} into another by key) + * can be expensive for some {@code Map} implementations. + * + *

Suppose, however, that the result container used in this reduction + * was a concurrently modifiable collection -- such as a + * {@link java.util.concurrent.ConcurrentHashMap ConcurrentHashMap}. In that case, + * the parallel invocations of the accumulator could actually deposit their results + * concurrently into the same shared result container, eliminating the need for the combiner to + * merge distinct result containers. This potentially provides a boost + * to the parallel execution performance. We call this a concurrent reduction. + * + *

A {@link Collector} that supports concurrent reduction is marked with the + * {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic. + * Having a concurrent collector is a necessary condition for performing a + * concurrent reduction, but that alone is not sufficient. If you imagine multiple + * accumulators depositing results into a shared container, the order in which + * results are deposited is non-deterministic. Consequently, a concurrent reduction + * is only possible if ordering is not important for the stream being processed. + * The {@link java.util.stream.Stream#collect(Collector)} + * implementation will only perform a concurrent reduction if + *

    + *
  • The stream is parallel;
  • The collector has the {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic; and
  • Either the stream is unordered, or the collector has the {@link java.util.stream.Collector.Characteristics#UNORDERED} characteristic.
+ * For example: + *
{@code
+ *     Map<Buyer, List<Transaction>> salesByBuyer
+ *         = txns.parallelStream()
+ *               .unordered()
+ *               .collect(groupingByConcurrent(Transaction::getBuyer));
+ * }
+ * (where {@link java.util.stream.Collectors#groupingByConcurrent} is the concurrent companion + * to {@code groupingBy}). + * + *

Note that if it is important that the elements for a given key appear in the + * order they appear in the source, then we cannot use a concurrent reduction, + * as ordering is one of the casualties of concurrent insertion. We would then + * be constrained to implement either a sequential reduction or a merge-based + * parallel reduction. + * + *

Associativity

+ * + * An operator or function {@code op} is associative if the following holds: + *
{@code
+ *     (a op b) op c == a op (b op c)
+ * }
+ * The importance of this to parallel evaluation can be seen if we expand this to four terms: + *
{@code
+ *     a op b op c op d == (a op b) op (c op d)
+ * }
+ * So we can evaluate {@code (a op b)} in parallel with {@code (c op d)} and then invoke {@code op} on + * the results. + * TODO what does associative mean for mutative combining functions? + * FIXME: we described mutative associativity above. + * + *
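+ *
+ * For illustration (a minimal sketch, assuming a {@code List<Integer> numbers}), addition is
+ * associative and is therefore safe to use as a combining function for a parallel reduction,
+ * whereas subtraction is not:
+ *
 {@code
+ *     int sum = numbers.parallelStream().reduce(0, (a, b) -> a + b);  // associative: deterministic result
+ *     int bad = numbers.parallelStream().reduce(0, (a, b) -> a - b);  // not associative: result depends on how the stream is split
+ * }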

Stream sources

+ * TODO where does this section go? + * + * XXX - change this section to stream construction, gradually introducing more + * complex ways to construct + * - construction from Collection + * - construction from Iterator + * - construction from array + * - construction from generators + * - construction from spliterator + * + * XXX - the following is a quite low-level but important aspect of stream construction + * + *

A pipeline is initially constructed from a spliterator (see {@link java.util.Spliterator}) supplied by a stream source. + * The spliterator covers elements of the source and provides element traversal operations + * for a possibly-parallel computation. See methods on {@link java.util.stream.Streams} for construction + * of pipelines using spliterators. + * + *

A source may directly supply a spliterator. If so, the spliterator is traversed, split, or queried + * for estimated size after, and never before, the terminal operation commences. It is strongly recommended + * that the spliterator report a characteristic of {@code IMMUTABLE} or {@code CONCURRENT}, or be + * late-binding and not bind to the elements it covers until traversed, split or queried for + * estimated size. + * + *
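+ *
+ * For illustration (a minimal sketch), a spliterator over an array already reports
+ * {@code IMMUTABLE}, so it satisfies the recommendation above when used as a direct source:
+ *
 {@code
+ *     String[] values = { "one", "two", "three" };
+ *     Spliterator<String> sp = Arrays.spliterator(values);
+ *     // sp reports SIZED, SUBSIZED, ORDERED and IMMUTABLE
+ * }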

If a source cannot directly supply a recommended spliterator then it may indirectly supply a spliterator + * using a {@code Supplier}. The spliterator is obtained from the supplier after, and never before, the terminal + * operation of the stream pipeline commences. + * + *

Such requirements significantly reduce the scope of potential interference to the interval starting + * with the commencement of the terminal operation and ending with the production of a result or side-effect. See + * Non-Interference for + * more details. + * + * XXX - move the following to the non-interference section + * + *

A source can be modified before the terminal operation commences and those modifications will be reflected in + * the covered elements. Afterwards, and depending on the properties of the source, further modifications + * might not be reflected and a {@code ConcurrentModificationException} may be thrown. + * + *

For example, consider the following code: + *

{@code
+ *     List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
+ *     Stream<String> sl = l.stream();
+ *     l.add("three");
+ *     String s = sl.collect(toStringJoiner(" ")).toString();
+ * }
+ * First a list is created consisting of two strings: "one" and "two". Then a stream is created from that list. + * Next the list is modified by adding a third string: "three". Finally the elements of the stream are collected + * and joined together. Since the list was modified before the terminal {@code collect} operation commenced, + * the result will be the string "one two three". However, if the list is modified after the terminal operation + * commences, as in: + *
{@code
+ *     List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
+ *     Stream<String> sl = l.stream();
+ *     String s = sl.peek(e -> l.add("BAD LAMBDA")).collect(toStringJoiner(" ")).toString();
+ * }
+ * then a {@code ConcurrentModificationException} will be thrown since the {@code peek} operation will attempt + * to add the string "BAD LAMBDA" to the list after the terminal operation has commenced. + * + * + */ + +package java.util.stream; + +import java.util.function.Consumer; \ No newline at end of file # HG changeset patch # User briangoetz # Date 1366224999 14400 # Node ID 52713ffed4c3f2f619074662ba2e26a287c94544 # Parent e956201d5be7309a51dc710c4b88d4744620456f [mq]: JDK-7172553 diff --git a/src/share/classes/java/util/StringJoiner.java b/src/share/classes/java/util/StringJoiner.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/StringJoiner.java @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util; + +/** + * StringJoiner is used to construct a sequence of characters separated + * by an infix delimiter and optionally starting with a supplied prefix + * and ending with a supplied suffix. + * + * For example, the String {@code "[George:Sally:Fred]"} may + * be constructed as follows: +
+
+  StringJoiner sj = new StringJoiner( ":", "[", "]" );
+  sj.add( "George" ).add( "Sally" ).add( "Fred" );
+  String desiredString = sj.toString();
+
+
+ * + * Prior to adding something to the StringJoiner, {@code sj.toString()} + * will, by default, return {@code prefix+suffix}. However, if the + * {@code emptyOutput} parameter is supplied via the setEmptyOutput method, + * the value supplied will be returned instead. This can be used, for example, + * when creating a string using set notation to indicate an empty set, i.e. "{}", + * where the prefix is "{", the suffix is "}" and nothing has been added to the + * StringJoiner. + * + * A StringJoiner may be employed to create formatted output from a + * collection using lambda expressions. For example, + * + * +
+      List<Person> people = ...
+      StringJoiner joiner = new StringJoiner( ", " );
+      people.forEach( p -> joiner.add( p.getName() ) );
+      String commaSeparatedNames = joiner.toString();
+ 
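+ * As another example (an illustrative sketch of the {@code emptyOutput} behavior described
+ * above), a custom value can be returned while the StringJoiner is still empty:
+
+  StringJoiner sj2 = new StringJoiner( ", " ).setEmptyOutput( "{}" );
+  String empty = sj2.toString();          // returns "{}"
+  sj2.add( "George" ).add( "Sally" );
+  String names = sj2.toString();          // returns "George, Sally"
+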
+ * + * @author Jim Gish + * @since 1.8 +*/ +public class StringJoiner { + private final String prefix; + private final String infix; + private final String suffix; + + /* + * StringBuilder value -- at any time, the characters constructed from the + * prefix, the added element separated by the infix, but without the suffix, + * so that we can more easily add elements without having to jigger the + * suffix each time. + */ + private StringBuilder value; + + /* + * By default, the string consisting of prefix+suffix, returned by toString(), + * or properties of value, when no elements have yet been added, i.e. when it + * is empty. This may be overridden by the user to be some other value + * including the empty String. + */ + private String emptyOutput; + + /** + * Constructs a string joiner with no characters in it with no prefix or + * suffix and using the supplied infix delimiter. Also, if no characters + * are added to the StringJoiner and methods accessing the value of it are + * invoked, it will not return a prefix or suffix (or properties thereof) + * in the result, unless {@code setEmptyOutput} has first been called. + * + * @param infix the sequence of characters to be used between each element + * added to the StringJoiner value + * @throws NullPointerException if infix is null + */ + public StringJoiner(CharSequence infix) { + this(infix, "", ""); + } + + /** + * Constructs a string joiner with no characters in it and using the + * supplied prefix, infix and suffix. Also, if no characters are added to + * the StringJoiner and methods accessing the string value of it are + * invoked, it will return the prefix+suffix (or properties thereof) in the + * result, unless {@code setEmptyOutput} has first been called. + * + * @param infix the sequence of characters to be used between each element + * added to the StringJoiner + * @param prefix the sequence of characters to be used at the beginning + * @param suffix the sequence of characters to be used at the end + * @throws NullPointerException if prefix, infix, or suffix is null + */ + public StringJoiner(CharSequence infix, CharSequence prefix, CharSequence suffix) { + Objects.requireNonNull(prefix, "The prefix must not be null"); + Objects.requireNonNull(infix, "The infix delimiter must not be null"); + Objects.requireNonNull(suffix, "The suffix must not be null"); + // make defensive copies of arguments + this.prefix = prefix.toString(); + this.infix = infix.toString(); + this.suffix = suffix.toString(); + this.emptyOutput = this.prefix + this.suffix; + } + + /** + * Sets the sequence of characters to be used when determine the string + * representation of this StringJoiner and no elements have been added yet, + * i.e. when it is empty. Note that once an add method has been called, + * the StringJoiner is no longer considered empty, even if the element(s) + * added correspond to the empty String. 
+ * + * @param emptyOutput the characters to return as the value of an empty + * StringJoiner + * @return this StringJoiner itself so the calls may be chained + * @throws NullPointerException when the emptyOutput parameter is null + */ + public StringJoiner setEmptyOutput(CharSequence emptyOutput) { + this.emptyOutput = Objects.requireNonNull(emptyOutput, "The empty output value must not be null").toString(); + return this; + } + + /** + * Returns the current value, consisting of the prefix, the values added so + * far separated by the infix delimiter, and the suffix, unless no elements + * have been added in which case, the prefix+suffix or the emptyOutput + * characters are returned + * + * @return the string representation of this StringJoiner + */ + @Override + public String toString() { + return (value != null ? value.toString() + suffix : emptyOutput); + } + + /** + * add the supplied CharSequence value as the next element of the StringJoiner value. + * If newElement is null, then {@code "null"} is added. + * + * @param newElement The element to add + * @return a reference to this StringJoiner + */ + public StringJoiner add(CharSequence newElement) { + prepareBuilder().append(newElement); + return this; + } + + private StringBuilder prepareBuilder() { + if (value != null) { + value.append(infix); + } else { + value = new StringBuilder().append(prefix); + } + return value; + } + + /** + * The length of the StringJoiner value. i.e. the length of String + * representation of the StringJoiner. Note that if no add methods have been + * called, then the length of the String representation (either + * prefix+suffix or emptyOutput) will be returned. The value should be + * equivalent to toString().length(). + * + * @return the length of the current value of StringJoiner + */ + public int length() { + // Remember that we never actually append the suffix unless we return + // the full (present) value or some sub-string or length of it, so that + // we can add on more if we need to. + return (value != null ? value.length() + suffix.length() : emptyOutput.length()); + } +} # HG changeset patch # User briangoetz # Date 1366225233 14400 # Node ID 68560ad3901c59654b0dfb0a01d38aa2ac2129d5 # Parent 52713ffed4c3f2f619074662ba2e26a287c94544 [mq]: JDK-8011917 diff --git a/src/share/classes/java/util/stream/Collectors.java b/src/share/classes/java/util/stream/Collectors.java new file mode 100755 --- /dev/null +++ b/src/share/classes/java/util/stream/Collectors.java @@ -0,0 +1,1273 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.util.stream; + +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Comparators; +import java.util.DoubleSummaryStatistics; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IntSummaryStatistics; +import java.util.Iterator; +import java.util.List; +import java.util.LongSummaryStatistics; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Set; +import java.util.StringJoiner; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongFunction; + +/** + * Implementations of {@link Collector} that implement various useful reduction + * operations, such as accumulating elements into collections, summarizing + * elements according to various criteria, etc. + * + *

The following are examples of using the predefined {@code Collector} + * implementations in {@link Collectors} with the {@code Stream} API to perform + * mutable reduction tasks: + * + *

{@code
+ *     // Accumulate elements into a List
+ *     List<String> list = stream.collect(Collectors.toList());
+ *
+ *     // Accumulate elements into a TreeSet
+ *     Set<String> set = stream.collect(Collectors.toCollection(TreeSet::new));
+ *
+ *     // Convert elements to strings and concatenate them, separated by commas
+ *     String joined = stream.map(Object::toString)
+ *                           .collect(Collectors.toStringJoiner(", "))
+ *                           .toString();
+ *
+ *     // Find highest-paid employee
+ *     Employee highestPaid = employees.stream()
+ *                                     .collect(Collectors.maxBy(Comparators.comparing(Employee::getSalary)));
+ *
+ *     // Group employees by department
+ *     Map<Department, List<Employee>> byDept
+ *         = employees.stream()
+ *                    .collect(Collectors.groupingBy(Employee::getDepartment));
+ *
+ *     // Find highest-paid employee by department
+ *     Map<Department, Employee> highestPaidByDept
+ *         = employees.stream()
+ *                    .collect(Collectors.groupingBy(Employee::getDepartment,
+ *                                                   Collectors.maxBy(Comparators.comparing(Employee::getSalary))));
+ *
+ *     // Partition students into passing and failing
+ *     Map<Boolean, List<Student>> passingFailing =
+ *         students.stream()
+ *                 .collect(Collectors.partitioningBy(s -> s.getGrade() >= PASS_THRESHOLD));
+ *
+ * }
+ * + * TODO explanation of parallel collection + * + * @since 1.8 + */ +public final class Collectors { + + private static final Set CH_CONCURRENT + = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.CONCURRENT, + Collector.Characteristics.STRICTLY_MUTATIVE, + Collector.Characteristics.UNORDERED)); + private static final Set CH_STRICT + = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.STRICTLY_MUTATIVE)); + private static final Set CH_STRICT_UNORDERED + = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.STRICTLY_MUTATIVE, + Collector.Characteristics.UNORDERED)); + + private Collectors() { } + + /** + * Returns a merge function, suitable for use in + * {@link Map#merge(Object, Object, BiFunction) Map.merge()} or + * {@link #toMap(Function, Function, BinaryOperator) toMap()}, which always + * throws {@code IllegalStateException}. This can be used to enforce the + * assumption that the elements being collected are distinct. + * + * @param The type of input arguments to the merge function + * @return A merge function which always throw {@code IllegalStateException} + * @see #firstWinsMerger() + * @see #lastWinsMerger() + */ + public static BinaryOperator throwingMerger() { + return (u,v) -> { throw new IllegalStateException(String.format("Duplicate key %s", u)); }; + } + + /** + * Returns a merge function, suitable for use in + * {@link Map#merge(Object, Object, BiFunction) Map.merge()} or + * {@link #toMap(Function, Function, BinaryOperator) toMap()}, + * which implements a "first wins" policy. + * + * @param The type of input arguments to the merge function + * @return A merge function which always returns its first argument + * @see #lastWinsMerger() + * @see #throwingMerger() + */ + public static BinaryOperator firstWinsMerger() { + return (u,v) -> u; + } + + /** + * Returns a merge function, suitable for use in + * {@link Map#merge(Object, Object, BiFunction) Map.merge()} or + * {@link #toMap(Function, Function, BinaryOperator) toMap()}, + * which implements a "last wins" policy. + * + * @param The type of input arguments to the merge function + * @return A merge function which always returns its second argument + * @see #firstWinsMerger() + * @see #throwingMerger() + */ + public static BinaryOperator lastWinsMerger() { + return (u,v) -> v; + } + + /** Simple implementation class for {@code Collector}. */ + private static final class CollectorImpl implements Collector { + private final Supplier resultSupplier; + private final BiFunction accumulator; + private final BinaryOperator combiner; + private final Set characteristics; + + CollectorImpl(Supplier resultSupplier, + BiFunction accumulator, + BinaryOperator combiner, + Set characteristics) { + this.resultSupplier = resultSupplier; + this.accumulator = accumulator; + this.combiner = combiner; + this.characteristics = characteristics; + } + + CollectorImpl(Supplier resultSupplier, + BiFunction accumulator, + BinaryOperator combiner) { + this(resultSupplier, accumulator, combiner, Collections.emptySet()); + } + + @Override + public BiFunction accumulator() { + return accumulator; + } + + @Override + public Supplier resultSupplier() { + return resultSupplier; + } + + @Override + public BinaryOperator combiner() { + return combiner; + } + + @Override + public Set characteristics() { + return characteristics; + } + } + + /** + * Returns a {@code Collector} that accumulates the input elements into a + * new {@code Collection}, in encounter order. 
The {@code Collection} is + * created by the provided factory. + * + * @param collectionFactory A {@code Supplier} which returns a new, empty + * {@code Collection} of the appropriate type + * @param The type of the input elements + * @param The type of the resulting {@code Collection} + * @return A {@code Collector} which collects all the input elements into a + * {@code Collection}, in encounter order + */ + public static> + Collector toCollection(Supplier collectionFactory) { + return new CollectorImpl<>(collectionFactory, + (r, t) -> { r.add(t); return r; }, + (r1, r2) -> { r1.addAll(r2); return r1; }, + CH_STRICT); + } + + /** + * Returns a {@code Collector} that accumulates the input elements into a + * new {@code List}. There are no guarantees on the type, mutability, + * serializability, or thread-safety of the {@code List} returned. + * + * @param The type of the input elements + * @return A {@code Collector} which collects all the input elements into a + * {@code List}, in encounter order + */ + public static + Collector> toList() { + BiFunction, T, List> accumulator = (list, t) -> { + switch (list.size()) { + case 0: + return Collections.singletonList(t); + case 1: + List newList = new ArrayList<>(); + newList.add(list.get(0)); + newList.add(t); + return newList; + default: + list.add(t); + return list; + } + }; + BinaryOperator> combiner = (left, right) -> { + switch (left.size()) { + case 0: + return right; + case 1: + List newList = new ArrayList<>(left.size() + right.size()); + newList.addAll(left); + newList.addAll(right); + return newList; + default: + left.addAll(right); + return left; + } + }; + return new CollectorImpl<>(Collections::emptyList, accumulator, combiner); + } + + /** + * Returns a {@code Collector} that accumulates the input elements into a + * new {@code Set}. There are no guarantees on the type, mutability, + * serializability, or thread-safety of the {@code Set} returned. + * + * @param The type of the input elements + * @return A {@code Collector} which collects all the input elements into a + * {@code Set} + */ + public static + Collector> toSet() { + return new CollectorImpl<>((Supplier>) HashSet::new, + (r, t) -> { r.add(t); return r; }, + (r1, r2) -> { r1.addAll(r2); return r1; }, + CH_STRICT_UNORDERED); + } + + /** + * Returns a {@code Collector} that concatenates the input elements into a + * new {@link StringBuilder}. + * + * @return A {@code Collector} which collects String elements into a + * {@code StringBuilder}, in encounter order + */ + public static Collector toStringBuilder() { + return new CollectorImpl<>(StringBuilder::new, + (r, t) -> { r.append(t); return r; }, + (r1, r2) -> { r1.append(r2); return r1; }, + CH_STRICT); + } + + /** + * Returns a {@code Collector} that concatenates the input elements into a + * new {@link StringJoiner}, using the specified separator. 
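+     *
+     * For example (an illustrative sketch, assuming a collection of {@code Person} elements
+     * whose {@code getName()} accessor returns a {@code String}):
+     *
 {@code
+     *     String allNames = people.stream()
+     *                             .map(Person::getName)
+     *                             .collect(Collectors.toStringJoiner(", "))
+     *                             .toString();
+     * }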
+ * + * @return A {@code Collector} which collects String elements into a + * {@code StringJoiner}, in encounter order + * @param separator the separator to be used between each element + * added to the StringJoiner + */ + public static Collector toStringJoiner(String separator) { + BinaryOperator merger = (sj, other) -> { + if (other.length() > 0) + sj.add(other.toString()); + return sj; + }; + return new CollectorImpl<>(() -> new StringJoiner(separator), + (r, t) -> { r.add(t); return r; }, + merger, CH_STRICT); + } + + /** + * {@code BinaryOperator} that merges the contents of its right + * argument into its left argument, using the provided merge function to + * handle duplicate keys. + * @param mergeFunction A merge function suitable for + * {@link Map#merge(Object, Object, BiFunction) Map.merge()} + * @param Type of the map keys + * @param Type of the map values + * @param Type of the map + * @return A merge function for two maps + */ + private static> + BinaryOperator mapMerger(BinaryOperator mergeFunction) { + return (m1, m2) -> { + for (Map.Entry e : m2.entrySet()) + m1.merge(e.getKey(), e.getValue(), mergeFunction); + return m1; + }; + } + + /** + * Adapts a {@code Collector} to a {@code Collector} by applying + * a mapping function to each input element before accumulation. + * + * @apiNote + * The {@code mapping()} collectors are most useful when used in a + * multi-level reduction, downstream of {@code groupingBy} or + * {@code partitioningBy}. For example, given a stream of + * {@code Person}, to accumulate the set of last names in each city: + *
{@code
+     *     Map> lastNamesByCity
+     *         = people.stream().collect(groupingBy(Person::getCity,
+     *                                              mapping(Person::getLastName, toSet())));
+     * }
+ * + * @param The type of the input elements + * @param Type of elements accepted by downstream collector + * @param Result type of collector + * @param mapper A function to be applied to the input elements + * @param downstream A collector which will accept mapped values + * @return A collector which applies the mapping function to the input + * elements and provides the mapped results to the downstream collector + */ + public static Collector + mapping(Function mapper, Collector downstream) { + BiFunction downstreamAccumulator = downstream.accumulator(); + return new CollectorImpl<>(downstream.resultSupplier(), + (r, t) -> downstreamAccumulator.apply(r, mapper.apply(t)), + downstream.combiner(), downstream.characteristics()); + } + + /** + * Returns a {@code Collector} that counts the number of input + * elements. + * + * @implSpec + * This produces a result equivalent to: + *
{@code
+     *     reducing(0L, e -> 1L, Long::sum)
+     * }
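+     *
+     * For example (an illustrative sketch, reusing the {@code Person} examples above), combined
+     * with {@code groupingBy} it counts the number of people in each city:
+     *
 {@code
+     *     Map<City, Long> peopleByCity
+     *         = people.stream().collect(groupingBy(Person::getCity, counting()));
+     * }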
+ * @param The type of the input elements + * @return A {@code Collector} that counts the input elements + */ + public static Collector + counting() { + return reducing(0L, e -> 1L, Long::sum); + } + + /** + * Returns a {@code Collector} that produces the minimal element + * according to a given {@code Comparator}. + * + * @implSpec + * This produces a result equivalent to: + *
{@code
+     *     reducing(Comparators.lesserOf(comparator))
+     * }
+ * @param The type of the input elements + * @param comparator A {@code Comparator} for comparing elements + * @return A {@code Collector} that produces the minimal value + */ + public static Collector + minBy(Comparator comparator) { + return reducing(Comparators.lesserOf(comparator)); + } + + /** + * Returns a {@code Collector} that produces the maximal element + * according to a given {@code Comparator}. + * + * @implSpec + * This produces a result equivalent to: + *
{@code
+     *     reducing(Comparators.greaterOf(comparator))
+     * }
+ * @param The type of the input elements + * @param comparator A {@code Comparator} for comparing elements + * @return A {@code Collector} that produces the maximal value + */ + public static Collector + maxBy(Comparator comparator) { + return reducing(Comparators.greaterOf(comparator)); + } + + /** + * Returns a {@code Collector} that produces the sum of a + * long-valued function applied to the input element. + * + * @implSpec + * This produces a result equivalent to: + *
{@code
+     *     reducing(0L, mapper, Long::sum)
+     * }
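+     *
+     * For example (an illustrative sketch, assuming {@code Employee.getSalary()} returns a
+     * {@code Long}), combined with {@code groupingBy} it totals the salaries in each department:
+     *
 {@code
+     *     Map<Department, Long> totalSalaryByDept
+     *         = employees.stream().collect(groupingBy(Employee::getDepartment,
+     *                                                 sumBy(Employee::getSalary)));
+     * }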
+ * @param The type of the input elements + * @param mapper A function extracting the property to be summed + * @return A {@code Collector} that produces the sum of a derived property + */ + public static Collector + sumBy(Function mapper) { + return reducing(0L, mapper, Long::sum); + } + + /** + * Returns a {@code Collector} which performs a reduction of its + * input elements under a specified {@code BinaryOperator}. + * + * @apiNote + * The {@code reducing()} collectors are most useful when used in a + * multi-level reduction, downstream of {@code groupingBy} or + * {@code partitioningBy}. To perform a simple reduction on a stream, + * use {@link Stream#reduce(BinaryOperator)} instead. + * @param identity The identity value for the reduction (also, the value + * that is returned when there are no input elements) + * @param op A {@code BinaryOperator} used to reduce the input elements + * @param Element type for the input and output of the reduction + * @return A {@code Collector} which implements the reduction operation + * @see #reducing(BinaryOperator) + * @see #reducing(Object, Function, BinaryOperator) + */ + public static Collector + reducing(T identity, BinaryOperator op) { + return new CollectorImpl<>(() -> identity, (r, t) -> (r == null ? t : op.apply(r, t)), op); + } + + /** + * Returns a {@code Collector} which performs a reduction of its + * input elements under a specified {@code BinaryOperator}. + * + * @apiNote + * The {@code reducing()} collectors are most useful when used in a + * multi-level reduction, downstream of {@code groupingBy} or + * {@code partitioningBy}. To perform a simple reduction on a stream, + * use {@link Stream#reduce(BinaryOperator)} instead. + * + *

For example, given a stream of {@code Person}, to calculate tallest + * person in each city: + *

{@code
+     *     Comparator byHeight = Comparators.comparing(Person::getHeight);
+     *     BinaryOperator tallerOf = Comparators.greaterOf(byHeight);
+     *     Map tallestByCity
+     *         = people.stream().collect(groupingBy(Person::getCity, reducing(tallerOf)));
+     * }
+ * @param op A {@code BinaryOperator} used to reduce the input elements + * @param Element type for the input and output of the reduction + * @return A {@code Collector} which implements the reduction operation + * @see #reducing(Object, BinaryOperator) + * @see #reducing(Object, Function, BinaryOperator) + */ + public static Collector + reducing(BinaryOperator op) { + return reducing(null, op); + } + + /** + * Returns a {@code Collector} which performs a reduction of its + * input elements under a specified mapping function and + * {@code BinaryOperator}. This is a generalization of + * {@link #reducing(Object, BinaryOperator)} which allows a transformation + * of the elements before reduction. + * + * @apiNote + * The {@code reducing()} collectors are most useful when used in a + * multi-level reduction, downstream of {@code groupingBy} or + * {@code partitioningBy}. To perform a simple reduction on a stream, + * use {@link Stream#reduce(BinaryOperator)} instead. + * + *

For example, given a stream of {@code Person}, to calculate the longest + * last name of residents in each city: + *

{@code
+     *     Comparator byLength = Comparators.comparing(String::length);
+     *     BinaryOperator longerOf = Comparators.greaterOf(byLength);
+     *     Map longestLastNameByCity
+     *         = people.stream().collect(groupingBy(Person::getCity,
+     *                                              reducing("", Person::getLastName, longerOf)));
+     * }
+ * + * @param identity The identity value for the reduction (also, the value + * that is returned when there are no input elements) + * @param mapper A mapping function to apply to each input value + * @param op A {@code BinaryOperator} used to reduce the mapped values + * @param The type of the input elements + * @param The type of the mapped values + * @return A {@code Collector} implementing the map-reduce operation + * @see #reducing(Object, BinaryOperator) + * @see #reducing(BinaryOperator) + */ + public static + Collector reducing(U identity, + Function mapper, + BinaryOperator op) { + return new CollectorImpl<>(() -> identity, + (r, t) -> (r == null ? mapper.apply(t) : op.apply(r, mapper.apply(t))), + op); + } + + /** + * Returns a {@code Collector} implementing a "group by" operation on + * input elements of type {@code T}, grouping elements according to a + * classification function. + * + *

The classification function maps elements to some key type {@code K}. + * The collector produces a {@code Map>} whose keys are the + * values resulting from applying the classification function to the input + * elements, and whose corresponding values are {@code List}s containing the + * input elements which map to the associated key under the classification + * function. + * + *

There are no guarantees on the type, mutability, serializability, or + * thread-safety of the {@code Map} or {@code List} objects returned. + * @implSpec + * This produces a result similar to: + *

{@code
+     *     groupingBy(classifier, toList());
+     * }
+ * + * @param classifier The classifier function mapping input elements to keys + * @param The type of the input elements + * @param The type of the keys + * @return A {@code Collector} implementing the group-by operation + * @see #groupingBy(Function, Collector) + * @see #groupingBy(Function, Supplier, Collector) + * @see #groupingByConcurrent(Function) + */ + public static + Collector>> groupingBy(Function classifier) { + return groupingBy(classifier, HashMap::new, toList()); + } + + /** + * Returns a {@code Collector} implementing a cascaded "group by" operation + * on input elements of type {@code T}, grouping elements according to a + * classification function, and then performing a reduction operation on + * the values associated with a given key using the specified downstream + * {@code Collector}. + * + *

The classification function maps elements to some key type {@code K}. + * The downstream collector operates on elements of type {@code T} and + * produces a result of type {@code D}. The resulting collector produces a + * {@code Map}. + * + *

For example, to compute the set of last names of people in each city: + *

{@code
+     *     Map> namesByCity
+     *         = people.stream().collect(groupingBy(Person::getCity,
+     *                                              mapping(Person::getLastName, toSet())));
+     * }
+ * + * There are no guarantees on the type, mutability, + * serializability, or thread-safety of the {@code Map} returned. + * + * @param classifier The classifier function mapping input elements to keys + * @param downstream A {@code Collector} implementing the downstream reduction + * @param The type of the input elements + * @param The type of the keys + * @param The result type of the downstream reduction + * @return A {@code Collector} implementing the cascaded group-by operation + * @see #groupingBy(Function) + * @see #groupingBy(Function, Supplier, Collector) + * @see #groupingByConcurrent(Function, Collector) + */ + public static + Collector> groupingBy(Function classifier, + Collector downstream) { + return groupingBy(classifier, HashMap::new, downstream); + } + + /** + * Returns a {@code Collector} implementing a cascaded "group by" operation + * on input elements of type {@code T}, grouping elements according to a + * classification function, and then performing a reduction operation on + * the values associated with a given key using the specified downstream + * {@code Collector}. The {@code Map} produced by the Collector is created + * with the supplied factory function. + * + *

The classification function maps elements to some key type {@code K}. + * The downstream collector operates on elements of type {@code T} and + * produces a result of type {@code D}. The resulting collector produces a + * {@code Map}. + * + *

For example, to compute the set of last names of people in each city, + * where the city names are sorted: + *

{@code
+     *     Map> namesByCity
+     *         = people.stream().collect(groupingBy(Person::getCity, TreeMap::new,
+     *                                              mapping(Person::getLastName, toSet())));
+     * }
+ * + * @param classifier The classifier function mapping input elements to keys + * @param downstream A {@code Collector} implementing the downstream reduction + * @param mapFactory A function which, when called, produces a new empty + * {@code Map} of the desired type + * @param The type of the input elements + * @param The type of the keys + * @param The result type of the downstream reduction + * @param The type of the resulting {@code Map} + * @return A {@code Collector} implementing the cascaded group-by operation + * @see #groupingBy(Function, Collector) + * @see #groupingBy(Function) + * @see #groupingByConcurrent(Function, Supplier, Collector) + */ + public static> + Collector groupingBy(Function classifier, + Supplier mapFactory, + Collector downstream) { + Supplier downstreamSupplier = downstream.resultSupplier(); + BiFunction downstreamAccumulator = downstream.accumulator(); + BiFunction accumulator = (m, t) -> { + K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key"); + D oldContainer = m.computeIfAbsent(key, k -> downstreamSupplier.get()); + D newContainer = downstreamAccumulator.apply(oldContainer, t); + if (newContainer != oldContainer) + m.put(key, newContainer); + return m; + }; + return new CollectorImpl<>(mapFactory, accumulator, mapMerger(downstream.combiner()), CH_STRICT); + } + + /** + * Returns a {@code Collector} implementing a concurrent "group by" + * operation on input elements of type {@code T}, grouping elements + * according to a classification function. + * + *

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + *

The classification function maps elements to some key type {@code K}. + * The collector produces a {@code ConcurrentMap>} whose keys are the + * values resulting from applying the classification function to the input + * elements, and whose corresponding values are {@code List}s containing the + * input elements which map to the associated key under the classification + * function. + * + *

There are no guarantees on the type, mutability, serializability, or + * thread-safety of the {@code Map} or {@code List} objects returned. + * @implSpec + * This produces a result similar to: + *

{@code
+     *     groupingByConcurrent(classifier, toList());
+     * }
+ * + * @param classifier The classifier function mapping input elements to keys + * @param The type of the input elements + * @param The type of the keys + * @return A {@code Collector} implementing the group-by operation + * @see #groupingBy(Function) + * @see #groupingByConcurrent(Function, Collector) + * @see #groupingByConcurrent(Function, Supplier, Collector) + */ + public static + Collector>> groupingByConcurrent(Function classifier) { + return groupingByConcurrent(classifier, ConcurrentHashMap::new, toList()); + } + + /** + * Returns a {@code Collector} implementing a concurrent cascaded "group by" + * operation on input elements of type {@code T}, grouping elements + * according to a classification function, and then performing a reduction + * operation on the values associated with a given key using the specified + * downstream {@code Collector}. + * + *

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + *

The classification function maps elements to some key type {@code K}. + * The downstream collector operates on elements of type {@code T} and + * produces a result of type {@code D}. The resulting collector produces a + * {@code Map}. + * + *

For example, to compute the set of last names of people in each city, + * where the city names are sorted: + *

{@code
+     *     ConcurrentMap<City, Set<String>> namesByCity
+     *         = people.stream().collect(groupingByConcurrent(Person::getCity, ConcurrentSkipListMap::new,
+     *                                                        mapping(Person::getLastName, toSet())));
+     * }
+ * + * @param classifier The classifier function mapping input elements to keys + * @param downstream A {@code Collector} implementing the downstream reduction + * @param The type of the input elements + * @param The type of the keys + * @param The result type of the downstream reduction + * @return A {@code Collector} implementing the cascaded group-by operation + * @see #groupingBy(Function, Collector) + * @see #groupingByConcurrent(Function) + * @see #groupingByConcurrent(Function, Supplier, Collector) + */ + public static + Collector> groupingByConcurrent(Function classifier, + Collector downstream) { + return groupingByConcurrent(classifier, ConcurrentHashMap::new, downstream); + } + + /** + * Returns a concurrent {@code Collector} implementing a cascaded "group by" + * operation on input elements of type {@code T}, grouping elements + * according to a classification function, and then performing a reduction + * operation on the values associated with a given key using the specified + * downstream {@code Collector}. The {@code ConcurrentMap} produced by the + * Collector is created with the supplied factory function. + * + *

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + *

The classification function maps elements to some key type {@code K}. + * The downstream collector operates on elements of type {@code T} and + * produces a result of type {@code D}. The resulting collector produces a + * {@code Map}. + * + *

For example, to compute the set of last names of people in each city, + * where the city names are sorted: + *

{@code
+     *     ConcurrentMap<City, Set<String>> namesByCity
+     *         = people.stream().collect(groupingByConcurrent(Person::getCity, ConcurrentSkipListMap::new,
+     *                                                        mapping(Person::getLastName, toSet())));
+     * }
+ * + * + * @param classifier The classifier function mapping input elements to keys + * @param downstream A {@code Collector} implementing the downstream reduction + * @param mapFactory A function which, when called, produces a new empty + * {@code ConcurrentMap} of the desired type + * @param The type of the input elements + * @param The type of the keys + * @param The result type of the downstream reduction + * @param The type of the resulting {@code ConcurrentMap} + * @return A {@code Collector} implementing the cascaded group-by operation + * @see #groupingByConcurrent(Function) + * @see #groupingByConcurrent(Function, Collector) + * @see #groupingBy(Function, Supplier, Collector) + */ + public static> + Collector groupingByConcurrent(Function classifier, + Supplier mapFactory, + Collector downstream) { + Supplier downstreamSupplier = downstream.resultSupplier(); + BiFunction downstreamAccumulator = downstream.accumulator(); + BinaryOperator combiner = mapMerger(downstream.combiner()); + if (downstream.characteristics().contains(Collector.Characteristics.CONCURRENT)) { + BiFunction accumulator = (m, t) -> { + K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key"); + downstreamAccumulator.apply(m.computeIfAbsent(key, k -> downstreamSupplier.get()), t); + return m; + }; + return new CollectorImpl<>(mapFactory, accumulator, combiner, CH_CONCURRENT); + } + else if (downstream.characteristics().contains(Collector.Characteristics.STRICTLY_MUTATIVE)) { + BiFunction accumulator = (m, t) -> { + K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key"); + D resultContainer = m.computeIfAbsent(key, k -> downstreamSupplier.get()); + synchronized (resultContainer) { + downstreamAccumulator.apply(resultContainer, t); + } + return m; + }; + return new CollectorImpl<>(mapFactory, accumulator, combiner, CH_CONCURRENT); + } + else { + BiFunction accumulator = (m, t) -> { + K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key"); + do { + D oldResult = m.computeIfAbsent(key, k -> downstreamSupplier.get()); + if (oldResult == null) { + if (m.putIfAbsent(key, downstreamAccumulator.apply(null, t)) == null) + return m; + } + else { + synchronized (oldResult) { + if (m.get(key) != oldResult) + continue; + D newResult = downstreamAccumulator.apply(oldResult, t); + if (oldResult != newResult) + m.put(key, newResult); + return m; + } + } + } while (true); + }; + return new CollectorImpl<>(mapFactory, accumulator, combiner, CH_CONCURRENT); + } + } + + /** + * Returns a {@code Collector} which partitions the input elements according + * to a {@code Predicate}, and organizes them into a + * {@code Map>}. + * + * There are no guarantees on the type, mutability, + * serializability, or thread-safety of the {@code Map} returned. + * + * @param predicate The predicate used for classifying input elements + * @param The type of the input elements + * @return A {@code Collector} implementing the partitioning operation. + * @see #partitioningBy(Predicate, Collector) + */ + public static + Collector>> partitioningBy(Predicate predicate) { + return partitioningBy(predicate, toList()); + } + + /** + * Returns a {@code Collector} which partitions the input elements according + * to a {@code Predicate}, reduces the values in each partition according to + * another {@code Collector}, and organizes them into a + * {@code Map} whose values are the result of the downstream + * reduction. 
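+     *
+     * For example (an illustrative sketch, reusing the grading example above), the passing and
+     * failing students can be counted rather than listed:
+     *
 {@code
+     *     Map<Boolean, Long> passingFailingCounts
+     *         = students.stream().collect(partitioningBy(s -> s.getGrade() >= PASS_THRESHOLD,
+     *                                                    counting()));
+     * }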
+ * + * There are no guarantees on the type, mutability, + * serializability, or thread-safety of the {@code Map} returned. + * + * @param predicate The predicate used for classifying input elements + * @param downstream A {@code Collector} implementing the downstream reduction + * @param The type of the input elements + * @param The result type of the downstream reduction + * @return A {@code Collector} implementing the cascaded partitioning operation. + * @see #partitioningBy(Predicate) + */ + public static + Collector> partitioningBy(Predicate predicate, + Collector downstream) { + BiFunction downstreamAccumulator = downstream.accumulator(); + BiFunction, T, Map> accumulator = (result, t) -> { + Partition asPartition = ((Partition) result); + if (predicate.test(t)) { + D newResult = downstreamAccumulator.apply(asPartition.forTrue, t); + if (newResult != asPartition.forTrue) + asPartition.forTrue = newResult; + } + else { + D newResult = downstreamAccumulator.apply(asPartition.forFalse, t); + if (newResult != asPartition.forFalse) + asPartition.forFalse = newResult; + } + return result; + }; + return new CollectorImpl<>(() -> new Partition<>(downstream.resultSupplier().get(), + downstream.resultSupplier().get()), + accumulator, partitionMerger(downstream.combiner()), CH_STRICT); + } + + /** Merge function for two partitions, given a merge function for the elements. */ + private static BinaryOperator> partitionMerger(BinaryOperator op) { + return (m1, m2) -> { + Partition left = (Partition) m1; + Partition right = (Partition) m2; + if (left.forFalse == null) + left.forFalse = right.forFalse; + else if (right.forFalse != null) + left.forFalse = op.apply(left.forFalse, right.forFalse); + if (left.forTrue == null) + left.forTrue = right.forTrue; + else if (right.forTrue != null) + left.forTrue = op.apply(left.forTrue, right.forTrue); + return left; + }; + } + + /** + * Accumulate elements into a {@code Map} whose keys and values are the + * result of applying mapping functions to the input elements. + * If the mapped keys contains duplicates (according to + * {@link Object#equals(Object)}), an {@code IllegalStateException} is + * thrown when the collection operation is performed. If the mapped keys + * may have duplicates, use {@link #toMap(Function, Function, BinaryOperator)} + * instead. + * + * @apiNote + * It is common for either the key or the value to be the input elements. + * In this case, the utility method + * {@link java.util.function.Functions#identity()} may be helpful. + * For example, the following produces a {@code Map} mapping + * students to their grade point average: + *
{@code
+     *     Map<Student, Double> studentToGPA
+     *         = students.stream().collect(toMap(Functions.identity(),
+     *                                           student -> computeGPA(student)));
+     * }
+ * And the following produces a {@code Map} mapping a unique identifier to + * students: + *
{@code
+     *     Map<String, Student> studentIdToStudent
+     *         = students.stream().collect(toMap(Student::getId,
+     *                                           Functions.identity()));
+     * }
+ * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @return A {@code Collector} which collects elements into a {@code Map} + * whose keys and values are the result of applying mapping functions to + * the input elements + * @see #toMap(Function, Function, BinaryOperator) + * @see #toMap(Function, Function, BinaryOperator, Supplier) + * @see #toConcurrentMap(Function, Function) + */ + public static + Collector> toMap(Function keyMapper, + Function valueMapper) { + return toMap(keyMapper, valueMapper, throwingMerger(), HashMap::new); + } + + /** + * Accumulate elements into a {@code Map} whose keys and values are the + * result of applying mapping functions to the input elements. If the mapped + * keys contains duplicates (according to {@link Object#equals(Object)}), + * the value mapping function is applied to each equal element, and the + * results are merged using the provided merging function. + * + * @apiNote + * There are multiple ways to deal with collisions between multiple elements + * mapping to the same key. There are some predefined merging functions, + * such as {@link #throwingMerger()}, {@link #firstWinsMerger()}, and + * {@link #lastWinsMerger()}, that implement common policies, or you can + * implement custom policies easily. For example, if you have a stream + * of {@code Person}, and you want to produce a "phone book" mapping name to + * address, but it is possible that two persons have the same name, you can + * do as follows to gracefully deals with these collisions, and produce a + * {@code Map} mapping names to a concatened list of addresses: + *
{@code
+     *     Map<String, String> phoneBook
+     *         = people.stream().collect(toMap(Person::getName,
+     *                                         Person::getAddress,
+     *                                         (s, a) -> s + ", " + a));
+     * }
+ * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @param mergeFunction A merge function, used to resolve collisions between + * values associated with the same key, as supplied + * to {@link Map#merge(Object, Object, BiFunction)} + * @return A {@code Collector} which collects elements into a {@code Map} + * whose keys are the result of applying a key mapping function to the input + * elements, and whose values are the result of applying a value mapping + * function to all input elements equal to the key and combining them + * using the merge function + * @see #toMap(Function, Function) + * @see #toMap(Function, Function, BinaryOperator, Supplier) + * @see #toConcurrentMap(Function, Function, BinaryOperator) + */ + public static + Collector> toMap(Function keyMapper, + Function valueMapper, + BinaryOperator mergeFunction) { + return toMap(keyMapper, valueMapper, mergeFunction, HashMap::new); + } + + /** + * Accumulate elements into a {@code Map} whose keys and values are the + * result of applying mapping functions to the input elements. If the mapped + * keys contains duplicates (according to {@link Object#equals(Object)}), + * the value mapping function is applied to each equal element, and the + * results are merged using the provided merging function. The {@code Map} + * is created by a provided supplier function. + * + * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param The type of the resulting {@code Map} + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @param mergeFunction A merge function, used to resolve collisions between + * values associated with the same key, as supplied + * to {@link Map#merge(Object, Object, BiFunction)} + * @param mapSupplier A function which returns a new, empty {@code Map} into + * which the results will be inserted + * @return A {@code Collector} which collects elements into a {@code Map} + * whose keys are the result of applying a key mapping function to the input + * elements, and whose values are the result of applying a value mapping + * function to all input elements equal to the key and combining them + * using the merge function + * @see #toMap(Function, Function) + * @see #toMap(Function, Function, BinaryOperator) + * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier) + */ + public static > + Collector toMap(Function keyMapper, + Function valueMapper, + BinaryOperator mergeFunction, + Supplier mapSupplier) { + BiFunction accumulator + = (map, element) -> { + map.merge(keyMapper.apply(element), valueMapper.apply(element), mergeFunction); + return map; + }; + return new CollectorImpl<>(mapSupplier, accumulator, mapMerger(mergeFunction), CH_STRICT); + } + + /** + * Accumulate elements into a {@code ConcurrentMap} whose keys and values + * are the result of applying mapping functions to the input elements. + * If the mapped keys contains duplicates (according to + * {@link Object#equals(Object)}), an {@code IllegalStateException} is + * thrown when the collection operation is performed. If the mapped keys + * may have duplicates, use + * {@link #toConcurrentMap(Function, Function, BinaryOperator)} instead. 
+ * + * @apiNote + * It is common for either the key or the value to be the input elements. + * In this case, the utility method + * {@link java.util.function.Functions#identity()} may be helpful. + * For example, the following produces a {@code Map} mapping + * students to their grade point average: + *
{@code
+     *     Map<Student, Double> studentToGPA
+     *         = students.stream().collect(toConcurrentMap(Functions.identity(),
+     *                                                     student -> computeGPA(student)));
+     * }
+ * And the following produces a {@code Map} mapping a unique identifier to + * students: + *
{@code
+     *     Map<String, Student> studentIdToStudent
+     *         = students.stream().collect(toConcurrentMap(Student::getId,
+     *                                                     Functions.identity()));
+     * }
+ * + *

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @return A concurrent {@code Collector} which collects elements into a + * {@code ConcurrentMap} whose keys are the result of applying a key mapping + * function to the input elements, and whose values are the result of + * applying a value mapping function to the input elements + * @see #toMap(Function, Function) + * @see #toConcurrentMap(Function, Function, BinaryOperator) + * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier) + */ + public static + Collector> toConcurrentMap(Function keyMapper, + Function valueMapper) { + return toConcurrentMap(keyMapper, valueMapper, throwingMerger(), ConcurrentHashMap::new); + } + + /** + * Accumulate elements into a {@code ConcurrentMap} whose keys and values + * are the result of applying mapping functions to the input elements. If + * the mapped keys contains duplicates (according to {@link Object#equals(Object)}), + * the value mapping function is applied to each equal element, and the + * results are merged using the provided merging function. + * + * @apiNote + * There are multiple ways to deal with collisions between multiple elements + * mapping to the same key. There are some predefined merging functions, + * such as {@link #throwingMerger()}, {@link #firstWinsMerger()}, and + * {@link #lastWinsMerger()}, that implement common policies, or you can + * implement custom policies easily. For example, if you have a stream + * of {@code Person}, and you want to produce a "phone book" mapping name to + * address, but it is possible that two persons have the same name, you can + * do as follows to gracefully deals with these collisions, and produce a + * {@code Map} mapping names to a concatened list of addresses: + *

{@code
+     *     Map<String, String> phoneBook =
+     *         people.stream().collect(toConcurrentMap(Person::getName,
+     *                                                 Person::getAddress,
+     *                                                 (s, a) -> s + ", " + a));
+     * }
+ * + *
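For the concurrent variant, a runnable version of the phone-book example above, with a hypothetical Person class filled in; it assumes Collection.parallelStream() and java.util.stream.Collectors as they later shipped in JDK 8:

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.ConcurrentMap;
    import static java.util.stream.Collectors.toConcurrentMap;

    public class PhoneBookExample {
        // Stand-in for the Person type used in the javadoc example.
        static class Person {
            final String name;
            final String address;
            Person(String name, String address) { this.name = name; this.address = address; }
            String getName()    { return name; }
            String getAddress() { return address; }
        }

        public static void main(String[] args) {
            List<Person> people = Arrays.asList(
                    new Person("Ann", "1 Oak St"),
                    new Person("Bob", "2 Elm St"),
                    new Person("Ann", "3 Pine St"));          // duplicate key

            // CONCURRENT + UNORDERED: a single ConcurrentMap is populated from all threads,
            // so the merged value for "Ann" may list the two addresses in either order.
            ConcurrentMap<String, String> phoneBook = people.parallelStream()
                    .collect(toConcurrentMap(Person::getName,
                                             Person::getAddress,
                                             (a, b) -> a + ", " + b));
            System.out.println(phoneBook.get("Ann"));
        }
    }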

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @param mergeFunction A merge function, used to resolve collisions between + * values associated with the same key, as supplied + * to {@link Map#merge(Object, Object, BiFunction)} + * @return A concurrent {@code Collector} which collects elements into a + * {@code ConcurrentMap} whose keys are the result of applying a key mapping + * function to the input elements, and whose values are the result of + * applying a value mapping function to all input elements equal to the key + * and combining them using the merge function + * @see #toConcurrentMap(Function, Function) + * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier) + * @see #toMap(Function, Function, BinaryOperator) + */ + public static + Collector> toConcurrentMap(Function keyMapper, + Function valueMapper, + BinaryOperator mergeFunction) { + return toConcurrentMap(keyMapper, valueMapper, mergeFunction, ConcurrentHashMap::new); + } + + /** + * Accumulate elements into a {@code ConcurrentMap} whose keys and values + * are the result of applying mapping functions to the input elements. If + * the mapped keys contains duplicates (according to {@link Object#equals(Object)}), + * the value mapping function is applied to each equal element, and the + * results are merged using the provided merging function. The + * {@code ConcurrentMap} is created by a provided supplier function. + * + *

This is a {@link Collector.Characteristics#CONCURRENT concurrent} and + * {@link Collector.Characteristics#UNORDERED unordered} Collector. + * + * @param The type of the input elements + * @param The output type of the key mapping function + * @param The output type of the value mapping function + * @param The type of the resulting {@code ConcurrentMap} + * @param keyMapper The mapping function to produce keys + * @param valueMapper The mapping function to produce values + * @param mergeFunction A merge function, used to resolve collisions between + * values associated with the same key, as supplied + * to {@link Map#merge(Object, Object, BiFunction)} + * @param mapSupplier A function which returns a new, empty {@code Map} into + * which the results will be inserted + * @return A concurrent {@code Collector} which collects elements into a + * {@code ConcurrentMap} whose keys are the result of applying a key mapping + * function to the input elements, and whose values are the result of + * applying a value mapping function to all input elements equal to the key + * and combining them using the merge function + * @see #toConcurrentMap(Function, Function) + * @see #toConcurrentMap(Function, Function, BinaryOperator) + * @see #toMap(Function, Function, BinaryOperator, Supplier) + */ + public static > + Collector toConcurrentMap(Function keyMapper, + Function valueMapper, + BinaryOperator mergeFunction, + Supplier mapSupplier) { + BiFunction accumulator = (map, element) -> { + map.merge(keyMapper.apply(element), valueMapper.apply(element), mergeFunction); + return map; + }; + return new CollectorImpl<>(mapSupplier, accumulator, mapMerger(mergeFunction), CH_CONCURRENT); + } + + /** + * Returns a {@code Collector} which applies an {@code int}-producing + * mapping function to each input element, and returns summary statistics + * for the resulting values. + * + * @param mapper The mapping function to apply to each element + * @param The type of the input elements + * @return A {@code Collector} implementing the summary-statistics reduction + * @see #toDoubleSummaryStatistics(ToDoubleFunction) + * @see #toLongSummaryStatistics(ToLongFunction) + */ + public static + Collector toIntSummaryStatistics(ToIntFunction mapper) { + return new CollectorImpl<>(IntSummaryStatistics::new, + (r, t) -> { r.accept(mapper.applyAsInt(t)); return r; }, + (l, r) -> { l.combine(r); return l; }, CH_STRICT); + } + + /** + * Returns a {@code Collector} which applies an {@code long}-producing + * mapping function to each input element, and returns summary statistics + * for the resulting values. + * + * @param mapper The mapping function to apply to each element + * @param The type of the input elements + * @return A {@code Collector} implementing the summary-statistics reduction + * @see #toDoubleSummaryStatistics(ToDoubleFunction) + * @see #toIntSummaryStatistics(ToIntFunction) + */ + public static + Collector toLongSummaryStatistics(ToLongFunction mapper) { + return new CollectorImpl<>(LongSummaryStatistics::new, + (r, t) -> { r.accept(mapper.applyAsLong(t)); return r; }, + (l, r) -> { l.combine(r); return l; }, CH_STRICT); + } + + /** + * Returns a {@code Collector} which applies an {@code double}-producing + * mapping function to each input element, and returns summary statistics + * for the resulting values. 
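A short usage sketch of these summary-statistics collectors, using the names introduced by this patch (the shipped JDK 8 later exposes them as summarizingInt/summarizingLong/summarizingDouble); the word list is illustrative:

    import java.util.Arrays;
    import java.util.IntSummaryStatistics;
    import java.util.List;
    import static java.util.stream.Collectors.toIntSummaryStatistics;   // name as defined in this patch

    public class SummaryStatisticsExample {
        public static void main(String[] args) {
            List<String> words = Arrays.asList("alpha", "beta", "gamma", "pi");

            // One pass over the stream yields count, sum, min, max and average of the mapped ints.
            IntSummaryStatistics stats =
                    words.stream().collect(toIntSummaryStatistics(String::length));

            System.out.println(stats.getCount());     // 4
            System.out.println(stats.getSum());       // 16
            System.out.println(stats.getMin());       // 2
            System.out.println(stats.getMax());       // 5
            System.out.println(stats.getAverage());   // 4.0
        }
    }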
+ * + * @param mapper The mapping function to apply to each element + * @param The type of the input elements + * @return A {@code Collector} implementing the summary-statistics reduction + * @see #toLongSummaryStatistics(ToLongFunction) + * @see #toIntSummaryStatistics(ToIntFunction) + */ + public static + Collector toDoubleSummaryStatistics(ToDoubleFunction mapper) { + return new CollectorImpl<>(DoubleSummaryStatistics::new, + (r, t) -> { r.accept(mapper.applyAsDouble(t)); return r; }, + (l, r) -> { l.combine(r); return l; }, CH_STRICT); + } + + /** Implementation class used by partitioningBy. */ + private static final class Partition + extends AbstractMap + implements Map { + T forTrue; + T forFalse; + + Partition(T forTrue, T forFalse) { + this.forTrue = forTrue; + this.forFalse = forFalse; + } + + @Override + public Set> entrySet() { + return new AbstractSet>() { + @Override + public Iterator> iterator() { + + return new Iterator>() { + int state = 0; + + @Override + public boolean hasNext() { + return state < 2; + } + + @Override + public Map.Entry next() { + if (state >= 2) + throw new NoSuchElementException(); + return (state++ == 0) + ? new SimpleImmutableEntry<>(false, forFalse) + : new SimpleImmutableEntry<>(true, forTrue); + } + }; + } + + @Override + public int size() { + return 2; + } + }; + } + } +} # HG changeset patch # User briangoetz # Date 1366235893 14400 # Node ID 59cca31501f113a025788e7769e2cf9f068bd040 # Parent 68560ad3901c59654b0dfb0a01d38aa2ac2129d5 Akhil's Collection extension methods diff --git a/src/share/classes/java/util/ArrayList.java b/src/share/classes/java/util/ArrayList.java --- a/src/share/classes/java/util/ArrayList.java +++ b/src/share/classes/java/util/ArrayList.java @@ -25,6 +25,10 @@ package java.util; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; + /** * Resizable-array implementation of the List interface. 
Implements * all optional list operations, and permits all elements, including @@ -1168,4 +1172,90 @@ throw new ConcurrentModificationException(); } } + + @Override + public void forEach(Consumer action) { + Objects.requireNonNull(action); + final int expectedModCount = modCount; + @SuppressWarnings("unchecked") + final E[] elementData = (E[]) this.elementData; + final int size = this.size; + for (int i=0; modCount == expectedModCount && i < size; i++) { + action.accept(elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + } + + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + // figure out which elements are to be removed + // any exception thrown from the filter predicate at this stage + // will leave the collection unmodified + int removeCount = 0; + final BitSet removeSet = new BitSet(size); + final int expectedModCount = modCount; + final int size = this.size; + for (int i=0; modCount == expectedModCount && i < size; i++) { + @SuppressWarnings("unchecked") + final E element = (E) elementData[i]; + if (filter.test(element)) { + removeSet.set(i); + removeCount++; + } + } + + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + + // shift surviving elements left over the spaces left by removed elements + final boolean anyToRemove = removeCount > 0; + if (anyToRemove) { + final int newSize = size - removeCount; + for (int i=0, j=0; modCount == expectedModCount && + (i < size) && (j < newSize); i++, j++) { + i = removeSet.nextClearBit(i); + elementData[j] = elementData[i]; + } + for (int k=newSize; modCount == expectedModCount && k < size; k++) { + elementData[k] = null; // Let gc do its work + } + this.size = newSize; + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + return anyToRemove; + } + + @Override + @SuppressWarnings("unchecked") + public void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final int expectedModCount = modCount; + final int size = this.size; + for (int i=0; modCount == expectedModCount && i < size; i++) { + elementData[i] = operator.apply((E) elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + @Override + @SuppressWarnings("unchecked") + public void sort(Comparator c) { + final int expectedModCount = modCount; + Arrays.sort((E[]) elementData, 0, size, c); + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } } diff --git a/src/share/classes/java/util/Collection.java b/src/share/classes/java/util/Collection.java --- a/src/share/classes/java/util/Collection.java +++ b/src/share/classes/java/util/Collection.java @@ -25,6 +25,8 @@ package java.util; +import java.util.function.Predicate; + /** * The root interface in the collection hierarchy. A collection * represents a group of objects, known as its elements. Some @@ -373,6 +375,38 @@ boolean removeAll(Collection c); /** + * Removes all of the elements of this collection which match the provided + * predicate. Exceptions thrown by the predicate are relayed to the caller. + * + * @implSpec + * The default implementation traverses all elements of the collection using + * its {@link #iterator}. Each matching element is removed using + * {@link Iterator#remove()}. 
If the collection's iterator does not + * support removal then an {@code UnsupportedOperationException} will be + * thrown on the first matching element. + * + * @param filter a predicate which returns {@code true} for elements to be + * removed + * @return {@code true} if any elements were removed + * @throws NullPointerException if the specified filter is null + * @throws UnsupportedOperationException if the {@code removeIf} + * method is not supported by this collection + * @since 1.8 + */ + default boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + boolean removed = false; + final Iterator each = iterator(); + while (each.hasNext()) { + if (filter.test(each.next())) { + each.remove(); + removed = true; + } + } + return removed; + } + + /** * Retains only the elements in this collection that are contained in the * specified collection (optional operation). In other words, removes from * this collection all of its elements that are not contained in the diff --git a/src/share/classes/java/util/Collections.java b/src/share/classes/java/util/Collections.java --- a/src/share/classes/java/util/Collections.java +++ b/src/share/classes/java/util/Collections.java @@ -30,7 +30,10 @@ import java.lang.reflect.Array; import java.util.function.BiConsumer; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; /** * This class consists exclusively of static methods that operate on or return @@ -1110,6 +1113,15 @@ public void clear() { throw new UnsupportedOperationException(); } + + @Override + public void forEach(Consumer action) { + c.forEach(action); + } + @Override + public boolean removeIf(Predicate filter) { + throw new UnsupportedOperationException(); + } } /** @@ -1240,6 +1252,16 @@ public boolean addAll(int index, Collection c) { throw new UnsupportedOperationException(); } + + @Override + public void replaceAll(UnaryOperator operator) { + throw new UnsupportedOperationException(); + } + @Override + public void sort(Comparator c) { + throw new UnsupportedOperationException(); + } + public ListIterator listIterator() {return listIterator(0);} public ListIterator listIterator(final int index) { @@ -1742,6 +1764,15 @@ private void writeObject(ObjectOutputStream s) throws IOException { synchronized (mutex) {s.defaultWriteObject();} } + + @Override + public void forEach(Consumer action) { + synchronized (mutex) {c.forEach(action);} + } + @Override + public boolean removeIf(Predicate filter) { + synchronized (mutex) {return c.removeIf(filter);} + } } /** @@ -1996,6 +2027,15 @@ } } + @Override + public void replaceAll(UnaryOperator operator) { + synchronized (mutex) {list.replaceAll(operator);} + } + @Override + public void sort(Comparator c) { + synchronized (mutex) {list.sort(c);} + } + /** * SynchronizedRandomAccessList instances are serialized as * SynchronizedList instances to allow them to be deserialized @@ -2492,6 +2532,15 @@ // element as we added it) return c.addAll(checkedCopyOf(coll)); } + + @Override + public void forEach(Consumer action) { + c.forEach(action); + } + @Override + public boolean removeIf(Predicate filter) { + return c.removeIf(filter); + } } /** @@ -2753,6 +2802,15 @@ public List subList(int fromIndex, int toIndex) { return new CheckedList<>(list.subList(fromIndex, toIndex), type); } + + @Override + public void replaceAll(UnaryOperator operator) { + list.replaceAll(operator); + } + @Override + public void 
sort(Comparator c) { + list.sort(c); + } } /** @@ -3416,6 +3474,16 @@ return a; } + @Override + public void forEach(Consumer action) { + Objects.requireNonNull(action); + } + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + return false; + } + // Preserves singleton property private Object readResolve() { return EMPTY_SET; @@ -3523,6 +3591,16 @@ public E last() { throw new NoSuchElementException(); } + + @Override + public void forEach(Consumer action) { + Objects.requireNonNull(action); + } + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + return false; + } } /** @@ -3592,6 +3670,24 @@ public int hashCode() { return 1; } + @Override + public void forEach(Consumer action) { + Objects.requireNonNull(action); + } + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + return false; + } + @Override + public void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + } + @Override + public void sort(Comparator c) { + Objects.requireNonNull(c); + } + // Preserves singleton property private Object readResolve() { return EMPTY_LIST; @@ -3770,6 +3866,15 @@ public int size() {return 1;} public boolean contains(Object o) {return eq(o, element);} + + @Override + public void forEach(Consumer action) { + action.accept(element); + } + @Override + public boolean removeIf(Predicate filter) { + throw new UnsupportedOperationException(); + } } /** @@ -3810,6 +3915,22 @@ throw new IndexOutOfBoundsException("Index: "+index+", Size: 1"); return element; } + + @Override + public void forEach(Consumer action) { + action.accept(element); + } + @Override + public boolean removeIf(Predicate filter) { + throw new UnsupportedOperationException(); + } + @Override + public void replaceAll(UnaryOperator operator) { + throw new UnsupportedOperationException(); + } + @Override + public void sort(Comparator c) { + } } /** @@ -4408,6 +4529,15 @@ public boolean retainAll(Collection c) {return s.retainAll(c);} // addAll is the only inherited implementation + @Override + public void forEach(Consumer action) { + s.forEach(action); + } + @Override + public boolean removeIf(Predicate filter) { + return s.removeIf(filter); + } + private static final long serialVersionUID = 2454657854757543876L; private void readObject(java.io.ObjectInputStream stream) @@ -4466,5 +4596,14 @@ public boolean removeAll(Collection c) {return q.removeAll(c);} public boolean retainAll(Collection c) {return q.retainAll(c);} // We use inherited addAll; forwarding addAll would be wrong + + @Override + public void forEach(Consumer action) { + q.forEach(action); + } + @Override + public boolean removeIf(Predicate filter) { + return q.removeIf(filter); + } } } diff --git a/src/share/classes/java/util/List.java b/src/share/classes/java/util/List.java --- a/src/share/classes/java/util/List.java +++ b/src/share/classes/java/util/List.java @@ -25,6 +25,8 @@ package java.util; +import java.util.function.UnaryOperator; + /** * An ordered collection (also known as a sequence). The user of this * interface has precise control over where in the list each element is @@ -375,6 +377,62 @@ boolean retainAll(Collection c); /** + * Replaces each element of this list with the result of applying the + * operator to that element. Exceptions thrown by the operator are relayed + * to the caller. + * + * @implSpec + * The default implementation is equivalent to, for this {@code list}: + *

+     * final ListIterator<E> li = list.listIterator();
+     * while (li.hasNext()) {
+     *   li.set(operator.apply(li.next()));
+     * }
+     * 
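For orientation, a compact runnable sketch of the new defaults introduced in this changeset working together: removeIf from Collection, replaceAll and sort from List, and forEach from Iterable. String::compareTo stands in for a natural-order comparator (the patch also accepts a null comparator for natural ordering):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ListDefaultsExample {
        public static void main(String[] args) {
            List<String> names = new ArrayList<>(Arrays.asList("carol", "alice", "bob", ""));

            names.removeIf(String::isEmpty);          // Collection default: drop blank entries
            names.replaceAll(String::toUpperCase);    // List default: in-place transform
            names.sort(String::compareTo);            // List default: sorts the list in place
            names.forEach(System.out::println);       // ALICE, BOB, CAROL

            // Note: a predicate or operator that structurally modifies the list
            // (e.g. x -> names.add(x)) makes ArrayList and Vector throw
            // ConcurrentModificationException, as the tests later in this patch verify.
        }
    }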
+ * If the list's list-iterator does not support the {@code set} operation + * then an {@code UnsupportedOperationException} will be thrown when + * replacing the first element. + * + * @param operator the operator to apply to each element + * @throws UnsupportedOperationException if the replaceAll + * operation is not supported by this list + * @throws NullPointerException if the specified operator is null + * @throws NullPointerException if the an element is replaced with a null + * value and this list does not permit null elements + * (optional) + * @since 1.8 + */ + default void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final ListIterator li = this.listIterator(); + while (li.hasNext()) { + li.set(operator.apply(li.next())); + } + } + + /** + * Sorts this list using the supplied {@code Comparator} to compare elements. + * + * @implSpec + * The default implementation is equivalent to, for this {@code list}: + *
Collections.sort(list, c)
+ * + * @param c the {@code Comparator} used to compare list elements. + * A {@code null} value indicates that the elements' + * {@linkplain Comparable natural ordering} should be used. + * @since 1.8 + * @throws ClassCastException if the list contains elements that are not + * mutually comparable using the specified comparator. + * @throws UnsupportedOperationException if the list's list-iterator does + * not support the {@code set} operation. + * @throws IllegalArgumentException (optional) if the comparator is + * found to violate the {@link Comparator} contract + */ + default void sort(Comparator c) { + Collections.sort(this, c); + } + + /** * Removes all of the elements from this list (optional operation). * The list will be empty after this call returns. * diff --git a/src/share/classes/java/util/Vector.java b/src/share/classes/java/util/Vector.java --- a/src/share/classes/java/util/Vector.java +++ b/src/share/classes/java/util/Vector.java @@ -25,6 +25,10 @@ package java.util; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; + /** * The {@code Vector} class implements a growable array of * objects. Like an array, it contains components that can be @@ -1209,4 +1213,91 @@ lastRet = -1; } } + + @Override + public synchronized void forEach(Consumer action) { + Objects.requireNonNull(action); + final int expectedModCount = modCount; + @SuppressWarnings("unchecked") + final E[] elementData = (E[]) this.elementData; + final int elementCount = this.elementCount; + for (int i=0; modCount == expectedModCount && i < elementCount; i++) { + action.accept(elementData[i]); + } + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + } + + @Override + @SuppressWarnings("unchecked") + public synchronized boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + // figure out which elements are to be removed + // any exception thrown from the filter predicate at this stage + // will leave the collection unmodified + int removeCount = 0; + final int size = elementCount; + final BitSet removeSet = new BitSet(size); + final int expectedModCount = modCount; + for (int i=0; modCount == expectedModCount && i < size; i++) { + @SuppressWarnings("unchecked") + final E element = (E) elementData[i]; + if (filter.test(element)) { + removeSet.set(i); + removeCount++; + } + } + + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + + // shift surviving elements left over the spaces left by removed elements + final boolean anyToRemove = removeCount > 0; + if (anyToRemove) { + final int newSize = size - removeCount; + for (int i=0, j=0; modCount == expectedModCount && + (i < size) && (j < newSize); i++, j++) { + i = removeSet.nextClearBit(i); + elementData[j] = elementData[i]; + } + for (int k=newSize; modCount == expectedModCount && k < size; k++) { + elementData[k] = null; // Let gc do its work + } + elementCount = newSize; + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } + + return anyToRemove; + } + + @Override + @SuppressWarnings("unchecked") + public synchronized void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final int expectedModCount = modCount; + final int size = elementCount; + for (int i=0; modCount == expectedModCount && i < size; i++) { + elementData[i] = operator.apply((E) elementData[i]); + } + if (modCount != expectedModCount) { + throw new 
ConcurrentModificationException(); + } + modCount++; + } + + @Override + @SuppressWarnings("unchecked") + public synchronized void sort(Comparator c) { + final int expectedModCount = modCount; + Arrays.sort((E[]) elementData, 0, elementCount, c); + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + modCount++; + } } diff --git a/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java b/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java --- a/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java +++ b/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java @@ -36,6 +36,9 @@ package java.util.concurrent; import java.util.*; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.UnaryOperator; /** * A thread-safe variant of {@link java.util.ArrayList} in which all mutative @@ -1317,6 +1320,91 @@ } } + @SuppressWarnings("unchecked") + public void forEach(Consumer action) { + Objects.requireNonNull(action); + final Object[] elements = getArray(); + for (final Object element : elements) { + action.accept((E) element); + } + } + + @Override + public void sort(Comparator c) { + final ReentrantLock lock = this.lock; + lock.lock(); + try { + @SuppressWarnings("unchecked") + final E[] elements = (E[]) getArray(); + final E[] newElements = Arrays.copyOf(elements, elements.length); + Arrays.sort(newElements, c); + setArray(newElements); + } finally { + lock.unlock(); + } + } + + @Override + public boolean removeIf(Predicate filter) { + Objects.requireNonNull(filter); + final ReentrantLock lock = this.lock; + lock.lock(); + try { + @SuppressWarnings("unchecked") + final E[] elements = (E[]) getArray(); + final int size = elements.length; + + // figure out which elements are to be removed + // any exception thrown from the filter predicate at this stage + // will leave the collection unmodified + int removeCount = 0; + final BitSet removeSet = new BitSet(size); + for (int i=0; i < size; i++) { + final E element = elements[i]; + if (filter.test(element)) { + removeSet.set(i); + removeCount++; + } + } + + // copy surviving elements into a new array + final boolean anyToRemove = removeCount > 0; + if (anyToRemove) { + final int newSize = size - removeCount; + final Object[] newElements = new Object[newSize]; + for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) { + i = removeSet.nextClearBit(i); + newElements[j] = elements[i]; + } + setArray(newElements); + } + + return anyToRemove; + } finally { + lock.unlock(); + } + } + + @Override + public void replaceAll(UnaryOperator operator) { + Objects.requireNonNull(operator); + final ReentrantLock lock = this.lock; + lock.lock(); + try { + @SuppressWarnings("unchecked") + final E[] elements = (E[]) getArray(); + final int len = elements.length; + @SuppressWarnings("unchecked") + final E[] newElements = (E[]) new Object[len]; + for (int i=0; i < len; i++) { + newElements[i] = operator.apply(elements[i]); + } + setArray(newElements); + } finally { + lock.unlock(); + } + } + // Support for resetting lock while deserializing private void resetLock() { UNSAFE.putObjectVolatile(this, lockOffset, new ReentrantLock()); diff --git a/test/java/util/CollectionExtensionMethods/CollectionExtensionMethodsTest.java b/test/java/util/CollectionExtensionMethods/CollectionExtensionMethodsTest.java new file mode 100644 --- /dev/null +++ 
b/test/java/util/CollectionExtensionMethods/CollectionExtensionMethodsTest.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import java.util.TreeSet; +import java.util.function.Predicate; + +/** + * @test + * @library testlibrary + * @build CollectionAsserts CollectionSupplier + * @run testng CollectionExtensionMethodsTest + * @summary Unit tests for extension methods on Collection + */ +public class CollectionExtensionMethodsTest { + + public static final Predicate pEven = x -> 0 == x % 2; + public static final Predicate pOdd = x -> 1 == x % 2; + + private static final String[] SET_CLASSES = { + "java.util.HashSet", + "java.util.LinkedHashSet", + "java.util.TreeSet" + }; + + private static final int SIZE = 100; + + @DataProvider(name="setProvider") + public static Object[][] setCases() { + final List cases = new LinkedList<>(); + cases.add(new Object[] { new HashSet<>() }); + cases.add(new Object[] { new LinkedHashSet<>() }); + cases.add(new Object[] { new TreeSet<>() }); + + cases.add(new Object[] { new HashSet(){{add(42);}} }); + cases.add(new Object[] { new LinkedHashSet(){{add(42);}} }); + cases.add(new Object[] { new TreeSet(){{add(42);}} }); + return cases.toArray(new Object[0][cases.size()]); + } + + @Test(dataProvider = "setProvider") + public void testProvidedWithNull(final Set set) throws Exception { + try { + set.forEach(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + try { + set.removeIf(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + } + + @Test + public void testForEach() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(SET_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final Set original = ((Set) test.original); + final Set set = ((Set) test.collection); + + try { + set.forEach(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + if (test.className.equals("java.util.HashSet")) { + 
CollectionAsserts.assertContentsUnordered(set, original); + } else { + CollectionAsserts.assertContents(set, original); + } + + final List actual = new LinkedList<>(); + set.forEach(actual::add); + if (test.className.equals("java.util.HashSet")) { + CollectionAsserts.assertContentsUnordered(actual, set); + CollectionAsserts.assertContentsUnordered(actual, original); + } else { + CollectionAsserts.assertContents(actual, set); + CollectionAsserts.assertContents(actual, original); + } + } + } + + @Test + public void testRemoveIf() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(SET_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final Set original = ((Set) test.original); + final Set set = ((Set) test.collection); + + try { + set.removeIf(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + if (test.className.equals("java.util.HashSet")) { + CollectionAsserts.assertContentsUnordered(set, original); + } else { + CollectionAsserts.assertContents(set, original); + } + + set.removeIf(pEven); + for (int i : set) { + assertTrue((i % 2) == 1); + } + for (int i : original) { + if (i % 2 == 1) { + assertTrue(set.contains(i)); + } + } + set.removeIf(pOdd); + assertTrue(set.isEmpty()); + } + } +} diff --git a/test/java/util/CollectionExtensionMethods/ListExtensionMethodsTest.java b/test/java/util/CollectionExtensionMethods/ListExtensionMethodsTest.java new file mode 100644 --- /dev/null +++ b/test/java/util/CollectionExtensionMethods/ListExtensionMethodsTest.java @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Comparators; +import java.util.List; +import java.util.LinkedList; +import java.util.Stack; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.Vector; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import java.lang.reflect.Constructor; +import java.util.ConcurrentModificationException; +import java.util.function.Predicate; + +/** + * @test + * @library testlibrary + * @build CollectionAsserts CollectionSupplier + * @run testng ListExtensionMethodsTest + * @summary Unit tests for extension methods on List + */ +public class ListExtensionMethodsTest { + + private static final String[] LIST_CLASSES = { + "java.util.ArrayList", + "java.util.LinkedList", + "java.util.Vector", + "java.util.concurrent.CopyOnWriteArrayList" + }; + + private static final String[] LIST_CME_CLASSES = { + "java.util.ArrayList", + "java.util.Vector" + }; + + private static final Predicate pEven = x -> 0 == x % 2; + private static final Predicate pOdd = x -> 1 == x % 2; + + private static final Comparator BIT_COUNT_COMPARATOR = + (x, y) -> Integer.bitCount(x) - Integer.bitCount(y); + + private static final Comparator ATOMIC_INTEGER_COMPARATOR = + (x, y) -> x.intValue() - y.intValue(); + + private static final int SIZE = 100; + private static final int SUBLIST_FROM = 2; + private static final int SUBLIST_TO = SIZE - SUBLIST_FROM; + private static final int SUBLIST_SIZE = SUBLIST_TO - SUBLIST_FROM; + + private static interface Callback { + void call(List list); + } + + // call the callback for each recursive subList + private void trimmedSubList(final List list, final Callback callback) { + int size = list.size(); + if (size > 1) { + // trim 1 element from both ends + final List subList = list.subList(1, size - 1); + callback.call(subList); + trimmedSubList(subList, callback); + } + } + + @DataProvider(name="listProvider") + public static Object[][] listCases() { + final List cases = new LinkedList<>(); + cases.add(new Object[] { new ArrayList<>() }); + cases.add(new Object[] { new LinkedList<>() }); + cases.add(new Object[] { new Vector<>() }); + cases.add(new Object[] { new Stack<>() }); + cases.add(new Object[] { new CopyOnWriteArrayList<>() }); + + cases.add(new Object[] { new ArrayList(){{add(42);}} }); + cases.add(new Object[] { new LinkedList(){{add(42);}} }); + cases.add(new Object[] { new Vector(){{add(42);}} }); + cases.add(new Object[] { new Stack(){{add(42);}} }); + cases.add(new Object[] { new CopyOnWriteArrayList(){{add(42);}} }); + return cases.toArray(new Object[0][cases.size()]); + } + + @Test(dataProvider = "listProvider") + public void testProvidedWithNull(final List list) throws Exception { + try { + list.forEach(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + try { + list.replaceAll(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + try { + list.removeIf(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + } + + @Test + public void testForEach() throws Exception { + final CollectionSupplier supplier = new 
CollectionSupplier(LIST_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + } + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + + try { + list.forEach(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + CollectionAsserts.assertContents(list, original); + + final List actual = new LinkedList<>(); + list.forEach(actual::add); + CollectionAsserts.assertContents(actual, list); + CollectionAsserts.assertContents(actual, original); + + if (original.size() > SUBLIST_SIZE) { + final List subList = original.subList(SUBLIST_FROM, SUBLIST_TO); + final List actualSubList = new LinkedList<>(); + subList.forEach(actualSubList::add); + assertEquals(actualSubList.size(), SUBLIST_SIZE); + for (int i = 0; i < SUBLIST_SIZE; i++) { + assertEquals(actualSubList.get(i), original.get(i + SUBLIST_FROM)); + } + } + + trimmedSubList(list, new Callback() { + @Override + public void call(final List list) { + final List actual = new LinkedList<>(); + list.forEach(actual::add); + CollectionAsserts.assertContents(actual, list); + } + }); + } + } + + @Test + public void testRemoveIf() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE); + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + + try { + list.removeIf(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + CollectionAsserts.assertContents(list, original); + + final AtomicInteger offset = new AtomicInteger(1); + while (list.size() > 0) { + removeFirst(original, list, offset); + } + } + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + list.removeIf(pOdd); + for (int i : list) { + assertTrue((i % 2) == 0); + } + for (int i : original) { + if (i % 2 == 0) { + assertTrue(list.contains(i)); + } + } + list.removeIf(pEven); + assertTrue(list.isEmpty()); + } + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + final List listCopy = new ArrayList<>(list); + if (original.size() > SUBLIST_SIZE) { + final List subList = list.subList(SUBLIST_FROM, SUBLIST_TO); + final List subListCopy = new ArrayList<>(subList); + listCopy.removeAll(subList); + subList.removeIf(pOdd); + for (int i : subList) { + assertTrue((i % 2) == 0); + } + for (int i : subListCopy) { + if (i % 2 == 0) { + assertTrue(subList.contains(i)); + } else { + assertFalse(subList.contains(i)); + } + } + subList.removeIf(pEven); + assertTrue(subList.isEmpty()); + // elements outside the view should remain + CollectionAsserts.assertContents(list, listCopy); + } + } + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + trimmedSubList(list, new Callback() { + @Override + public void call(final List list) { + final List copy = new ArrayList<>(list); + list.removeIf(pOdd); + for (int i : list) { + assertTrue((i % 2) == 0); + } + for (int i : copy) { + if (i % 2 == 0) { + assertTrue(list.contains(i)); + } else { + assertFalse(list.contains(i)); + } + } + } + }); + } + } + + // remove the 
first element + private void removeFirst(final List original, final List list, final AtomicInteger offset) { + final AtomicBoolean first = new AtomicBoolean(true); + list.removeIf(x -> first.getAndSet(false)); + CollectionAsserts.assertContents(original.subList(offset.getAndIncrement(), original.size()), list); + } + + @Test + public void testReplaceAll() throws Exception { + final int scale = 3; + final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + + try { + list.replaceAll(null); + fail("expected NPE not thrown"); + } catch (NullPointerException npe) {} + CollectionAsserts.assertContents(list, original); + + list.replaceAll(x -> scale * x); + for (int i=0; i < original.size(); i++) { + assertTrue(list.get(i) == (scale * original.get(i)), "mismatch at index " + i); + } + + if (original.size() > SUBLIST_SIZE) { + final List subList = list.subList(SUBLIST_FROM, SUBLIST_TO); + subList.replaceAll(x -> x + 1); + // verify elements in view [from, to) were replaced + for (int i = 0; i < SUBLIST_SIZE; i++) { + assertTrue(subList.get(i) == ((scale * original.get(i + SUBLIST_FROM)) + 1), + "mismatch at sublist index " + i); + } + // verify that elements [0, from) remain unmodified + for (int i = 0; i < SUBLIST_FROM; i++) { + assertTrue(list.get(i) == (scale * original.get(i)), + "mismatch at original index " + i); + } + // verify that elements [to, size) remain unmodified + for (int i = SUBLIST_TO; i < list.size(); i++) { + assertTrue(list.get(i) == (scale * original.get(i)), + "mismatch at original index " + i); + } + } + } + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + trimmedSubList(list, new Callback() { + @Override + public void call(final List list) { + final List copy = new ArrayList<>(list); + final int offset = 5; + list.replaceAll(x -> offset + x); + for (int i=0; i < copy.size(); i++) { + assertTrue(list.get(i) == (offset + copy.get(i)), "mismatch at index " + i); + } + } + }); + } + } + + @Test + public void testSort() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List original = ((List) test.original); + final List list = ((List) test.collection); + CollectionSupplier.shuffle(list); + list.sort(Integer::compare); + CollectionAsserts.assertSorted(list, Integer::compare); + if (test.name.startsWith("reverse")) { + Collections.reverse(list); + } + CollectionAsserts.assertContents(list, original); + + CollectionSupplier.shuffle(list); + list.sort(null); + CollectionAsserts.assertSorted(list, Comparators.naturalOrder()); + if (test.name.startsWith("reverse")) { + Collections.reverse(list); + } + CollectionAsserts.assertContents(list, original); + + CollectionSupplier.shuffle(list); + list.sort(Comparators.naturalOrder()); + CollectionAsserts.assertSorted(list, Comparators.naturalOrder()); + if (test.name.startsWith("reverse")) { + Collections.reverse(list); + } + CollectionAsserts.assertContents(list, original); + + CollectionSupplier.shuffle(list); + list.sort(Comparators.reverseOrder()); + CollectionAsserts.assertSorted(list, Comparators.reverseOrder()); + if (!test.name.startsWith("reverse")) { + Collections.reverse(list); + } + CollectionAsserts.assertContents(list, original); + + 
CollectionSupplier.shuffle(list); + list.sort(BIT_COUNT_COMPARATOR); + CollectionAsserts.assertSorted(list, BIT_COUNT_COMPARATOR); + // check sort by verifying that bitCount increases and never drops + int minBitCount = 0; + int bitCount = 0; + for (final Integer i : list) { + bitCount = Integer.bitCount(i); + assertTrue(bitCount >= minBitCount); + minBitCount = bitCount; + } + + @SuppressWarnings("unchecked") + final Class> type = + (Class>) Class.forName(test.className); + final Constructor> defaultConstructor = type.getConstructor(); + final List incomparables = (List) defaultConstructor.newInstance(); + + for (int i=0; i < test.original.size(); i++) { + incomparables.add(new AtomicInteger(i)); + } + CollectionSupplier.shuffle(incomparables); + incomparables.sort(ATOMIC_INTEGER_COMPARATOR); + for (int i=0; i < test.original.size(); i++) { + assertEquals(i, incomparables.get(i).intValue()); + } + + if (original.size() > SUBLIST_SIZE) { + final List copy = new ArrayList<>(list); + final List subList = list.subList(SUBLIST_FROM, SUBLIST_TO); + CollectionSupplier.shuffle(subList); + subList.sort(Comparators.naturalOrder()); + CollectionAsserts.assertSorted(subList, Comparators.naturalOrder()); + // verify that elements [0, from) remain unmodified + for (int i = 0; i < SUBLIST_FROM; i++) { + assertTrue(list.get(i) == copy.get(i), + "mismatch at index " + i); + } + // verify that elements [to, size) remain unmodified + for (int i = SUBLIST_TO; i < list.size(); i++) { + assertTrue(list.get(i) == copy.get(i), + "mismatch at index " + i); + } + } + } + + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + trimmedSubList(list, new Callback() { + @Override + public void call(final List list) { + final List copy = new ArrayList<>(list); + CollectionSupplier.shuffle(list); + list.sort(Comparators.naturalOrder()); + CollectionAsserts.assertSorted(list, Comparators.naturalOrder()); + } + }); + } + } + + @Test + public void testRemoveIfThrowsCME() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + if (list.size() <= 1) { + continue; + } + boolean gotException = false; + try { + // bad predicate that modifies its list, should throw CME + list.removeIf((x) -> {return list.add(x);}); + } catch (ConcurrentModificationException cme) { + gotException = true; + } + if (!gotException) { + fail("expected CME was not thrown from " + test); + } + } + } + + @Test + public void testReplaceAllThrowsCME() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + if (list.size() <= 1) { + continue; + } + boolean gotException = false; + try { + // bad predicate that modifies its list, should throw CME + list.replaceAll(x -> {int n = 3 * x; list.add(n); return n;}); + } catch (ConcurrentModificationException cme) { + gotException = true; + } + if (!gotException) { + fail("expected CME was not thrown from " + test); + } + } + } + + @Test + public void testSortThrowsCME() throws Exception { + final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE); + for (final CollectionSupplier.TestCase test : supplier.get()) { + final List list = ((List) test.collection); + if (list.size() <= 1) { + continue; + } + boolean 
gotException = false; + try { + // bad predicate that modifies its list, should throw CME + list.sort((x, y) -> {list.add(x); return x - y;}); + } catch (ConcurrentModificationException cme) { + gotException = true; + } + if (!gotException) { + fail("expected CME was not thrown from " + test); + } + } + } + +} diff --git a/test/java/util/CollectionExtensionMethods/testlibrary/CollectionAsserts.java b/test/java/util/CollectionExtensionMethods/testlibrary/CollectionAsserts.java new file mode 100644 --- /dev/null +++ b/test/java/util/CollectionExtensionMethods/testlibrary/CollectionAsserts.java @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Set; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +/** + * @library + * CollectionAssert -- assertion methods for lambda test cases + */ +public class CollectionAsserts { + + public static void assertCountSum(Iterable it, int count, int sum) { + assertCountSum(it.iterator(), count, sum); + } + + public static void assertCountSum(Iterator it, int count, int sum) { + int c = 0; + int s = 0; + while (it.hasNext()) { + int i = (Integer) it.next(); + c++; + s += i; + } + + assertEquals(c, count); + assertEquals(s, sum); + } + + public static void assertConcat(Iterator it, String result) { + StringBuilder sb = new StringBuilder(); + while (it.hasNext()) { + sb.append(it.next()); + } + + assertEquals(result, sb.toString()); + } + + public static> void assertSorted(Iterator i) { + if (!i.hasNext()) + return; + T last = i.next(); + while (i.hasNext()) { + T t = i.next(); + assertTrue(last.compareTo(t) <= 0); + assertTrue(t.compareTo(last) >= 0); + last = t; + } + } + + public static void assertSorted(Iterator i, Comparator comp) { + if (!i.hasNext()) + return; + T last = i.next(); + while (i.hasNext()) { + T t = i.next(); + assertTrue(comp.compare(last, t) <= 0); + assertTrue(comp.compare(t, last) >= 0); + last = t; + } + } + + public static> void assertSorted(Iterable iter) { + assertSorted(iter.iterator()); + } + + public static void assertSorted(Iterable iter, Comparator comp) { + assertSorted(iter.iterator(), comp); + } + + public static void assertUnique(Iterable iter) { + assertUnique(iter.iterator()); + } + + public static void assertUnique(Iterator iter) { + if (!iter.hasNext()) { + return; + } + + Set uniq = new HashSet<>(); + while(iter.hasNext()) { + T each = iter.next(); + assertTrue(!uniq.contains(each)); + uniq.add(each); + } + } + + public static void assertContents(Iterable actual, Iterable expected) { + assertContents(actual.iterator(), expected.iterator()); + } + + public static void assertContents(Iterator actual, Iterator expected) { + List history = new ArrayList<>(); + + while (expected.hasNext()) { + if (!actual.hasNext()) { + List expectedData = new ArrayList<>(history); + while (expected.hasNext()) + expectedData.add(expected.next()); + fail(String.format("Premature end of data; expected=%s, found=%s", expectedData, history)); + } + T a = actual.next(); + T e = expected.next(); + history.add(a); + + if (!Objects.equals(a, e)) + fail(String.format("Data mismatch; preceding=%s, nextExpected=%s, nextFound=%s", history, e, a)); + } + if (actual.hasNext()) { + List rest = new ArrayList<>(); + while (actual.hasNext()) + rest.add(actual.next()); + fail(String.format("Unexpected data %s after %s", rest, history)); + } + } + + @SafeVarargs + @SuppressWarnings("varargs") + public static void assertContents(Iterator actual, T... 
expected) { + assertContents(actual, Arrays.asList(expected).iterator()); + } + + public static boolean equalsContentsUnordered(Iterable a, Iterable b) { + Set sa = new HashSet<>(); + for (T t : a) { + sa.add(t); + } + + Set sb = new HashSet<>(); + for (T t : b) { + sb.add(t); + } + + return Objects.equals(sa, sb); + } + + public static> void assertContentsUnordered(Iterable actual, Iterable expected) { + ArrayList one = new ArrayList<>(); + for (T t : actual) + one.add(t); + ArrayList two = new ArrayList<>(); + for (T t : expected) + two.add(t); + Collections.sort(one); + Collections.sort(two); + assertContents(one, two); + } + + static void assertSplitContents(Iterable> splits, Iterable list) { + Iterator> mI = splits.iterator(); + Iterator pI = null; + Iterator lI = list.iterator(); + + while (lI.hasNext()) { + if (pI == null) + pI = mI.next().iterator(); + while (!pI.hasNext()) { + if (!mI.hasNext()) { + break; + } + else { + pI = mI.next().iterator(); + } + } + assertTrue(pI.hasNext()); + T pT = pI.next(); + T lT = lI.next(); + assertEquals(pT, lT); + } + + if (pI != null) { + assertTrue(!pI.hasNext()); + } + + while(mI.hasNext()) { + pI = mI.next().iterator(); + assertTrue(!pI.hasNext()); + } + } +} diff --git a/test/java/util/CollectionExtensionMethods/testlibrary/CollectionSupplier.java b/test/java/util/CollectionExtensionMethods/testlibrary/CollectionSupplier.java new file mode 100644 --- /dev/null +++ b/test/java/util/CollectionExtensionMethods/testlibrary/CollectionSupplier.java @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+import java.lang.ArrayStoreException;
+import java.lang.AssertionError;
+import java.lang.Exception;
+import java.lang.Integer;
+import java.lang.Iterable;
+import java.lang.Override;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import org.testng.annotations.Test;
+import org.testng.TestException;
+
+import static org.testng.Assert.assertTrue;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * @library
+ * @summary A Supplier of test cases for Collection tests
+ */
+public final class CollectionSupplier implements Supplier<Iterable<CollectionSupplier.TestCase>> {
+
+    private final String[] classNames;
+    private final int size;
+
+    /**
+     * A Collection test case.
+     */
+    public static final class TestCase {
+
+        /**
+         * The name of the test case.
+         */
+        public final String name;
+
+        /**
+         * Class name of the instantiated Collection.
+         */
+        public final String className;
+
+        /**
+         * Unmodifiable reference collection, useful for comparisons.
+         */
+        public final Collection<Integer> original;
+
+        /**
+         * A modifiable test collection.
+         */
+        public final Collection<Integer> collection;
+
+        /**
+         * Create a Collection test case.
+         * @param name name of the test case
+         * @param className class name of the instantiated collection
+         * @param original reference collection
+         * @param collection the modifiable test collection
+         */
+        public TestCase(String name, String className,
+                Collection<Integer> original, Collection<Integer> collection) {
+            this.name = name;
+            this.className = className;
+            this.original =
+                List.class.isAssignableFrom(original.getClass()) ?
+                Collections.unmodifiableList((List<Integer>) original) :
+                Set.class.isAssignableFrom(original.getClass()) ?
+                Collections.unmodifiableSet((Set<Integer>) original) :
+                Collections.unmodifiableCollection(original);
+            this.collection = collection;
+        }
+
+        @Override
+        public String toString() {
+            return name + " " + className +
+                "\n original: " + original +
+                "\n target: " + collection;
+        }
+    }
+
+    /**
+     * Shuffle a list using a PRNG with a known seed for repeatability.
+     * @param list the list to be shuffled
+     */
+    public static <E> void shuffle(final List<E> list) {
+        // PRNG with known seed for repeatable tests
+        final Random prng = new Random(13);
+        final int size = list.size();
+        for (int i=0; i < size; i++) {
+            // random index in interval [i, size)
+            final int j = i + prng.nextInt(size - i);
+            // swap elements at indices i & j
+            final E e = list.get(i);
+            list.set(i, list.get(j));
+            list.set(j, e);
+        }
+    }
+
+    /**
+     * Create a {@code Supplier} that creates instances of the specified
+     * collection classes, each of the specified size.
+     *
+     * @param classNames class names that implement {@code Collection}
+     * @param size the desired size of each collection
+     */
+    public CollectionSupplier(String[] classNames, int size) {
+        this.classNames = Arrays.copyOf(classNames, classNames.length);
+        this.size = size;
+    }
+
+    @Override
+    public Iterable<TestCase> get() {
+        try {
+            return getThrows();
+        } catch (Exception e) {
+            throw new TestException(e);
+        }
+    }
+
+    private Iterable<TestCase> getThrows() throws Exception {
+        final Collection<TestCase> collections = new LinkedList<>();
+        for (final String className : classNames) {
+            @SuppressWarnings("unchecked")
+            final Class<? extends Collection<Integer>> type =
+                (Class<? extends Collection<Integer>>) Class.forName(className);
+            final Constructor<? extends Collection<Integer>>
+                defaultConstructor = type.getConstructor();
+            final Constructor<? extends Collection<Integer>>
+                copyConstructor = type.getConstructor(Collection.class);
+
+            final Collection<Integer> empty = defaultConstructor.newInstance();
+            collections.add(new TestCase("empty",
+                className,
+                copyConstructor.newInstance(empty),
+                empty));
+
+            final Collection<Integer> single = defaultConstructor.newInstance();
+            single.add(42);
+            collections.add(new TestCase("single",
+                className,
+                copyConstructor.newInstance(single),
+                single));
+
+            final Collection<Integer> regular = defaultConstructor.newInstance();
+            for (int i=0; i < size; i++) {
+                regular.add(i);
+            }
+            collections.add(new TestCase("regular",
+                className,
+                copyConstructor.newInstance(regular),
+                regular));
+
+            final Collection<Integer> reverse = defaultConstructor.newInstance();
+            for (int i=size; i >= 0; i--) {
+                reverse.add(i);
+            }
+            collections.add(new TestCase("reverse",
+                className,
+                copyConstructor.newInstance(reverse),
+                reverse));
+
+            final Collection<Integer> odds = defaultConstructor.newInstance();
+            for (int i=0; i < size; i++) {
+                odds.add((i * 2) + 1);
+            }
+            collections.add(new TestCase("odds",
+                className,
+                copyConstructor.newInstance(odds),
+                odds));
+
+            final Collection<Integer> evens = defaultConstructor.newInstance();
+            for (int i=0; i < size; i++) {
+                evens.add(i * 2);
+            }
+            collections.add(new TestCase("evens",
+                className,
+                copyConstructor.newInstance(evens),
+                evens));
+
+            final Collection<Integer> fibonacci = defaultConstructor.newInstance();
+            int prev2 = 0;
+            int prev1 = 1;
+            for (int i=0; i < size; i++) {
+                final int n = prev1 + prev2;
+                if (n < 0) { // stop on overflow
+                    break;
+                }
+                fibonacci.add(n);
+                prev2 = prev1;
+                prev1 = n;
+            }
+            collections.add(new TestCase("fibonacci",
+                className,
+                copyConstructor.newInstance(fibonacci),
+                fibonacci));
+
+            // variants where the size of the backing storage != reported size
+            // created by removing half of the elements
+
+            final Collection<Integer> emptyWithSlack = defaultConstructor.newInstance();
+            emptyWithSlack.add(42);
+            assertTrue(emptyWithSlack.remove(42));
+            collections.add(new TestCase("emptyWithSlack",
+                className,
+                copyConstructor.newInstance(emptyWithSlack),
+                emptyWithSlack));
+
+            final Collection<Integer> singleWithSlack = defaultConstructor.newInstance();
+            singleWithSlack.add(42);
+            singleWithSlack.add(43);
+            assertTrue(singleWithSlack.remove(43));
+            collections.add(new TestCase("singleWithSlack",
+                className,
+                copyConstructor.newInstance(singleWithSlack),
+                singleWithSlack));
+
+            final Collection<Integer> regularWithSlack = defaultConstructor.newInstance();
+            for (int i=0; i < (2 * size); i++) {
+                regularWithSlack.add(i);
+            }
+            assertTrue(regularWithSlack.removeIf((x) -> {return x >= size;}));
+            collections.add(new TestCase("regularWithSlack",
+                className,
+                copyConstructor.newInstance(regularWithSlack),
+                regularWithSlack));
+
+            final Collection<Integer> reverseWithSlack = defaultConstructor.newInstance();
+            for (int i=2 * size; i >= 0; i--) {
+                reverseWithSlack.add(i);
+            }
+            assertTrue(reverseWithSlack.removeIf((x) -> {return x < size;}));
+            collections.add(new TestCase("reverseWithSlack",
+                className,
+                copyConstructor.newInstance(reverseWithSlack),
+                reverseWithSlack));
+
+            final Collection<Integer> oddsWithSlack = defaultConstructor.newInstance();
+            for (int i = 0; i < 2 * size; i++) {
+                oddsWithSlack.add((i * 2) + 1);
+            }
+            assertTrue(oddsWithSlack.removeIf((x) -> {return x >= size;}));
+            collections.add(new TestCase("oddsWithSlack",
+                className,
+                copyConstructor.newInstance(oddsWithSlack),
+                oddsWithSlack));
+
+            final Collection<Integer> evensWithSlack = defaultConstructor.newInstance();
+            for (int i = 0; i < 2 * size; i++) {
+                evensWithSlack.add(i * 2);
+            }
+            assertTrue(evensWithSlack.removeIf((x) -> {return x >= size;}));
+            collections.add(new TestCase("evensWithSlack",
+                className,
+                copyConstructor.newInstance(evensWithSlack),
+                evensWithSlack));
+
+            final Collection<Integer> fibonacciWithSlack = defaultConstructor.newInstance();
+            prev2 = 0;
+            prev1 = 1;
+            for (int i=0; i < size; i++) {
+                final int n = prev1 + prev2;
+                if (n < 0) { // stop on overflow
+                    break;
+                }
+                fibonacciWithSlack.add(n);
+                prev2 = prev1;
+                prev1 = n;
+            }
+            assertTrue(fibonacciWithSlack.removeIf((x) -> {return x < 20;}));
+            collections.add(new TestCase("fibonacciWithSlack",
+                className,
+                copyConstructor.newInstance(fibonacciWithSlack),
+                fibonacciWithSlack));
+
+        }
+
+        return collections;
+    }
+
+}
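
Usage sketch (illustrative only, not part of this patch): the class names, the size of 16, and the sample removeIf call below are assumptions chosen for demonstration; any Collection<Integer> implementation that provides a no-argument constructor and a copy constructor taking a Collection would work the same way, since that is all CollectionSupplier requires via reflection.

    // Hypothetical driver for CollectionSupplier, compiled alongside the test library
    // in the same (default) package.
    import java.util.Collection;

    public class CollectionSupplierUsage {
        public static void main(String[] args) {
            // Assumed example classes under test; both expose the required constructors.
            String[] classes = { "java.util.ArrayList", "java.util.LinkedList" };
            CollectionSupplier supplier = new CollectionSupplier(classes, 16);

            for (CollectionSupplier.TestCase test : supplier.get()) {
                Collection<Integer> target = test.collection;  // modifiable copy
                target.removeIf(x -> x % 2 == 0);              // sample bulk mutation
                // 'original' is unmodifiable and untouched, so it can serve as the baseline.
                System.out.println(test.name + " [" + test.className + "]: "
                    + test.original.size() + " -> " + target.size());
            }
        }
    }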