1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea, Bill Scherer, and Michael Scott with
  32  * assistance from members of JCP JSR-166 Expert Group and released to
  33  * the public domain, as explained at
  34  * http://creativecommons.org/licenses/publicdomain
  35  */
  36 
  37 package java.util.concurrent;
  38 import java.util.concurrent.locks.*;
  39 import java.util.concurrent.atomic.*;
  40 import java.util.*;
  41 
  42 /**
  43  * A {@linkplain BlockingQueue blocking queue} in which each insert
  44  * operation must wait for a corresponding remove operation by another
  45  * thread, and vice versa.  A synchronous queue does not have any
  46  * internal capacity, not even a capacity of one.  You cannot
  47  * <tt>peek</tt> at a synchronous queue because an element is only
  48  * present when you try to remove it; you cannot insert an element
  49  * (using any method) unless another thread is trying to remove it;
  50  * you cannot iterate as there is nothing to iterate.  The
  51  * <em>head</em> of the queue is the element that the first queued
  52  * inserting thread is trying to add to the queue; if there is no such
  53  * queued thread then no element is available for removal and
  54  * <tt>poll()</tt> will return <tt>null</tt>.  For purposes of other
  55  * <tt>Collection</tt> methods (for example <tt>contains</tt>), a
  56  * <tt>SynchronousQueue</tt> acts as an empty collection.  This queue
  57  * does not permit <tt>null</tt> elements.
  58  *
  59  * <p>Synchronous queues are similar to rendezvous channels used in
  60  * CSP and Ada. They are well suited for handoff designs, in which an
  61  * object running in one thread must sync up with an object running
  62  * in another thread in order to hand it some information, event, or
  63  * task.
  64  *
  65  * <p> This class supports an optional fairness policy for ordering
  66  * waiting producer and consumer threads.  By default, this ordering
  67  * is not guaranteed. However, a queue constructed with fairness set
  68  * to <tt>true</tt> grants threads access in FIFO order.
  69  *
  70  * <p>This class and its iterator implement all of the
  71  * <em>optional</em> methods of the {@link Collection} and {@link
  72  * Iterator} interfaces.
  73  *
  74  * <p>This class is a member of the
  75  * <a href="{@docRoot}/../technotes/guides/collections/index.html">
  76  * Java Collections Framework</a>.
  77  *
  78  * @since 1.5
  79  * @author Doug Lea and Bill Scherer and Michael Scott
  80  * @param <E> the type of elements held in this collection
  81  */
  82 public class SynchronousQueue<E> extends AbstractQueue<E>
  83     implements BlockingQueue<E>, java.io.Serializable {
  84     private static final long serialVersionUID = -3223113410248163686L;
  85 
  86     /*
  87      * This class implements extensions of the dual stack and dual
  88      * queue algorithms described in "Nonblocking Concurrent Objects
  89      * with Condition Synchronization", by W. N. Scherer III and
  90      * M. L. Scott.  18th Annual Conf. on Distributed Computing,
  91      * Oct. 2004 (see also
  92      * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
  93      * The (Lifo) stack is used for non-fair mode, and the (Fifo)
  94      * queue for fair mode. The performance of the two is generally
  95      * similar. Fifo usually supports higher throughput under
  96      * contention but Lifo maintains higher thread locality in common
  97      * applications.
  98      *
  99      * A dual queue (and similarly stack) is one that at any given
 100      * time either holds "data" -- items provided by put operations,
 101      * or "requests" -- slots representing take operations, or is
 102      * empty. A call to "fulfill" (i.e., a call requesting an item
 103      * from a queue holding data or vice versa) dequeues a
 104      * complementary node.  The most interesting feature of these
 105      * queues is that any operation can figure out which mode the
 106      * queue is in, and act accordingly without needing locks.
 107      *
 108      * Both the queue and stack extend abstract class Transferer
 109      * defining the single method transfer that does a put or a
 110      * take. These are unified into a single method because in dual
 111      * data structures, the put and take operations are symmetrical,
 112      * so nearly all code can be combined. The resulting transfer
 113      * methods are on the long side, but are easier to follow than
 114      * they would be if broken up into nearly-duplicated parts.
 115      *
 116      * The queue and stack data structures share many conceptual
 117      * similarities but very few concrete details. For simplicity,
 118      * they are kept distinct so that they can later evolve
 119      * separately.
 120      *
 121      * The algorithms here differ from the versions in the above paper
 122      * in extending them for use in synchronous queues, as well as
 123      * dealing with cancellation. The main differences include:
 124      *
 125      *  1. The original algorithms used bit-marked pointers, but
 126      *     the ones here use mode bits in nodes, leading to a number
 127      *     of further adaptations.
 128      *  2. SynchronousQueues must block threads waiting to become
 129      *     fulfilled.
 130      *  3. Support for cancellation via timeout and interrupts,
 131      *     including cleaning out cancelled nodes/threads
 132      *     from lists to avoid garbage retention and memory depletion.
 133      *
 134      * Blocking is mainly accomplished using LockSupport park/unpark,
 135      * except that nodes that appear to be the next ones to become
 136      * fulfilled first spin a bit (on multiprocessors only). On very
 137      * busy synchronous queues, spinning can dramatically improve
 138      * throughput. And on less busy ones, the amount of spinning is
 139      * small enough not to be noticeable.
 140      *
 141      * Cleaning is done in different ways in queues vs stacks.  For
 142      * queues, we can almost always remove a node immediately in O(1)
 143      * time (modulo retries for consistency checks) when it is
 144      * cancelled. But if it may be pinned as the current tail, it must
 145      * wait until some subsequent cancellation. For stacks, we need a
 146      * potentially O(n) traversal to be sure that we can remove the
 147      * node, but this can run concurrently with other threads
 148      * accessing the stack.
 149      *
 150      * While garbage collection takes care of most node reclamation
 151      * issues that otherwise complicate nonblocking algorithms, care
 152      * is taken to "forget" references to data, other nodes, and
 153      * threads that might be held on to long-term by blocked
 154      * threads. In cases where setting to null would otherwise
 155      * conflict with main algorithms, this is done by changing a
 156      * node's link to now point to the node itself. This doesn't arise
 157      * much for Stack nodes (because blocked threads do not hang on to
 158      * old head pointers), but references in Queue nodes must be
 159      * aggressively forgotten to avoid reachability of everything any
 160      * node has ever referred to since arrival.
 161      */
 162 
 163     /**
 164      * Shared internal API for dual stacks and queues.
 165      */
 166     static abstract class Transferer {
 167         /**
 168          * Performs a put or take.
 169          *
 170          * @param e if non-null, the item to be handed to a consumer;
 171          *          if null, requests that transfer return an item
 172          *          offered by producer.
 173          * @param timed if this operation should timeout
 174          * @param nanos the timeout, in nanoseconds
 175          * @return if non-null, the item provided or received; if null,
 176          *         the operation failed due to timeout or interrupt --
 177          *         the caller can distinguish which of these occurred
 178          *         by checking Thread.interrupted.
 179          */
 180         abstract Object transfer(Object e, boolean timed, long nanos);
 181     }
 182 
 183     /** The number of CPUs, for spin control */
 184     static final int NCPUS = Runtime.getRuntime().availableProcessors();
 185 
 186     /**
 187      * The number of times to spin before blocking in timed waits.
 188      * The value is empirically derived -- it works well across a
 189      * variety of processors and OSes. Empirically, the best value
 190      * seems not to vary with number of CPUs (beyond 2) so is just
 191      * a constant.
 192      */
 193     static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;
 194 
 195     /**
 196      * The number of times to spin before blocking in untimed waits.
 197      * This is greater than timed value because untimed waits spin
 198      * faster since they don't need to check times on each spin.
 199      */
 200     static final int maxUntimedSpins = maxTimedSpins * 16;
 201 
 202     /**
 203      * The number of nanoseconds for which it is faster to spin
 204      * rather than to use timed park. A rough estimate suffices.
 205      */
 206     static final long spinForTimeoutThreshold = 1000L;
 207 
 208     /** Dual stack */
 209     static final class TransferStack extends Transferer {
 210         /*
 211          * This extends Scherer-Scott dual stack algorithm, differing,
 212          * among other ways, by using "covering" nodes rather than
 213          * bit-marked pointers: Fulfilling operations push on marker
 214          * nodes (with FULFILLING bit set in mode) to reserve a spot
 215          * to match a waiting node.
 216          */
 217 
 218         /* Modes for SNodes, ORed together in node fields */
 219         /** Node represents an unfulfilled consumer */
 220         static final int REQUEST    = 0;
 221         /** Node represents an unfulfilled producer */
 222         static final int DATA       = 1;
 223         /** Node is fulfilling another unfulfilled DATA or REQUEST */
 224         static final int FULFILLING = 2;
 225 
 226         /** Return true if m has fulfilling bit set */
 227         static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }
 228 
 229         /** Node class for TransferStacks. */
 230         static final class SNode {
 231             volatile SNode next;        // next node in stack
 232             volatile SNode match;       // the node matched to this
 233             volatile Thread waiter;     // to control park/unpark
 234             Object item;                // data; or null for REQUESTs
 235             int mode;
 236             // Note: item and mode fields don't need to be volatile
 237             // since they are always written before, and read after,
 238             // other volatile/atomic operations.
 239 
 240             SNode(Object item) {
 241                 this.item = item;
 242             }
 243 
 244             static final AtomicReferenceFieldUpdater<SNode, SNode>
 245                 nextUpdater = AtomicReferenceFieldUpdater.newUpdater
 246                 (SNode.class, SNode.class, "next");
 247 
 248             boolean casNext(SNode cmp, SNode val) {
 249                 return (cmp == next &&
 250                         nextUpdater.compareAndSet(this, cmp, val));
 251             }
 252 
 253             static final AtomicReferenceFieldUpdater<SNode, SNode>
 254                 matchUpdater = AtomicReferenceFieldUpdater.newUpdater
 255                 (SNode.class, SNode.class, "match");
 256 
 257             /**
 258              * Tries to match node s to this node, if so, waking up thread.
 259              * Fulfillers call tryMatch to identify their waiters.
 260              * Waiters block until they have been matched.
 261              *
 262              * @param s the node to match
 263              * @return true if successfully matched to s
 264              */
 265             boolean tryMatch(SNode s) {
 266                 if (match == null &&
 267                     matchUpdater.compareAndSet(this, null, s)) {
 268                     Thread w = waiter;
 269                     if (w != null) {    // waiters need at most one unpark
 270                         waiter = null;
 271                         LockSupport.unpark(w);
 272                     }
 273                     return true;
 274                 }
 275                 return match == s;
 276             }
 277 
 278             /**
 279              * Tries to cancel a wait by matching node to itself.
 280              */
 281             void tryCancel() {
 282                 matchUpdater.compareAndSet(this, null, this);
 283             }
 284 
 285             boolean isCancelled() {
 286                 return match == this;
 287             }
 288         }
 289 
 290         /** The head (top) of the stack */
 291         volatile SNode head;
 292 
 293         static final AtomicReferenceFieldUpdater<TransferStack, SNode>
 294             headUpdater = AtomicReferenceFieldUpdater.newUpdater
 295             (TransferStack.class,  SNode.class, "head");
 296 
 297         boolean casHead(SNode h, SNode nh) {
 298             return h == head && headUpdater.compareAndSet(this, h, nh);
 299         }
 300 
 301         /**
 302          * Creates or resets fields of a node. Called only from transfer
 303          * where the node to push on stack is lazily created and
 304          * reused when possible to help reduce intervals between reads
 305          * and CASes of head and to avoid surges of garbage when CASes
 306          * to push nodes fail due to contention.
 307          */
 308         static SNode snode(SNode s, Object e, SNode next, int mode) {
 309             if (s == null) s = new SNode(e);
 310             s.mode = mode;
 311             s.next = next;
 312             return s;
 313         }
 314 
 315         /**
 316          * Puts or takes an item.
 317          */
 318         Object transfer(Object e, boolean timed, long nanos) {
 319             /*
 320              * Basic algorithm is to loop trying one of three actions:
 321              *
 322              * 1. If apparently empty or already containing nodes of same
 323              *    mode, try to push node on stack and wait for a match,
 324              *    returning it, or null if cancelled.
 325              *
 326              * 2. If apparently containing node of complementary mode,
 327              *    try to push a fulfilling node on to stack, match
 328              *    with corresponding waiting node, pop both from
 329              *    stack, and return matched item. The matching or
 330              *    unlinking might not actually be necessary because of
 331              *    other threads performing action 3:
 332              *
 333              * 3. If top of stack already holds another fulfilling node,
 334              *    help it out by doing its match and/or pop
 335              *    operations, and then continue. The code for helping
 336              *    is essentially the same as for fulfilling, except
 337              *    that it doesn't return the item.
 338              */
 339 
 340             SNode s = null; // constructed/reused as needed
 341             int mode = (e == null)? REQUEST : DATA;
 342 
 343             for (;;) {
 344                 SNode h = head;
 345                 if (h == null || h.mode == mode) {  // empty or same-mode
 346                     if (timed && nanos <= 0) {      // can't wait
 347                         if (h != null && h.isCancelled())
 348                             casHead(h, h.next);     // pop cancelled node
 349                         else
 350                             return null;
 351                     } else if (casHead(h, s = snode(s, e, h, mode))) {
 352                         SNode m = awaitFulfill(s, timed, nanos);
 353                         if (m == s) {               // wait was cancelled
 354                             clean(s);
 355                             return null;
 356                         }
 357                         if ((h = head) != null && h.next == s)
 358                             casHead(h, s.next);     // help s's fulfiller
 359                         return mode == REQUEST? m.item : s.item;
 360                     }
 361                 } else if (!isFulfilling(h.mode)) { // try to fulfill
 362                     if (h.isCancelled())            // already cancelled
 363                         casHead(h, h.next);         // pop and retry
 364                     else if (casHead(h, s=snode(s, e, h, FULFILLING|mode))) {
 365                         for (;;) { // loop until matched or waiters disappear
 366                             SNode m = s.next;       // m is s's match
 367                             if (m == null) {        // all waiters are gone
 368                                 casHead(s, null);   // pop fulfill node
 369                                 s = null;           // use new node next time
 370                                 break;              // restart main loop
 371                             }
 372                             SNode mn = m.next;
 373                             if (m.tryMatch(s)) {
 374                                 casHead(s, mn);     // pop both s and m
 375                                 return (mode == REQUEST)? m.item : s.item;
 376                             } else                  // lost match
 377                                 s.casNext(m, mn);   // help unlink
 378                         }
 379                     }
 380                 } else {                            // help a fulfiller
 381                     SNode m = h.next;               // m is h's match
 382                     if (m == null)                  // waiter is gone
 383                         casHead(h, null);           // pop fulfilling node
 384                     else {
 385                         SNode mn = m.next;
 386                         if (m.tryMatch(h))          // help match
 387                             casHead(h, mn);         // pop both h and m
 388                         else                        // lost match
 389                             h.casNext(m, mn);       // help unlink
 390                     }
 391                 }
 392             }
 393         }
 394 
 395         /**
 396          * Spins/blocks until node s is matched by a fulfill operation.
 397          *
 398          * @param s the waiting node
 399          * @param timed true if timed wait
 400          * @param nanos timeout value
 401          * @return matched node, or s if cancelled
 402          */
 403         SNode awaitFulfill(SNode s, boolean timed, long nanos) {
 404             /*
 405              * When a node/thread is about to block, it sets its waiter
 406              * field and then rechecks state at least one more time
 407              * before actually parking, thus covering race vs
 408              * fulfiller noticing that waiter is non-null so should be
 409              * woken.
 410              *
 411              * When invoked by nodes that appear at the point of call
 412              * to be at the head of the stack, calls to park are
 413              * preceded by spins to avoid blocking when producers and
 414              * consumers are arriving very close in time.  This can
 415              * happen enough to bother only on multiprocessors.
 416              *
 417              * The order of checks for returning out of main loop
 418              * reflects fact that interrupts have precedence over
 419              * normal returns, which have precedence over
 420              * timeouts. (So, on timeout, one last check for match is
 421              * done before giving up.) Except that calls from untimed
 422              * SynchronousQueue.{poll/offer} don't check interrupts
 423              * and don't wait at all, so are trapped in transfer
 424              * method rather than calling awaitFulfill.
 425              */
 426             long lastTime = (timed)? System.nanoTime() : 0;
 427             Thread w = Thread.currentThread();
 428             SNode h = head;
 429             int spins = (shouldSpin(s)?
 430                          (timed? maxTimedSpins : maxUntimedSpins) : 0);
 431             for (;;) {
 432                 if (w.isInterrupted())
 433                     s.tryCancel();
 434                 SNode m = s.match;
 435                 if (m != null)
 436                     return m;
 437                 if (timed) {
 438                     long now = System.nanoTime();
 439                     nanos -= now - lastTime;
 440                     lastTime = now;
 441                     if (nanos <= 0) {
 442                         s.tryCancel();
 443                         continue;
 444                     }
 445                 }
 446                 if (spins > 0)
 447                     spins = shouldSpin(s)? (spins-1) : 0;
 448                 else if (s.waiter == null)
 449                     s.waiter = w; // establish waiter so can park next iter
 450                 else if (!timed)
 451                     LockSupport.park(this);
 452                 else if (nanos > spinForTimeoutThreshold)
 453                     LockSupport.parkNanos(this, nanos);
 454             }
 455         }
 456 
 457         /**
 458          * Returns true if node s is at head or there is an active
 459          * fulfiller.
 460          */
 461         boolean shouldSpin(SNode s) {
 462             SNode h = head;
 463             return (h == s || h == null || isFulfilling(h.mode));
 464         }
 465 
 466         /**
 467          * Unlinks s from the stack.
 468          */
 469         void clean(SNode s) {
 470             s.item = null;   // forget item
 471             s.waiter = null; // forget thread
 472 
 473             /*
 474              * At worst we may need to traverse entire stack to unlink
 475              * s. If there are multiple concurrent calls to clean, we
 476              * might not see s if another thread has already removed
 477              * it. But we can stop when we see any node known to
 478              * follow s. We use s.next unless it too is cancelled, in
 479              * which case we try the node one past. We don't check any
 480              * further because we don't want to doubly traverse just to
 481              * find sentinel.
 482              */
 483 
 484             SNode past = s.next;
 485             if (past != null && past.isCancelled())
 486                 past = past.next;
 487 
 488             // Absorb cancelled nodes at head
 489             SNode p;
 490             while ((p = head) != null && p != past && p.isCancelled())
 491                 casHead(p, p.next);
 492 
 493             // Unsplice embedded nodes
 494             while (p != null && p != past) {
 495                 SNode n = p.next;
 496                 if (n != null && n.isCancelled())
 497                     p.casNext(n, n.next);
 498                 else
 499                     p = n;
 500             }
 501         }
 502     }
 503 
 504     /** Dual Queue */
 505     static final class TransferQueue extends Transferer {
 506         /*
 507          * This extends Scherer-Scott dual queue algorithm, differing,
 508          * among other ways, by using modes within nodes rather than
 509          * marked pointers. The algorithm is a little simpler than
 510          * that for stacks because fulfillers do not need explicit
 511          * nodes, and matching is done by CAS'ing QNode.item field
 512          * from non-null to null (for put) or vice versa (for take).
 513          */
 514 
        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;          // next node in queue
            volatile Object item;         // CAS'ed to or from null
            volatile Thread waiter;       // to control park/unpark
            final boolean isData;         // true for put (data) nodes, false for take (request) nodes

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            static final AtomicReferenceFieldUpdater<QNode, QNode>
                nextUpdater = AtomicReferenceFieldUpdater.newUpdater
                (QNode.class, QNode.class, "next");

            /** CASes the next field from cmp to val; pre-checks to avoid a useless CAS. */
            boolean casNext(QNode cmp, QNode val) {
                return (next == cmp &&
                        nextUpdater.compareAndSet(this, cmp, val));
            }

            static final AtomicReferenceFieldUpdater<QNode, Object>
                itemUpdater = AtomicReferenceFieldUpdater.newUpdater
                (QNode.class, Object.class, "item");

            /** CASes the item field from cmp to val; fulfillers use this to match nodes. */
            boolean casItem(Object cmp, Object val) {
                return (item == cmp &&
                        itemUpdater.compareAndSet(this, cmp, val));
            }

            /**
             * Tries to cancel by CAS'ing ref to this as item.
             * A node whose item points to itself is cancelled.
             */
            void tryCancel(Object cmp) {
                itemUpdater.compareAndSet(this, cmp, this);
            }

            boolean isCancelled() {
                return item == this;
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }
        }
 565 
        /** Head of queue; always points to a dummy node (see constructor) */
        transient volatile QNode head;
        /** Tail of queue; equal to head when the queue is empty */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from queue because it was the last inserted node
         * when it cancelled.
         */
        transient volatile QNode cleanMe;

        TransferQueue() {
            QNode h = new QNode(null, false); // initialize to dummy node.
            head = h;
            tail = h;
        }
 582 
        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            headUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class,  QNode.class, "head");

        /**
         * Tries to cas nh as new head; if successful, unlink
         * old head's next node to avoid garbage retention.
         * The self-link also marks the node as off-list (see
         * QNode.isOffList).
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head && headUpdater.compareAndSet(this, h, nh))
                h.next = h; // forget old next
        }
 595 
        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            tailUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class, QNode.class, "tail");

        /**
         * Tries to cas nt as new tail. Failure is benign: it means
         * some other thread has already advanced the tail.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                tailUpdater.compareAndSet(this, t, nt);
        }
 607 
        static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
            cleanMeUpdater = AtomicReferenceFieldUpdater.newUpdater
            (TransferQueue.class, QNode.class, "cleanMe");

        /**
         * Tries to CAS cleanMe slot.
         *
         * @param cmp the expected current value
         * @param val the new value
         * @return true if the CAS succeeded
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return (cleanMe == cmp &&
                    cleanMeUpdater.compareAndSet(this, cmp, val));
        }
 619 
 620         /**
 621          * Puts or takes an item.
 622          */
 623         Object transfer(Object e, boolean timed, long nanos) {
 624             /* Basic algorithm is to loop trying to take either of
 625              * two actions:
 626              *
 627              * 1. If queue apparently empty or holding same-mode nodes,
 628              *    try to add node to queue of waiters, wait to be
 629              *    fulfilled (or cancelled) and return matching item.
 630              *
 631              * 2. If queue apparently contains waiting items, and this
 632              *    call is of complementary mode, try to fulfill by CAS'ing
 633              *    item field of waiting node and dequeuing it, and then
 634              *    returning matching item.
 635              *
 636              * In each case, along the way, check for and try to help
 637              * advance head and tail on behalf of other stalled/slow
 638              * threads.
 639              *
 640              * The loop starts off with a null check guarding against
 641              * seeing uninitialized head or tail values. This never
 642              * happens in current SynchronousQueue, but could if
 643              * callers held non-volatile/final ref to the
 644              * transferer. The check is here anyway because it places
 645              * null checks at top of loop, which is usually faster
 646              * than having them implicitly interspersed.
 647              */
 648 
 649             QNode s = null; // constructed/reused as needed
 650             boolean isData = (e != null);
 651 
 652             for (;;) {
 653                 QNode t = tail;
 654                 QNode h = head;
 655                 if (t == null || h == null)         // saw uninitialized value
 656                     continue;                       // spin
 657 
 658                 if (h == t || t.isData == isData) { // empty or same-mode
 659                     QNode tn = t.next;
 660                     if (t != tail)                  // inconsistent read
 661                         continue;
 662                     if (tn != null) {               // lagging tail
 663                         advanceTail(t, tn);
 664                         continue;
 665                     }
 666                     if (timed && nanos <= 0)        // can't wait
 667                         return null;
 668                     if (s == null)
 669                         s = new QNode(e, isData);
 670                     if (!t.casNext(null, s))        // failed to link in
 671                         continue;
 672 
 673                     advanceTail(t, s);              // swing tail and wait
 674                     Object x = awaitFulfill(s, e, timed, nanos);
 675                     if (x == s) {                   // wait was cancelled
 676                         clean(t, s);
 677                         return null;
 678                     }
 679 
 680                     if (!s.isOffList()) {           // not already unlinked
 681                         advanceHead(t, s);          // unlink if head
 682                         if (x != null)              // and forget fields
 683                             s.item = s;
 684                         s.waiter = null;
 685                     }
 686                     return (x != null)? x : e;
 687 
 688                 } else {                            // complementary-mode
 689                     QNode m = h.next;               // node to fulfill
 690                     if (t != tail || m == null || h != head)
 691                         continue;                   // inconsistent read
 692 
 693                     Object x = m.item;
 694                     if (isData == (x != null) ||    // m already fulfilled
 695                         x == m ||                   // m cancelled
 696                         !m.casItem(x, e)) {         // lost CAS
 697                         advanceHead(h, m);          // dequeue and retry
 698                         continue;
 699                     }
 700 
 701                     advanceHead(h, m);              // successfully fulfilled
 702                     LockSupport.unpark(m.waiter);
 703                     return (x != null)? x : e;
 704                 }
 705             }
 706         }
 707 
 708         /**
 709          * Spins/blocks until node s is fulfilled.
 710          *
 711          * @param s the waiting node
 712          * @param e the comparison value for checking match
 713          * @param timed true if timed wait
 714          * @param nanos timeout value
 715          * @return matched item, or s if cancelled
 716          */
 717         Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
 718             /* Same idea as TransferStack.awaitFulfill */
 719             long lastTime = (timed)? System.nanoTime() : 0;
 720             Thread w = Thread.currentThread();
 721             int spins = ((head.next == s) ?
 722                          (timed? maxTimedSpins : maxUntimedSpins) : 0);
 723             for (;;) {
 724                 if (w.isInterrupted())
 725                     s.tryCancel(e);
 726                 Object x = s.item;
 727                 if (x != e)
 728                     return x;
 729                 if (timed) {
 730                     long now = System.nanoTime();
 731                     nanos -= now - lastTime;
 732                     lastTime = now;
 733                     if (nanos <= 0) {
 734                         s.tryCancel(e);
 735                         continue;
 736                     }
 737                 }
 738                 if (spins > 0)
 739                     --spins;
 740                 else if (s.waiter == null)
 741                     s.waiter = w;
 742                 else if (!timed)
 743                     LockSupport.park(this);
 744                 else if (nanos > spinForTimeoutThreshold)
 745                     LockSupport.parkNanos(this, nanos);
 746             }
 747         }
 748 
 749         /**
 750          * Gets rid of cancelled node s with original predecessor pred.
 751          */
 752         void clean(QNode pred, QNode s) {
 753             s.waiter = null; // forget thread
 754             /*
 755              * At any given time, exactly one node on list cannot be
 756              * deleted -- the last inserted node. To accommodate this,
 757              * if we cannot delete s, we save its predecessor as
 758              * "cleanMe", deleting the previously saved version
 759              * first. At least one of node s or the node previously
 760              * saved can always be deleted, so this always terminates.
 761              */
 762             while (pred.next == s) { // Return early if already unlinked
 763                 QNode h = head;
 764                 QNode hn = h.next;   // Absorb cancelled first node as head
 765                 if (hn != null && hn.isCancelled()) {
 766                     advanceHead(h, hn);
 767                     continue;
 768                 }
 769                 QNode t = tail;      // Ensure consistent read for tail
 770                 if (t == h)
 771                     return;
 772                 QNode tn = t.next;
 773                 if (t != tail)
 774                     continue;
 775                 if (tn != null) {
 776                     advanceTail(t, tn);
 777                     continue;
 778                 }
 779                 if (s != t) {        // If not tail, try to unsplice
 780                     QNode sn = s.next;
 781                     if (sn == s || pred.casNext(s, sn))
 782                         return;
 783                 }
 784                 QNode dp = cleanMe;
 785                 if (dp != null) {    // Try unlinking previous cancelled node
 786                     QNode d = dp.next;
 787                     QNode dn;
 788                     if (d == null ||               // d is gone or
 789                         d == dp ||                 // d is off list or
 790                         !d.isCancelled() ||        // d not cancelled or
 791                         (d != t &&                 // d not tail and
 792                          (dn = d.next) != null &&  //   has successor
 793                          dn != d &&                //   that is on list
 794                          dp.casNext(d, dn)))       // d unspliced
 795                         casCleanMe(dp, null);
 796                     if (dp == pred)
 797                         return;      // s is already saved node
 798                 } else if (casCleanMe(null, pred))
 799                     return;          // Postpone cleaning s
 800             }
 801         }
 802     }
 803 
 804     /**
 805      * The transferer. Set only in constructor, but cannot be declared
 806      * as final without further complicating serialization.  Since
 807      * this is accessed only at most once per public method, there
 808      * isn't a noticeable performance penalty for using volatile
 809      * instead of final here.
 810      */
 811     private transient volatile Transferer transferer;
 812 
 813     /**
 814      * Creates a <tt>SynchronousQueue</tt> with nonfair access policy.
 815      */
 816     public SynchronousQueue() {
 817         this(false);
 818     }
 819 
 820     /**
 821      * Creates a <tt>SynchronousQueue</tt> with the specified fairness policy.
 822      *
 823      * @param fair if true, waiting threads contend in FIFO order for
 824      *        access; otherwise the order is unspecified.
 825      */
 826     public SynchronousQueue(boolean fair) {
 827         transferer = (fair)? new TransferQueue() : new TransferStack();
 828     }
 829 
 830     /**
 831      * Adds the specified element to this queue, waiting if necessary for
 832      * another thread to receive it.
 833      *
 834      * @throws InterruptedException {@inheritDoc}
 835      * @throws NullPointerException {@inheritDoc}
 836      */
 837     public void put(E o) throws InterruptedException {
 838         if (o == null) throw new NullPointerException();
 839         if (transferer.transfer(o, false, 0) == null) {
 840             Thread.interrupted();
 841             throw new InterruptedException();
 842         }
 843     }
 844 
 845     /**
 846      * Inserts the specified element into this queue, waiting if necessary
 847      * up to the specified wait time for another thread to receive it.
 848      *
 849      * @return <tt>true</tt> if successful, or <tt>false</tt> if the
 850      *         specified waiting time elapses before a consumer appears.
 851      * @throws InterruptedException {@inheritDoc}
 852      * @throws NullPointerException {@inheritDoc}
 853      */
 854     public boolean offer(E o, long timeout, TimeUnit unit)
 855         throws InterruptedException {
 856         if (o == null) throw new NullPointerException();
 857         if (transferer.transfer(o, true, unit.toNanos(timeout)) != null)
 858             return true;
 859         if (!Thread.interrupted())
 860             return false;
 861         throw new InterruptedException();
 862     }
 863 
 864     /**
 865      * Inserts the specified element into this queue, if another thread is
 866      * waiting to receive it.
 867      *
 868      * @param e the element to add
 869      * @return <tt>true</tt> if the element was added to this queue, else
 870      *         <tt>false</tt>
 871      * @throws NullPointerException if the specified element is null
 872      */
 873     public boolean offer(E e) {
 874         if (e == null) throw new NullPointerException();
 875         return transferer.transfer(e, true, 0) != null;
 876     }
 877 
 878     /**
 879      * Retrieves and removes the head of this queue, waiting if necessary
 880      * for another thread to insert it.
 881      *
 882      * @return the head of this queue
 883      * @throws InterruptedException {@inheritDoc}
 884      */
 885     public E take() throws InterruptedException {
 886         Object e = transferer.transfer(null, false, 0);
 887         if (e != null)
 888             return (E)e;
 889         Thread.interrupted();
 890         throw new InterruptedException();
 891     }
 892 
 893     /**
 894      * Retrieves and removes the head of this queue, waiting
 895      * if necessary up to the specified wait time, for another thread
 896      * to insert it.
 897      *
 898      * @return the head of this queue, or <tt>null</tt> if the
 899      *         specified waiting time elapses before an element is present.
 900      * @throws InterruptedException {@inheritDoc}
 901      */
 902     public E poll(long timeout, TimeUnit unit) throws InterruptedException {
 903         Object e = transferer.transfer(null, true, unit.toNanos(timeout));
 904         if (e != null || !Thread.interrupted())
 905             return (E)e;
 906         throw new InterruptedException();
 907     }
 908 
 909     /**
 910      * Retrieves and removes the head of this queue, if another thread
 911      * is currently making an element available.
 912      *
 913      * @return the head of this queue, or <tt>null</tt> if no
 914      *         element is available.
 915      */
 916     public E poll() {
 917         return (E)transferer.transfer(null, true, 0);
 918     }
 919 
 920     /**
 921      * Always returns <tt>true</tt>.
 922      * A <tt>SynchronousQueue</tt> has no internal capacity.
 923      *
 924      * @return <tt>true</tt>
 925      */
 926     public boolean isEmpty() {
 927         return true;
 928     }
 929 
 930     /**
 931      * Always returns zero.
 932      * A <tt>SynchronousQueue</tt> has no internal capacity.
 933      *
 934      * @return zero.
 935      */
 936     public int size() {
 937         return 0;
 938     }
 939 
 940     /**
 941      * Always returns zero.
 942      * A <tt>SynchronousQueue</tt> has no internal capacity.
 943      *
 944      * @return zero.
 945      */
 946     public int remainingCapacity() {
 947         return 0;
 948     }
 949 
 950     /**
 951      * Does nothing.
 952      * A <tt>SynchronousQueue</tt> has no internal capacity.
 953      */
 954     public void clear() {
 955     }
 956 
 957     /**
 958      * Always returns <tt>false</tt>.
 959      * A <tt>SynchronousQueue</tt> has no internal capacity.
 960      *
 961      * @param o the element
 962      * @return <tt>false</tt>
 963      */
 964     public boolean contains(Object o) {
 965         return false;
 966     }
 967 
 968     /**
 969      * Always returns <tt>false</tt>.
 970      * A <tt>SynchronousQueue</tt> has no internal capacity.
 971      *
 972      * @param o the element to remove
 973      * @return <tt>false</tt>
 974      */
 975     public boolean remove(Object o) {
 976         return false;
 977     }
 978 
 979     /**
 980      * Returns <tt>false</tt> unless the given collection is empty.
 981      * A <tt>SynchronousQueue</tt> has no internal capacity.
 982      *
 983      * @param c the collection
 984      * @return <tt>false</tt> unless given collection is empty
 985      */
 986     public boolean containsAll(Collection<?> c) {
 987         return c.isEmpty();
 988     }
 989 
 990     /**
 991      * Always returns <tt>false</tt>.
 992      * A <tt>SynchronousQueue</tt> has no internal capacity.
 993      *
 994      * @param c the collection
 995      * @return <tt>false</tt>
 996      */
 997     public boolean removeAll(Collection<?> c) {
 998         return false;
 999     }
1000 
1001     /**
1002      * Always returns <tt>false</tt>.
1003      * A <tt>SynchronousQueue</tt> has no internal capacity.
1004      *
1005      * @param c the collection
1006      * @return <tt>false</tt>
1007      */
1008     public boolean retainAll(Collection<?> c) {
1009         return false;
1010     }
1011 
1012     /**
1013      * Always returns <tt>null</tt>.
1014      * A <tt>SynchronousQueue</tt> does not return elements
1015      * unless actively waited on.
1016      *
1017      * @return <tt>null</tt>
1018      */
1019     public E peek() {
1020         return null;
1021     }
1022 
1023     /**
1024      * Returns an empty iterator in which <tt>hasNext</tt> always returns
1025      * <tt>false</tt>.
1026      *
1027      * @return an empty iterator
1028      */
1029     public Iterator<E> iterator() {
1030         return Collections.emptyIterator();
1031     }
1032 
1033     /**
1034      * Returns a zero-length array.
1035      * @return a zero-length array
1036      */
1037     public Object[] toArray() {
1038         return new Object[0];
1039     }
1040 
1041     /**
1042      * Sets the zeroeth element of the specified array to <tt>null</tt>
1043      * (if the array has non-zero length) and returns it.
1044      *
1045      * @param a the array
1046      * @return the specified array
1047      * @throws NullPointerException if the specified array is null
1048      */
1049     public <T> T[] toArray(T[] a) {
1050         if (a.length > 0)
1051             a[0] = null;
1052         return a;
1053     }
1054 
1055     /**
1056      * @throws UnsupportedOperationException {@inheritDoc}
1057      * @throws ClassCastException            {@inheritDoc}
1058      * @throws NullPointerException          {@inheritDoc}
1059      * @throws IllegalArgumentException      {@inheritDoc}
1060      */
1061     public int drainTo(Collection<? super E> c) {
1062         if (c == null)
1063             throw new NullPointerException();
1064         if (c == this)
1065             throw new IllegalArgumentException();
1066         int n = 0;
1067         E e;
1068         while ( (e = poll()) != null) {
1069             c.add(e);
1070             ++n;
1071         }
1072         return n;
1073     }
1074 
1075     /**
1076      * @throws UnsupportedOperationException {@inheritDoc}
1077      * @throws ClassCastException            {@inheritDoc}
1078      * @throws NullPointerException          {@inheritDoc}
1079      * @throws IllegalArgumentException      {@inheritDoc}
1080      */
1081     public int drainTo(Collection<? super E> c, int maxElements) {
1082         if (c == null)
1083             throw new NullPointerException();
1084         if (c == this)
1085             throw new IllegalArgumentException();
1086         int n = 0;
1087         E e;
1088         while (n < maxElements && (e = poll()) != null) {
1089             c.add(e);
1090             ++n;
1091         }
1092         return n;
1093     }
1094 
1095     /*
1096      * To cope with serialization strategy in the 1.5 version of
1097      * SynchronousQueue, we declare some unused classes and fields
1098      * that exist solely to enable serializability across versions.
1099      * These fields are never used, so are initialized only if this
1100      * object is ever serialized or deserialized.
1101      */
1102 
1103     static class WaitQueue implements java.io.Serializable { }
1104     static class LifoWaitQueue extends WaitQueue {
1105         private static final long serialVersionUID = -3633113410248163686L;
1106     }
1107     static class FifoWaitQueue extends WaitQueue {
1108         private static final long serialVersionUID = -3623113410248163686L;
1109     }
1110     private ReentrantLock qlock;
1111     private WaitQueue waitingProducers;
1112     private WaitQueue waitingConsumers;
1113 
1114     /**
1115      * Save the state to a stream (that is, serialize it).
1116      *
1117      * @param s the stream
1118      */
1119     private void writeObject(java.io.ObjectOutputStream s)
1120         throws java.io.IOException {
1121         boolean fair = transferer instanceof TransferQueue;
1122         if (fair) {
1123             qlock = new ReentrantLock(true);
1124             waitingProducers = new FifoWaitQueue();
1125             waitingConsumers = new FifoWaitQueue();
1126         }
1127         else {
1128             qlock = new ReentrantLock();
1129             waitingProducers = new LifoWaitQueue();
1130             waitingConsumers = new LifoWaitQueue();
1131         }
1132         s.defaultWriteObject();
1133     }
1134 
1135     private void readObject(final java.io.ObjectInputStream s)
1136         throws java.io.IOException, ClassNotFoundException {
1137         s.defaultReadObject();
1138         if (waitingProducers instanceof FifoWaitQueue)
1139             transferer = new TransferQueue();
1140         else
1141             transferer = new TransferStack();
1142     }
1143 
1144 }