Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/classes/java/util/concurrent/SynchronousQueue.java
+++ new/src/share/classes/java/util/concurrent/SynchronousQueue.java
1 1 /*
2 2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 3 *
4 4 * This code is free software; you can redistribute it and/or modify it
5 5 * under the terms of the GNU General Public License version 2 only, as
6 6 * published by the Free Software Foundation. Oracle designates this
7 7 * particular file as subject to the "Classpath" exception as provided
8 8 * by Oracle in the LICENSE file that accompanied this code.
9 9 *
10 10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 13 * version 2 for more details (a copy is included in the LICENSE file that
14 14 * accompanied this code).
15 15 *
16 16 * You should have received a copy of the GNU General Public License version
17 17 * 2 along with this work; if not, write to the Free Software Foundation,
18 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 19 *
20 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 21 * or visit www.oracle.com if you need additional information or have any
22 22 * questions.
23 23 */
24 24
25 25 /*
26 26 * This file is available under and governed by the GNU General Public
27 27 * License version 2 only, as published by the Free Software Foundation.
28 28 * However, the following notice accompanied the original version of this
29 29 * file:
30 30 *
31 31 * Written by Doug Lea, Bill Scherer, and Michael Scott with
32 32 * assistance from members of JCP JSR-166 Expert Group and released to
33 33 * the public domain, as explained at
34 34 * http://creativecommons.org/licenses/publicdomain
35 35 */
36 36
37 37 package java.util.concurrent;
38 38 import java.util.concurrent.locks.*;
39 39 import java.util.concurrent.atomic.*;
40 40 import java.util.*;
41 41
42 42 /**
43 43 * A {@linkplain BlockingQueue blocking queue} in which each insert
44 44 * operation must wait for a corresponding remove operation by another
45 45 * thread, and vice versa. A synchronous queue does not have any
46 46 * internal capacity, not even a capacity of one. You cannot
47 47 * <tt>peek</tt> at a synchronous queue because an element is only
48 48 * present when you try to remove it; you cannot insert an element
49 49 * (using any method) unless another thread is trying to remove it;
50 50 * you cannot iterate as there is nothing to iterate. The
51 51 * <em>head</em> of the queue is the element that the first queued
52 52 * inserting thread is trying to add to the queue; if there is no such
53 53 * queued thread then no element is available for removal and
54 54 * <tt>poll()</tt> will return <tt>null</tt>. For purposes of other
55 55 * <tt>Collection</tt> methods (for example <tt>contains</tt>), a
56 56 * <tt>SynchronousQueue</tt> acts as an empty collection. This queue
57 57 * does not permit <tt>null</tt> elements.
58 58 *
59 59 * <p>Synchronous queues are similar to rendezvous channels used in
60 60 * CSP and Ada. They are well suited for handoff designs, in which an
61 61 * object running in one thread must sync up with an object running
62 62 * in another thread in order to hand it some information, event, or
63 63 * task.
64 64 *
65 65 * <p> This class supports an optional fairness policy for ordering
66 66 * waiting producer and consumer threads. By default, this ordering
67 67 * is not guaranteed. However, a queue constructed with fairness set
68 68 * to <tt>true</tt> grants threads access in FIFO order.
69 69 *
70 70 * <p>This class and its iterator implement all of the
71 71 * <em>optional</em> methods of the {@link Collection} and {@link
72 72 * Iterator} interfaces.
73 73 *
74 74 * <p>This class is a member of the
75 75 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
76 76 * Java Collections Framework</a>.
77 77 *
78 78 * @since 1.5
79 79 * @author Doug Lea and Bill Scherer and Michael Scott
80 80 * @param <E> the type of elements held in this collection
81 81 */
82 82 public class SynchronousQueue<E> extends AbstractQueue<E>
83 83 implements BlockingQueue<E>, java.io.Serializable {
84 84 private static final long serialVersionUID = -3223113410248163686L;
85 85
86 86 /*
87 87 * This class implements extensions of the dual stack and dual
88 88 * queue algorithms described in "Nonblocking Concurrent Objects
89 89 * with Condition Synchronization", by W. N. Scherer III and
90 90 * M. L. Scott. 18th Annual Conf. on Distributed Computing,
91 91 * Oct. 2004 (see also
92 92 * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
93 93 * The (Lifo) stack is used for non-fair mode, and the (Fifo)
94 94 * queue for fair mode. The performance of the two is generally
95 95 * similar. Fifo usually supports higher throughput under
96 96 * contention but Lifo maintains higher thread locality in common
97 97 * applications.
98 98 *
99 99 * A dual queue (and similarly stack) is one that at any given
100 100 * time either holds "data" -- items provided by put operations,
101 101 * or "requests" -- slots representing take operations, or is
102 102 * empty. A call to "fulfill" (i.e., a call requesting an item
103 103 * from a queue holding data or vice versa) dequeues a
104 104 * complementary node. The most interesting feature of these
105 105 * queues is that any operation can figure out which mode the
106 106 * queue is in, and act accordingly without needing locks.
107 107 *
108 108 * Both the queue and stack extend abstract class Transferer
109 109 * defining the single method transfer that does a put or a
110 110 * take. These are unified into a single method because in dual
111 111 * data structures, the put and take operations are symmetrical,
112 112 * so nearly all code can be combined. The resulting transfer
113 113 * methods are on the long side, but are easier to follow than
114 114 * they would be if broken up into nearly-duplicated parts.
115 115 *
116 116 * The queue and stack data structures share many conceptual
117 117 * similarities but very few concrete details. For simplicity,
118 118 * they are kept distinct so that they can later evolve
119 119 * separately.
120 120 *
121 121 * The algorithms here differ from the versions in the above paper
122 122 * in extending them for use in synchronous queues, as well as
123 123 * dealing with cancellation. The main differences include:
124 124 *
125 125 * 1. The original algorithms used bit-marked pointers, but
126 126 * the ones here use mode bits in nodes, leading to a number
127 127 * of further adaptations.
128 128 * 2. SynchronousQueues must block threads waiting to become
129 129 * fulfilled.
130 130 * 3. Support for cancellation via timeout and interrupts,
131 131 * including cleaning out cancelled nodes/threads
132 132 * from lists to avoid garbage retention and memory depletion.
133 133 *
134 134 * Blocking is mainly accomplished using LockSupport park/unpark,
135 135 * except that nodes that appear to be the next ones to become
136 136 * fulfilled first spin a bit (on multiprocessors only). On very
137 137 * busy synchronous queues, spinning can dramatically improve
138 138 * throughput. And on less busy ones, the amount of spinning is
139 139 * small enough not to be noticeable.
140 140 *
141 141 * Cleaning is done in different ways in queues vs stacks. For
142 142 * queues, we can almost always remove a node immediately in O(1)
143 143 * time (modulo retries for consistency checks) when it is
144 144 * cancelled. But if it may be pinned as the current tail, it must
145 145 * wait until some subsequent cancellation. For stacks, we need a
146 146 * potentially O(n) traversal to be sure that we can remove the
147 147 * node, but this can run concurrently with other threads
148 148 * accessing the stack.
149 149 *
150 150 * While garbage collection takes care of most node reclamation
151 151 * issues that otherwise complicate nonblocking algorithms, care
152 152 * is taken to "forget" references to data, other nodes, and
153 153 * threads that might be held on to long-term by blocked
154 154 * threads. In cases where setting to null would otherwise
155 155 * conflict with main algorithms, this is done by changing a
↓ open down ↓ |
155 lines elided |
↑ open up ↑ |
156 156 * node's link to now point to the node itself. This doesn't arise
157 157 * much for Stack nodes (because blocked threads do not hang on to
158 158 * old head pointers), but references in Queue nodes must be
159 159 * aggressively forgotten to avoid reachability of everything any
160 160 * node has ever referred to since arrival.
161 161 */
162 162
163 163 /**
164 164 * Shared internal API for dual stacks and queues.
165 165 */
166 - static abstract class Transferer {
166 + abstract static class Transferer {
167 167 /**
168 168 * Performs a put or take.
169 169 *
170 170 * @param e if non-null, the item to be handed to a consumer;
171 171 * if null, requests that transfer return an item
172 172 * offered by producer.
173 173 * @param timed if this operation should timeout
174 174 * @param nanos the timeout, in nanoseconds
175 175 * @return if non-null, the item provided or received; if null,
176 176 * the operation failed due to timeout or interrupt --
177 177 * the caller can distinguish which of these occurred
178 178 * by checking Thread.interrupted.
179 179 */
180 180 abstract Object transfer(Object e, boolean timed, long nanos);
181 181 }
182 182
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
183 183 /** The number of CPUs, for spin control */
184 184 static final int NCPUS = Runtime.getRuntime().availableProcessors();
185 185
186 186 /**
187 187 * The number of times to spin before blocking in timed waits.
188 188 * The value is empirically derived -- it works well across a
189 189 * variety of processors and OSes. Empirically, the best value
190 190 * seems not to vary with number of CPUs (beyond 2) so is just
191 191 * a constant.
192 192 */
193 - static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;
193 + static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;
194 194
195 195 /**
196 196 * The number of times to spin before blocking in untimed waits.
197 197 * This is greater than timed value because untimed waits spin
198 198 * faster since they don't need to check times on each spin.
199 199 */
200 200 static final int maxUntimedSpins = maxTimedSpins * 16;
201 201
202 202 /**
203 203 * The number of nanoseconds for which it is faster to spin
204 204 * rather than to use timed park. A rough estimate suffices.
205 205 */
206 206 static final long spinForTimeoutThreshold = 1000L;
207 207
208 208 /** Dual stack */
209 209 static final class TransferStack extends Transferer {
210 210 /*
211 211 * This extends Scherer-Scott dual stack algorithm, differing,
212 212 * among other ways, by using "covering" nodes rather than
213 213 * bit-marked pointers: Fulfilling operations push on marker
214 214 * nodes (with FULFILLING bit set in mode) to reserve a spot
215 215 * to match a waiting node.
216 216 */
217 217
218 218 /* Modes for SNodes, ORed together in node fields */
219 219 /** Node represents an unfulfilled consumer */
220 220 static final int REQUEST = 0;
221 221 /** Node represents an unfulfilled producer */
222 222 static final int DATA = 1;
223 223 /** Node is fulfilling another unfulfilled DATA or REQUEST */
224 224 static final int FULFILLING = 2;
225 225
226 226 /** Return true if m has fulfilling bit set */
227 227 static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }
228 228
229 229 /** Node class for TransferStacks. */
230 230 static final class SNode {
231 231 volatile SNode next; // next node in stack
232 232 volatile SNode match; // the node matched to this
233 233 volatile Thread waiter; // to control park/unpark
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
234 234 Object item; // data; or null for REQUESTs
235 235 int mode;
236 236 // Note: item and mode fields don't need to be volatile
237 237 // since they are always written before, and read after,
238 238 // other volatile/atomic operations.
239 239
240 240 SNode(Object item) {
241 241 this.item = item;
242 242 }
243 243
244 - static final AtomicReferenceFieldUpdater<SNode, SNode>
245 - nextUpdater = AtomicReferenceFieldUpdater.newUpdater
246 - (SNode.class, SNode.class, "next");
247 -
248 244 boolean casNext(SNode cmp, SNode val) {
249 - return (cmp == next &&
250 - nextUpdater.compareAndSet(this, cmp, val));
245 + return cmp == next &&
246 + UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
251 247 }
252 248
253 - static final AtomicReferenceFieldUpdater<SNode, SNode>
254 - matchUpdater = AtomicReferenceFieldUpdater.newUpdater
255 - (SNode.class, SNode.class, "match");
256 -
257 249 /**
258 250 * Tries to match node s to this node, if so, waking up thread.
259 251 * Fulfillers call tryMatch to identify their waiters.
260 252 * Waiters block until they have been matched.
261 253 *
262 254 * @param s the node to match
263 255 * @return true if successfully matched to s
264 256 */
265 257 boolean tryMatch(SNode s) {
266 258 if (match == null &&
267 - matchUpdater.compareAndSet(this, null, s)) {
259 + UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
268 260 Thread w = waiter;
269 261 if (w != null) { // waiters need at most one unpark
270 262 waiter = null;
271 263 LockSupport.unpark(w);
272 264 }
273 265 return true;
274 266 }
275 267 return match == s;
276 268 }
277 269
278 270 /**
279 271 * Tries to cancel a wait by matching node to itself.
280 272 */
281 273 void tryCancel() {
282 - matchUpdater.compareAndSet(this, null, this);
274 + UNSAFE.compareAndSwapObject(this, matchOffset, null, this);
283 275 }
284 276
285 277 boolean isCancelled() {
286 278 return match == this;
287 279 }
280 +
281 + // Unsafe mechanics
282 + private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
283 + private static final long nextOffset =
284 + objectFieldOffset(UNSAFE, "next", SNode.class);
285 + private static final long matchOffset =
286 + objectFieldOffset(UNSAFE, "match", SNode.class);
287 +
288 288 }
289 289
290 290 /** The head (top) of the stack */
291 291 volatile SNode head;
292 292
293 - static final AtomicReferenceFieldUpdater<TransferStack, SNode>
294 - headUpdater = AtomicReferenceFieldUpdater.newUpdater
295 - (TransferStack.class, SNode.class, "head");
296 -
297 293 boolean casHead(SNode h, SNode nh) {
298 - return h == head && headUpdater.compareAndSet(this, h, nh);
294 + return h == head &&
295 + UNSAFE.compareAndSwapObject(this, headOffset, h, nh);
299 296 }
300 297
301 298 /**
302 299 * Creates or resets fields of a node. Called only from transfer
303 300 * where the node to push on stack is lazily created and
304 301 * reused when possible to help reduce intervals between reads
305 302 * and CASes of head and to avoid surges of garbage when CASes
306 303 * to push nodes fail due to contention.
307 304 */
308 305 static SNode snode(SNode s, Object e, SNode next, int mode) {
309 306 if (s == null) s = new SNode(e);
310 307 s.mode = mode;
311 308 s.next = next;
312 309 return s;
313 310 }
314 311
315 312 /**
316 313 * Puts or takes an item.
317 314 */
318 315 Object transfer(Object e, boolean timed, long nanos) {
319 316 /*
320 317 * Basic algorithm is to loop trying one of three actions:
321 318 *
322 319 * 1. If apparently empty or already containing nodes of same
323 320 * mode, try to push node on stack and wait for a match,
324 321 * returning it, or null if cancelled.
325 322 *
326 323 * 2. If apparently containing node of complementary mode,
327 324 * try to push a fulfilling node on to stack, match
328 325 * with corresponding waiting node, pop both from
329 326 * stack, and return matched item. The matching or
330 327 * unlinking might not actually be necessary because of
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
331 328 * other threads performing action 3:
332 329 *
333 330 * 3. If top of stack already holds another fulfilling node,
334 331 * help it out by doing its match and/or pop
335 332 * operations, and then continue. The code for helping
336 333 * is essentially the same as for fulfilling, except
337 334 * that it doesn't return the item.
338 335 */
339 336
340 337 SNode s = null; // constructed/reused as needed
341 - int mode = (e == null)? REQUEST : DATA;
338 + int mode = (e == null) ? REQUEST : DATA;
342 339
343 340 for (;;) {
344 341 SNode h = head;
345 342 if (h == null || h.mode == mode) { // empty or same-mode
346 343 if (timed && nanos <= 0) { // can't wait
347 344 if (h != null && h.isCancelled())
348 345 casHead(h, h.next); // pop cancelled node
349 346 else
350 347 return null;
351 348 } else if (casHead(h, s = snode(s, e, h, mode))) {
352 349 SNode m = awaitFulfill(s, timed, nanos);
353 350 if (m == s) { // wait was cancelled
354 351 clean(s);
355 352 return null;
356 353 }
357 354 if ((h = head) != null && h.next == s)
358 355 casHead(h, s.next); // help s's fulfiller
359 - return mode == REQUEST? m.item : s.item;
356 + return (mode == REQUEST) ? m.item : s.item;
360 357 }
361 358 } else if (!isFulfilling(h.mode)) { // try to fulfill
362 359 if (h.isCancelled()) // already cancelled
363 360 casHead(h, h.next); // pop and retry
364 361 else if (casHead(h, s=snode(s, e, h, FULFILLING|mode))) {
365 362 for (;;) { // loop until matched or waiters disappear
366 363 SNode m = s.next; // m is s's match
367 364 if (m == null) { // all waiters are gone
368 365 casHead(s, null); // pop fulfill node
369 366 s = null; // use new node next time
370 367 break; // restart main loop
371 368 }
372 369 SNode mn = m.next;
373 370 if (m.tryMatch(s)) {
374 371 casHead(s, mn); // pop both s and m
375 - return (mode == REQUEST)? m.item : s.item;
372 + return (mode == REQUEST) ? m.item : s.item;
376 373 } else // lost match
377 374 s.casNext(m, mn); // help unlink
378 375 }
379 376 }
380 377 } else { // help a fulfiller
381 378 SNode m = h.next; // m is h's match
382 379 if (m == null) // waiter is gone
383 380 casHead(h, null); // pop fulfilling node
384 381 else {
385 382 SNode mn = m.next;
386 383 if (m.tryMatch(h)) // help match
387 384 casHead(h, mn); // pop both h and m
388 385 else // lost match
389 386 h.casNext(m, mn); // help unlink
390 387 }
391 388 }
392 389 }
393 390 }
394 391
395 392 /**
396 393 * Spins/blocks until node s is matched by a fulfill operation.
397 394 *
398 395 * @param s the waiting node
399 396 * @param timed true if timed wait
400 397 * @param nanos timeout value
401 398 * @return matched node, or s if cancelled
402 399 */
403 400 SNode awaitFulfill(SNode s, boolean timed, long nanos) {
404 401 /*
405 402 * When a node/thread is about to block, it sets its waiter
406 403 * field and then rechecks state at least one more time
407 404 * before actually parking, thus covering race vs
408 405 * fulfiller noticing that waiter is non-null so should be
409 406 * woken.
410 407 *
411 408 * When invoked by nodes that appear at the point of call
412 409 * to be at the head of the stack, calls to park are
413 410 * preceded by spins to avoid blocking when producers and
414 411 * consumers are arriving very close in time. This can
415 412 * happen enough to bother only on multiprocessors.
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
416 413 *
417 414 * The order of checks for returning out of main loop
418 415 * reflects fact that interrupts have precedence over
419 416 * normal returns, which have precedence over
420 417 * timeouts. (So, on timeout, one last check for match is
421 418 * done before giving up.) Except that calls from untimed
422 419 * SynchronousQueue.{poll/offer} don't check interrupts
423 420 * and don't wait at all, so are trapped in transfer
424 421 * method rather than calling awaitFulfill.
425 422 */
426 - long lastTime = (timed)? System.nanoTime() : 0;
423 + long lastTime = timed ? System.nanoTime() : 0;
427 424 Thread w = Thread.currentThread();
428 425 SNode h = head;
429 - int spins = (shouldSpin(s)?
430 - (timed? maxTimedSpins : maxUntimedSpins) : 0);
426 + int spins = (shouldSpin(s) ?
427 + (timed ? maxTimedSpins : maxUntimedSpins) : 0);
431 428 for (;;) {
432 429 if (w.isInterrupted())
433 430 s.tryCancel();
434 431 SNode m = s.match;
435 432 if (m != null)
436 433 return m;
437 434 if (timed) {
438 435 long now = System.nanoTime();
439 436 nanos -= now - lastTime;
440 437 lastTime = now;
441 438 if (nanos <= 0) {
442 439 s.tryCancel();
443 440 continue;
444 441 }
445 442 }
446 443 if (spins > 0)
447 - spins = shouldSpin(s)? (spins-1) : 0;
444 + spins = shouldSpin(s) ? (spins-1) : 0;
448 445 else if (s.waiter == null)
449 446 s.waiter = w; // establish waiter so can park next iter
450 447 else if (!timed)
451 448 LockSupport.park(this);
452 449 else if (nanos > spinForTimeoutThreshold)
453 450 LockSupport.parkNanos(this, nanos);
454 451 }
455 452 }
456 453
457 454 /**
458 455 * Returns true if node s is at head or there is an active
459 456 * fulfiller.
460 457 */
461 458 boolean shouldSpin(SNode s) {
462 459 SNode h = head;
463 460 return (h == s || h == null || isFulfilling(h.mode));
464 461 }
465 462
466 463 /**
467 464 * Unlinks s from the stack.
468 465 */
469 466 void clean(SNode s) {
470 467 s.item = null; // forget item
471 468 s.waiter = null; // forget thread
472 469
473 470 /*
474 471 * At worst we may need to traverse entire stack to unlink
475 472 * s. If there are multiple concurrent calls to clean, we
476 473 * might not see s if another thread has already removed
477 474 * it. But we can stop when we see any node known to
478 475 * follow s. We use s.next unless it too is cancelled, in
479 476 * which case we try the node one past. We don't check any
480 477 * further because we don't want to doubly traverse just to
481 478 * find sentinel.
482 479 */
483 480
484 481 SNode past = s.next;
485 482 if (past != null && past.isCancelled())
486 483 past = past.next;
487 484
488 485 // Absorb cancelled nodes at head
489 486 SNode p;
490 487 while ((p = head) != null && p != past && p.isCancelled())
491 488 casHead(p, p.next);
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
492 489
493 490 // Unsplice embedded nodes
494 491 while (p != null && p != past) {
495 492 SNode n = p.next;
496 493 if (n != null && n.isCancelled())
497 494 p.casNext(n, n.next);
498 495 else
499 496 p = n;
500 497 }
501 498 }
499 +
500 + // Unsafe mechanics
501 + private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
502 + private static final long headOffset =
503 + objectFieldOffset(UNSAFE, "head", TransferStack.class);
504 +
502 505 }
503 506
504 507 /** Dual Queue */
505 508 static final class TransferQueue extends Transferer {
506 509 /*
507 510 * This extends Scherer-Scott dual queue algorithm, differing,
508 511 * among other ways, by using modes within nodes rather than
509 512 * marked pointers. The algorithm is a little simpler than
510 513 * that for stacks because fulfillers do not need explicit
511 514 * nodes, and matching is done by CAS'ing QNode.item field
512 515 * from non-null to null (for put) or vice versa (for take).
513 516 */
514 517
515 518 /** Node class for TransferQueue. */
516 519 static final class QNode {
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
517 520 volatile QNode next; // next node in queue
518 521 volatile Object item; // CAS'ed to or from null
519 522 volatile Thread waiter; // to control park/unpark
520 523 final boolean isData;
521 524
522 525 QNode(Object item, boolean isData) {
523 526 this.item = item;
524 527 this.isData = isData;
525 528 }
526 529
527 - static final AtomicReferenceFieldUpdater<QNode, QNode>
528 - nextUpdater = AtomicReferenceFieldUpdater.newUpdater
529 - (QNode.class, QNode.class, "next");
530 -
531 530 boolean casNext(QNode cmp, QNode val) {
532 - return (next == cmp &&
533 - nextUpdater.compareAndSet(this, cmp, val));
531 + return next == cmp &&
532 + UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
534 533 }
535 534
536 - static final AtomicReferenceFieldUpdater<QNode, Object>
537 - itemUpdater = AtomicReferenceFieldUpdater.newUpdater
538 - (QNode.class, Object.class, "item");
539 -
540 535 boolean casItem(Object cmp, Object val) {
541 - return (item == cmp &&
542 - itemUpdater.compareAndSet(this, cmp, val));
536 + return item == cmp &&
537 + UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
543 538 }
544 539
545 540 /**
546 541 * Tries to cancel by CAS'ing ref to this as item.
547 542 */
548 543 void tryCancel(Object cmp) {
549 - itemUpdater.compareAndSet(this, cmp, this);
544 + UNSAFE.compareAndSwapObject(this, itemOffset, cmp, this);
550 545 }
551 546
552 547 boolean isCancelled() {
553 548 return item == this;
554 549 }
555 550
556 551 /**
557 552 * Returns true if this node is known to be off the queue
558 553 * because its next pointer has been forgotten due to
559 554 * an advanceHead operation.
560 555 */
561 556 boolean isOffList() {
562 557 return next == this;
563 558 }
559 +
560 + // Unsafe mechanics
561 + private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
562 + private static final long nextOffset =
563 + objectFieldOffset(UNSAFE, "next", QNode.class);
564 + private static final long itemOffset =
565 + objectFieldOffset(UNSAFE, "item", QNode.class);
564 566 }
565 567
566 568 /** Head of queue */
567 569 transient volatile QNode head;
568 570 /** Tail of queue */
569 571 transient volatile QNode tail;
570 572 /**
571 573 * Reference to a cancelled node that might not yet have been
572 574 * unlinked from queue because it was the last inserted node
573 575 * when it cancelled.
574 576 */
575 577 transient volatile QNode cleanMe;
576 578
577 579 TransferQueue() {
578 580 QNode h = new QNode(null, false); // initialize to dummy node.
579 581 head = h;
580 582 tail = h;
581 583 }
582 584
583 - static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
584 - headUpdater = AtomicReferenceFieldUpdater.newUpdater
585 - (TransferQueue.class, QNode.class, "head");
586 -
587 585 /**
588 586 * Tries to cas nh as new head; if successful, unlink
589 587 * old head's next node to avoid garbage retention.
590 588 */
591 589 void advanceHead(QNode h, QNode nh) {
592 - if (h == head && headUpdater.compareAndSet(this, h, nh))
590 + if (h == head &&
591 + UNSAFE.compareAndSwapObject(this, headOffset, h, nh))
593 592 h.next = h; // forget old next
594 593 }
595 594
596 - static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
597 - tailUpdater = AtomicReferenceFieldUpdater.newUpdater
598 - (TransferQueue.class, QNode.class, "tail");
599 -
600 595 /**
601 596 * Tries to cas nt as new tail.
602 597 */
603 598 void advanceTail(QNode t, QNode nt) {
604 599 if (tail == t)
605 - tailUpdater.compareAndSet(this, t, nt);
600 + UNSAFE.compareAndSwapObject(this, tailOffset, t, nt);
606 601 }
607 602
608 - static final AtomicReferenceFieldUpdater<TransferQueue, QNode>
609 - cleanMeUpdater = AtomicReferenceFieldUpdater.newUpdater
610 - (TransferQueue.class, QNode.class, "cleanMe");
611 -
612 603 /**
613 604 * Tries to CAS cleanMe slot.
614 605 */
615 606 boolean casCleanMe(QNode cmp, QNode val) {
616 - return (cleanMe == cmp &&
617 - cleanMeUpdater.compareAndSet(this, cmp, val));
607 + return cleanMe == cmp &&
608 + UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
618 609 }
619 610
620 611 /**
621 612 * Puts or takes an item.
622 613 */
623 614 Object transfer(Object e, boolean timed, long nanos) {
624 615 /* Basic algorithm is to loop trying to take either of
625 616 * two actions:
626 617 *
627 618 * 1. If queue apparently empty or holding same-mode nodes,
628 619 * try to add node to queue of waiters, wait to be
629 620 * fulfilled (or cancelled) and return matching item.
630 621 *
631 622 * 2. If queue apparently contains waiting items, and this
632 623 * call is of complementary mode, try to fulfill by CAS'ing
633 624 * item field of waiting node and dequeuing it, and then
634 625 * returning matching item.
635 626 *
636 627 * In each case, along the way, check for and try to help
637 628 * advance head and tail on behalf of other stalled/slow
638 629 * threads.
639 630 *
640 631 * The loop starts off with a null check guarding against
641 632 * seeing uninitialized head or tail values. This never
642 633 * happens in current SynchronousQueue, but could if
643 634 * callers held non-volatile/final ref to the
644 635 * transferer. The check is here anyway because it places
645 636 * null checks at top of loop, which is usually faster
646 637 * than having them implicitly interspersed.
647 638 */
648 639
649 640 QNode s = null; // constructed/reused as needed
650 641 boolean isData = (e != null);
651 642
652 643 for (;;) {
653 644 QNode t = tail;
654 645 QNode h = head;
655 646 if (t == null || h == null) // saw uninitialized value
656 647 continue; // spin
657 648
658 649 if (h == t || t.isData == isData) { // empty or same-mode
659 650 QNode tn = t.next;
660 651 if (t != tail) // inconsistent read
661 652 continue;
662 653 if (tn != null) { // lagging tail
663 654 advanceTail(t, tn);
664 655 continue;
665 656 }
666 657 if (timed && nanos <= 0) // can't wait
667 658 return null;
668 659 if (s == null)
669 660 s = new QNode(e, isData);
670 661 if (!t.casNext(null, s)) // failed to link in
671 662 continue;
672 663
673 664 advanceTail(t, s); // swing tail and wait
674 665 Object x = awaitFulfill(s, e, timed, nanos);
675 666 if (x == s) { // wait was cancelled
↓ open down ↓ |
48 lines elided |
↑ open up ↑ |
676 667 clean(t, s);
677 668 return null;
678 669 }
679 670
680 671 if (!s.isOffList()) { // not already unlinked
681 672 advanceHead(t, s); // unlink if head
682 673 if (x != null) // and forget fields
683 674 s.item = s;
684 675 s.waiter = null;
685 676 }
686 - return (x != null)? x : e;
677 + return (x != null) ? x : e;
687 678
688 679 } else { // complementary-mode
689 680 QNode m = h.next; // node to fulfill
690 681 if (t != tail || m == null || h != head)
691 682 continue; // inconsistent read
692 683
693 684 Object x = m.item;
694 685 if (isData == (x != null) || // m already fulfilled
695 686 x == m || // m cancelled
696 687 !m.casItem(x, e)) { // lost CAS
697 688 advanceHead(h, m); // dequeue and retry
698 689 continue;
699 690 }
700 691
701 692 advanceHead(h, m); // successfully fulfilled
702 693 LockSupport.unpark(m.waiter);
703 - return (x != null)? x : e;
694 + return (x != null) ? x : e;
704 695 }
705 696 }
706 697 }
707 698
        /**
         * Spins/blocks until node s is fulfilled.
         *
         * Same idea as TransferStack.awaitFulfill: spin for a while only
         * when s is the next node due to be fulfilled (a match is likely
         * imminent), then publish the waiter thread and park.
         *
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            long lastTime = timed ? System.nanoTime() : 0;
            Thread w = Thread.currentThread();
            // Spin only if s is at the front of the queue.
            int spins = ((head.next == s) ?
                         (timed ? maxTimedSpins : maxUntimedSpins) : 0);
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);
                Object x = s.item;
                // item changed from e: either fulfilled, or cancelled (x == s)
                if (x != e)
                    return x;
                if (timed) {
                    long now = System.nanoTime();
                    nanos -= now - lastTime;
                    lastTime = now;
                    if (nanos <= 0) {
                        s.tryCancel(e);
                        continue; // re-read item so cancellation is returned
                    }
                }
                if (spins > 0)
                    --spins;
                else if (s.waiter == null)
                    s.waiter = w;          // publish waiter before parking
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > spinForTimeoutThreshold)
                    LockSupport.parkNanos(this, nanos);
            }
        }
748 739
        /**
         * Gets rid of cancelled node s with original predecessor pred.
         *
         * @param pred the node believed to precede s when s was cancelled
         * @param s the cancelled node to unlink
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;          // queue empty; nothing left to clean
                QNode tn = t.next;
                if (t != tail)
                    continue;        // tail moved; re-read
                if (tn != null) {
                    advanceTail(t, tn); // help lagging tail forward
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }
793 +
        // unsafe mechanics
        // Field offsets -- presumably used by the CAS helpers
        // (advanceHead/advanceTail/casCleanMe); bodies not visible here.
        private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
        private static final long headOffset =
            objectFieldOffset(UNSAFE, "head", TransferQueue.class);
        private static final long tailOffset =
            objectFieldOffset(UNSAFE, "tail", TransferQueue.class);
        private static final long cleanMeOffset =
            objectFieldOffset(UNSAFE, "cleanMe", TransferQueue.class);
802 803 }
803 804
    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization. Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     * Holds a TransferQueue (fair mode) or TransferStack (nonfair mode).
     */
    private transient volatile Transferer transferer;
812 813
    /**
     * Creates a <tt>SynchronousQueue</tt> with nonfair access policy.
     */
    public SynchronousQueue() {
        this(false); // nonfair by default
    }
819 820
820 821 /**
821 822 * Creates a <tt>SynchronousQueue</tt> with the specified fairness policy.
822 823 *
823 824 * @param fair if true, waiting threads contend in FIFO order for
824 825 * access; otherwise the order is unspecified.
825 826 */
826 827 public SynchronousQueue(boolean fair) {
827 - transferer = (fair)? new TransferQueue() : new TransferStack();
828 + transferer = fair ? new TransferQueue() : new TransferStack();
828 829 }
829 830
830 831 /**
831 832 * Adds the specified element to this queue, waiting if necessary for
832 833 * another thread to receive it.
833 834 *
834 835 * @throws InterruptedException {@inheritDoc}
835 836 * @throws NullPointerException {@inheritDoc}
836 837 */
837 838 public void put(E o) throws InterruptedException {
838 839 if (o == null) throw new NullPointerException();
839 840 if (transferer.transfer(o, false, 0) == null) {
840 841 Thread.interrupted();
841 842 throw new InterruptedException();
842 843 }
843 844 }
844 845
845 846 /**
846 847 * Inserts the specified element into this queue, waiting if necessary
847 848 * up to the specified wait time for another thread to receive it.
848 849 *
849 850 * @return <tt>true</tt> if successful, or <tt>false</tt> if the
850 851 * specified waiting time elapses before a consumer appears.
851 852 * @throws InterruptedException {@inheritDoc}
852 853 * @throws NullPointerException {@inheritDoc}
853 854 */
854 855 public boolean offer(E o, long timeout, TimeUnit unit)
855 856 throws InterruptedException {
856 857 if (o == null) throw new NullPointerException();
857 858 if (transferer.transfer(o, true, unit.toNanos(timeout)) != null)
858 859 return true;
859 860 if (!Thread.interrupted())
860 861 return false;
861 862 throw new InterruptedException();
862 863 }
863 864
864 865 /**
865 866 * Inserts the specified element into this queue, if another thread is
866 867 * waiting to receive it.
867 868 *
868 869 * @param e the element to add
869 870 * @return <tt>true</tt> if the element was added to this queue, else
870 871 * <tt>false</tt>
871 872 * @throws NullPointerException if the specified element is null
872 873 */
873 874 public boolean offer(E e) {
874 875 if (e == null) throw new NullPointerException();
875 876 return transferer.transfer(e, true, 0) != null;
876 877 }
877 878
878 879 /**
879 880 * Retrieves and removes the head of this queue, waiting if necessary
880 881 * for another thread to insert it.
881 882 *
882 883 * @return the head of this queue
883 884 * @throws InterruptedException {@inheritDoc}
884 885 */
885 886 public E take() throws InterruptedException {
886 887 Object e = transferer.transfer(null, false, 0);
887 888 if (e != null)
888 889 return (E)e;
889 890 Thread.interrupted();
890 891 throw new InterruptedException();
891 892 }
892 893
893 894 /**
894 895 * Retrieves and removes the head of this queue, waiting
895 896 * if necessary up to the specified wait time, for another thread
896 897 * to insert it.
897 898 *
898 899 * @return the head of this queue, or <tt>null</tt> if the
899 900 * specified waiting time elapses before an element is present.
900 901 * @throws InterruptedException {@inheritDoc}
901 902 */
902 903 public E poll(long timeout, TimeUnit unit) throws InterruptedException {
903 904 Object e = transferer.transfer(null, true, unit.toNanos(timeout));
904 905 if (e != null || !Thread.interrupted())
905 906 return (E)e;
906 907 throw new InterruptedException();
907 908 }
908 909
    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or <tt>null</tt> if no
     *         element is available.
     */
    public E poll() {
        return (E)transferer.transfer(null, true, 0); // non-blocking attempt
    }
919 920
    /**
     * Always returns <tt>true</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @return <tt>true</tt>
     */
    public boolean isEmpty() {
        return true; // never holds elements between hand-offs
    }
929 930
    /**
     * Always returns zero.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @return zero.
     */
    public int size() {
        return 0; // no stored elements, ever
    }
939 940
    /**
     * Always returns zero.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @return zero.
     */
    public int remainingCapacity() {
        return 0; // cannot buffer any element
    }
949 950
    /**
     * Does nothing.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     */
    public void clear() {
        // intentionally empty: there is never anything to clear
    }
956 957
    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param o the element
     * @return <tt>false</tt>
     */
    public boolean contains(Object o) {
        return false; // nothing is ever held
    }
967 968
    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param o the element to remove
     * @return <tt>false</tt>
     */
    public boolean remove(Object o) {
        return false; // nothing stored, so nothing removable
    }
978 979
    /**
     * Returns <tt>false</tt> unless the given collection is empty.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param c the collection
     * @return <tt>false</tt> unless given collection is empty
     */
    public boolean containsAll(Collection<?> c) {
        // An empty queue trivially contains only the empty collection.
        return c.isEmpty();
    }
989 990
    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param c the collection
     * @return <tt>false</tt>
     */
    public boolean removeAll(Collection<?> c) {
        return false; // nothing stored, so nothing to remove
    }
1000 1001
    /**
     * Always returns <tt>false</tt>.
     * A <tt>SynchronousQueue</tt> has no internal capacity.
     *
     * @param c the collection
     * @return <tt>false</tt>
     */
    public boolean retainAll(Collection<?> c) {
        return false; // nothing stored, so nothing changes
    }
1011 1012
    /**
     * Always returns <tt>null</tt>.
     * A <tt>SynchronousQueue</tt> does not return elements
     * unless actively waited on.
     *
     * @return <tt>null</tt>
     */
    public E peek() {
        return null; // cannot observe an element without removing it
    }
1022 1023
    /**
     * Returns an empty iterator in which <tt>hasNext</tt> always returns
     * <tt>false</tt>.
     *
     * @return an empty iterator
     */
    public Iterator<E> iterator() {
        return Collections.emptyIterator(); // queue is always "empty"
    }
1032 1033
    /**
     * Returns a zero-length array.
     * @return a zero-length array
     */
    public Object[] toArray() {
        return new Object[0]; // always empty snapshot
    }
1040 1041
    /**
     * Sets the zeroeth element of the specified array to <tt>null</tt>
     * (if the array has non-zero length) and returns it.
     *
     * @param a the array
     * @return the specified array
     * @throws NullPointerException if the specified array is null
     */
    public <T> T[] toArray(T[] a) {
        // Per Collection.toArray contract: null-terminate the (empty) contents.
        if (a.length > 0)
            a[0] = null;
        return a;
    }
1054 1055
1055 1056 /**
1056 1057 * @throws UnsupportedOperationException {@inheritDoc}
1057 1058 * @throws ClassCastException {@inheritDoc}
1058 1059 * @throws NullPointerException {@inheritDoc}
1059 1060 * @throws IllegalArgumentException {@inheritDoc}
1060 1061 */
1061 1062 public int drainTo(Collection<? super E> c) {
1062 1063 if (c == null)
1063 1064 throw new NullPointerException();
1064 1065 if (c == this)
1065 1066 throw new IllegalArgumentException();
1066 1067 int n = 0;
1067 1068 E e;
1068 1069 while ( (e = poll()) != null) {
1069 1070 c.add(e);
1070 1071 ++n;
1071 1072 }
1072 1073 return n;
1073 1074 }
1074 1075
1075 1076 /**
1076 1077 * @throws UnsupportedOperationException {@inheritDoc}
1077 1078 * @throws ClassCastException {@inheritDoc}
1078 1079 * @throws NullPointerException {@inheritDoc}
1079 1080 * @throws IllegalArgumentException {@inheritDoc}
1080 1081 */
1081 1082 public int drainTo(Collection<? super E> c, int maxElements) {
1082 1083 if (c == null)
1083 1084 throw new NullPointerException();
1084 1085 if (c == this)
1085 1086 throw new IllegalArgumentException();
1086 1087 int n = 0;
1087 1088 E e;
1088 1089 while (n < maxElements && (e = poll()) != null) {
1089 1090 c.add(e);
1090 1091 ++n;
1091 1092 }
1092 1093 return n;
1093 1094 }
1094 1095
    /*
     * To cope with serialization strategy in the 1.5 version of
     * SynchronousQueue, we declare some unused classes and fields
     * that exist solely to enable serializability across versions.
     * These fields are never used, so are initialized only if this
     * object is ever serialized or deserialized.
     */

    // Marker hierarchy: the concrete type written for waitingProducers/
    // waitingConsumers encodes the fairness mode on the wire.
    static class WaitQueue implements java.io.Serializable { }
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    private ReentrantLock qlock;          // unused; 1.5 stream compatibility
    private WaitQueue waitingProducers;   // written/read only by (de)serialization
    private WaitQueue waitingConsumers;
1113 1114
1114 1115 /**
1115 1116 * Save the state to a stream (that is, serialize it).
1116 1117 *
1117 1118 * @param s the stream
1118 1119 */
1119 1120 private void writeObject(java.io.ObjectOutputStream s)
1120 1121 throws java.io.IOException {
1121 1122 boolean fair = transferer instanceof TransferQueue;
1122 1123 if (fair) {
1123 1124 qlock = new ReentrantLock(true);
1124 1125 waitingProducers = new FifoWaitQueue();
1125 1126 waitingConsumers = new FifoWaitQueue();
1126 1127 }
1127 1128 else {
1128 1129 qlock = new ReentrantLock();
1129 1130 waitingProducers = new LifoWaitQueue();
1130 1131 waitingConsumers = new LifoWaitQueue();
1131 1132 }
1132 1133 s.defaultWriteObject();
1133 1134 }
↓ open down ↓ |
296 lines elided |
↑ open up ↑ |
1134 1135
1135 1136 private void readObject(final java.io.ObjectInputStream s)
1136 1137 throws java.io.IOException, ClassNotFoundException {
1137 1138 s.defaultReadObject();
1138 1139 if (waitingProducers instanceof FifoWaitQueue)
1139 1140 transferer = new TransferQueue();
1140 1141 else
1141 1142 transferer = new TransferStack();
1142 1143 }
1143 1144
    // Unsafe mechanics
    /**
     * Returns the Unsafe field offset of the named declared field of the
     * given class, converting a reflective NoSuchFieldException into the
     * corresponding NoSuchFieldError (with cause preserved) so callers in
     * static initializers need not handle a checked exception.
     *
     * @param UNSAFE the Unsafe instance to query
     * @param field the name of the declared field
     * @param klazz the class declaring the field
     * @return the field offset
     */
    static long objectFieldOffset(sun.misc.Unsafe UNSAFE,
                                  String field, Class<?> klazz) {
        try {
            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
        } catch (NoSuchFieldException e) {
            // Convert Exception to corresponding Error
            NoSuchFieldError error = new NoSuchFieldError(field);
            error.initCause(e);
            throw error;
        }
    }
1157 +
1144 1158 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX