18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 */
24
25 /*
26 * This file is available under and governed by the GNU General Public
27 * License version 2 only, as published by the Free Software Foundation.
28 * However, the following notice accompanied the original version of this
29 * file:
30 *
31 * Written by Doug Lea with assistance from members of JCP JSR-166
32 * Expert Group and released to the public domain, as explained at
33 * http://creativecommons.org/publicdomain/zero/1.0/
34 */
35
36 package java.util.concurrent.locks;
37
38 import java.lang.invoke.MethodHandles;
39 import java.lang.invoke.VarHandle;
40 import java.util.concurrent.TimeUnit;
41 import jdk.internal.vm.annotation.ReservedStackAccess;
42
43 /**
44 * A capability-based lock with three modes for controlling read/write
45 * access. The state of a StampedLock consists of a version and mode.
46 * Lock acquisition methods return a stamp that represents and
47 * controls access with respect to a lock state; "try" versions of
48 * these methods may instead return the special value zero to
49 * represent failure to acquire access. Lock release and conversion
50 * methods require stamps as arguments, and fail if they do not match
51 * the state of the lock. The three modes are:
52 *
53 * <ul>
54 *
55 * <li><b>Writing.</b> Method {@link #writeLock} possibly blocks
56 * waiting for exclusive access, returning a stamp that can be used
57 * in method {@link #unlockWrite} to release the lock. Untimed and
58 * timed versions of {@code tryWriteLock} are also provided. When
59 * the lock is held in write mode, no read locks may be obtained,
60 * and all optimistic read validations will fail.
115 * <p>Like {@link java.util.concurrent.Semaphore Semaphore}, but unlike most
116 * {@link Lock} implementations, StampedLocks have no notion of ownership.
117 * Locks acquired in one thread can be released or converted in another.
118 *
119 * <p>The scheduling policy of StampedLock does not consistently
120 * prefer readers over writers or vice versa. All "try" methods are
121 * best-effort and do not necessarily conform to any scheduling or
122 * fairness policy. A zero return from any "try" method for acquiring
123 * or converting locks does not carry any information about the state
124 * of the lock; a subsequent invocation may succeed.
125 *
126 * <p>Because it supports coordinated usage across multiple lock
127 * modes, this class does not directly implement the {@link Lock} or
128 * {@link ReadWriteLock} interfaces. However, a StampedLock may be
129 * viewed {@link #asReadLock()}, {@link #asWriteLock()}, or {@link
130 * #asReadWriteLock()} in applications requiring only the associated
131 * set of functionality.
132 *
133 * <p><b>Memory Synchronization.</b> Methods with the effect of
134 * successfully locking in any mode have the same memory
135 * synchronization effects as a <em>Lock</em> action described in
136 * <a href="https://docs.oracle.com/javase/specs/jls/se11/html/jls-17.html#jls-17.4">
137 * Chapter 17 of <cite>The Java™ Language Specification</cite></a>.
138 * Methods successfully unlocking in write mode have the same memory
139 * synchronization effects as an <em>Unlock</em> action. In optimistic
140 * read usages, actions prior to the most recent write mode unlock action
141 * are guaranteed to happen-before those following a tryOptimisticRead
142 * only if a later validate returns true; otherwise there is no guarantee
143 * that the reads between tryOptimisticRead and validate obtain a
144 * consistent snapshot.
145 *
146 * <p><b>Sample Usage.</b> The following illustrates some usage idioms
147 * in a class that maintains simple two-dimensional points. The sample
148 * code illustrates some try/catch conventions even though they are
149 * not strictly needed here because no exceptions can occur in their
150 * bodies.
151 *
152 * <pre> {@code
153 * class Point {
154 * private double x, y;
155 * private final StampedLock sl = new StampedLock();
156 *
157 * // an exclusively locked method
220 * try {
221 * while (x == 0.0 && y == 0.0) {
222 * long ws = sl.tryConvertToWriteLock(stamp);
223 * if (ws != 0L) {
224 * stamp = ws;
225 * x = newX;
226 * y = newY;
227 * break;
228 * }
229 * else {
230 * sl.unlockRead(stamp);
231 * stamp = sl.writeLock();
232 * }
233 * }
234 * } finally {
235 * sl.unlock(stamp);
236 * }
237 * }
238 * }}</pre>
239 *
240 * @since 1.8
241 * @author Doug Lea
242 */
243 public class StampedLock implements java.io.Serializable {
244 /*
245 * Algorithmic notes:
246 *
247 * The design employs elements of Sequence locks
248 * (as used in linux kernels; see Lameter's
249 * http://www.lameter.com/gelato2005.pdf
250 * and elsewhere; see
251 * Boehm's http://www.hpl.hp.com/techreports/2012/HPL-2012-68.html)
252 * and Ordered RW locks (see Shirako et al
253 * http://dl.acm.org/citation.cfm?id=2312015)
254 *
255 * Conceptually, the primary state of the lock includes a sequence
256 * number that is odd when write-locked and even otherwise.
257 * However, this is offset by a reader count that is non-zero when
258 * read-locked. The read count is ignored when validating
259 * "optimistic" seqlock-reader-style stamps. Because we must use
260 * a small finite number of bits (currently 7) for readers, a
261 * supplementary reader overflow word is used when the number of
262 * readers exceeds the count field. We do this by treating the max
263 * reader count value (RBITS) as a spinlock protecting overflow
264 * updates.
265 *
266 * Waiters use a modified form of CLH lock used in
267 * AbstractQueuedSynchronizer (see its internal documentation for
268 * a fuller account), where each node is tagged (field mode) as
269 * either a reader or writer. Sets of waiting readers are grouped
270 * (linked) under a common node (field cowait) so act as a single
271 * node with respect to most CLH mechanics. By virtue of the
272 * queue structure, wait nodes need not actually carry sequence
273 * numbers; we know each is greater than its predecessor. This
274 * simplifies the scheduling policy to a mainly-FIFO scheme that
275 * incorporates elements of Phase-Fair locks (see Brandenburg &
276 * Anderson, especially http://www.cs.unc.edu/~bbb/diss/). In
277 * particular, we use the phase-fair anti-barging rule: If an
278 * incoming reader arrives while read lock is held but there is a
279 * queued writer, this incoming reader is queued. (This rule is
280 * responsible for some of the complexity of method acquireRead,
281 * but without it, the lock becomes highly unfair.) Method release
282 * does not (and sometimes cannot) itself wake up cowaiters. This
283 * is done by the primary thread, but helped by any other threads
284 * with nothing better to do in methods acquireRead and
285 * acquireWrite.
286 *
287 * These rules apply to threads actually queued. All tryLock forms
288 * opportunistically try to acquire locks regardless of preference
289 * rules, and so may "barge" their way in. Randomized spinning is
290 * used in the acquire methods to reduce (increasingly expensive)
291 * context switching while also avoiding sustained memory
292 * thrashing among many threads. We limit spins to the head of
293 * queue. If, upon wakening, a thread fails to obtain lock, and is
294 * still (or becomes) the first waiting thread (which indicates
295 * that some other thread barged and obtained lock), it escalates
296 * spins (up to MAX_HEAD_SPINS) to reduce the likelihood of
297 * continually losing to barging threads.
298 *
299 * Nearly all of these mechanics are carried out in methods
300 * acquireWrite and acquireRead, that, as typical of such code,
301 * sprawl out because actions and retries rely on consistent sets
302 * of locally cached reads.
303 *
304 * As noted in Boehm's paper (above), sequence validation (mainly
305 * method validate()) requires stricter ordering rules than apply
306 * to normal volatile reads (of "state"). To force orderings of
307 * reads before a validation and the validation itself in those
308 * cases where this is not already forced, we use acquireFence.
309 * Unlike in that paper, we allow writers to use plain writes.
310 * One would not expect reorderings of such writes with the lock
311 * acquisition CAS because there is a "control dependency", but it
312 * is theoretically possible, so we additionally add a
313 * storeStoreFence after lock acquisition CAS.
314 *
315 * ----------------------------------------------------------------
316 * Here's an informal proof that plain reads by _successful_
317 * readers see plain writes from preceding but not following
318 * writers (following Boehm and the C++ standard [atomics.fences]):
319 *
320 * Because of the total synchronization order of accesses to
321 * volatile long state containing the sequence number, writers and
322 * _successful_ readers can be globally sequenced.
323 *
324 * int x, y;
325 *
326 * Writer 1:
327 * inc sequence (odd - "locked")
328 * storeStoreFence();
329 * x = 1; y = 2;
330 * inc sequence (even - "unlocked")
331 *
332 * Successful Reader:
333 * read sequence (even)
334 * // must see writes from Writer 1 but not Writer 2
335 * r1 = x; r2 = y;
336 * acquireFence();
337 * read sequence (even - validated unchanged)
338 * // use r1 and r2
339 *
340 * Writer 2:
341 * inc sequence (odd - "locked")
342 * storeStoreFence();
343 * x = 3; y = 4;
344 * inc sequence (even - "unlocked")
345 *
346 * Visibility of writer 1's stores is normal - reader's initial
347 * read of state synchronizes with writer 1's final write to state.
348 * Lack of visibility of writer 2's plain writes is less obvious.
349 * If reader's read of x or y saw writer 2's write, then (assuming
350 * semantics of C++ fences) the storeStoreFence would "synchronize"
351 * with reader's acquireFence and reader's validation read must see
352 * writer 2's initial write to state and so validation must fail.
353 * But making this "proof" formal and rigorous is an open problem!
354 * ----------------------------------------------------------------
355 *
356 * The memory layout keeps lock state and queue pointers together
357 * (normally on the same cache line). This usually works well for
358 * read-mostly loads. In most other cases, the natural tendency of
359 * adaptive-spin CLH locks to reduce memory contention lessens
360 * motivation to further spread out contended locations, but might
361 * be subject to future improvements.
362 */
363
364 private static final long serialVersionUID = -6001602636862214147L;
365
366 /** Number of processors, for spin control */
367 private static final int NCPU = Runtime.getRuntime().availableProcessors();
368
369 /** Maximum number of retries before enqueuing on acquisition; at least 1 */
370 private static final int SPINS = (NCPU > 1) ? 1 << 6 : 1;
371
372 /** Maximum number of tries before blocking at head on acquisition */
373 private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 1;
374
375 /** Maximum number of retries before re-blocking */
376 private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 1;
377
378 /** The period for yielding when waiting for overflow spinlock */
379 private static final int OVERFLOW_YIELD_RATE = 7; // must be power 2 - 1
380
381 /** The number of bits to use for reader count before overflowing */
382 private static final int LG_READERS = 7;
383
384 // Values for lock state and stamp operations
385 private static final long RUNIT = 1L;
386 private static final long WBIT = 1L << LG_READERS;
387 private static final long RBITS = WBIT - 1L;
388 private static final long RFULL = RBITS - 1L;
389 private static final long ABITS = RBITS | WBIT;
390 private static final long SBITS = ~RBITS; // note overlap with ABITS
391
392 /*
393 * 3 stamp modes can be distinguished by examining (m = stamp & ABITS):
394 * write mode: m == WBIT
395 * optimistic read mode: m == 0L (even when read lock is held)
396 * read mode: m > 0L && m <= RFULL (the stamp is a copy of state, but the
397 * read hold count in the stamp is unused other than to determine mode)
398 *
399 * This differs slightly from the encoding of state:
400 * (state & ABITS) == 0L indicates the lock is currently unlocked.
401 * (state & ABITS) == RBITS is a special transient value
402 * indicating spin-locked to manipulate reader bits overflow.
403 */
404
405 /** Initial value for lock state; avoids failure value zero. */
406 private static final long ORIGIN = WBIT << 1;
407
408 // Special value from cancelled acquire methods so caller can throw IE
409 private static final long INTERRUPTED = 1L;
410
411 // Values for node status; order matters
412 private static final int WAITING = -1;
413 private static final int CANCELLED = 1;
414
415 // Modes for nodes (int not boolean to allow arithmetic)
416 private static final int RMODE = 0;
417 private static final int WMODE = 1;
418
419 /** Wait nodes */
420 static final class WNode {
421 volatile WNode prev;
422 volatile WNode next;
423 volatile WNode cowait; // list of linked readers
424 volatile Thread thread; // non-null while possibly parked
425 volatile int status; // 0, WAITING, or CANCELLED
426 final int mode; // RMODE or WMODE
427 WNode(int m, WNode p) { mode = m; prev = p; }
428 }
429
430 /** Head of CLH queue */
431 private transient volatile WNode whead;
432 /** Tail (last) of CLH queue */
433 private transient volatile WNode wtail;
434
435 // views
436 transient ReadLockView readLockView;
437 transient WriteLockView writeLockView;
438 transient ReadWriteLockView readWriteLockView;
439
440 /** Lock sequence/state */
441 private transient volatile long state;
442 /** extra reader count when state read count saturated */
443 private transient int readerOverflow;
444
445 /**
446 * Creates a new lock, initially in unlocked state.
447 */
448 public StampedLock() {
449 state = ORIGIN;
450 }
451
452 private boolean casState(long expectedValue, long newValue) {
453 return STATE.compareAndSet(this, expectedValue, newValue);
454 }
455
456 private long tryWriteLock(long s) {
457 // assert (s & ABITS) == 0L;
458 long next;
459 if (casState(s, next = s | WBIT)) {
460 VarHandle.storeStoreFence();
461 return next;
462 }
463 return 0L;
464 }
465
466 /**
467 * Exclusively acquires the lock, blocking if necessary
468 * until available.
469 *
470 * @return a write stamp that can be used to unlock or convert mode
471 */
472 @ReservedStackAccess
473 public long writeLock() {
474 long next;
475 return ((next = tryWriteLock()) != 0L) ? next : acquireWrite(false, 0L);
476 }
477
478 /**
479 * Exclusively acquires the lock if it is immediately available.
480 *
481 * @return a write stamp that can be used to unlock or convert mode,
482 * or zero if the lock is not available
483 */
484 @ReservedStackAccess
485 public long tryWriteLock() {
486 long s;
487 return (((s = state) & ABITS) == 0L) ? tryWriteLock(s) : 0L;
488 }
489
490 /**
491 * Exclusively acquires the lock if it is available within the
492 * given time and the current thread has not been interrupted.
493 * Behavior under timeout and interruption matches that specified
494 * for method {@link Lock#tryLock(long,TimeUnit)}.
495 *
496 * @param time the maximum time to wait for the lock
497 * @param unit the time unit of the {@code time} argument
498 * @return a write stamp that can be used to unlock or convert mode,
499 * or zero if the lock is not available
500 * @throws InterruptedException if the current thread is interrupted
501 * before acquiring the lock
502 */
503 public long tryWriteLock(long time, TimeUnit unit)
504 throws InterruptedException {
505 long nanos = unit.toNanos(time);
506 if (!Thread.interrupted()) {
507 long next, deadline;
508 if ((next = tryWriteLock()) != 0L)
509 return next;
510 if (nanos <= 0L)
511 return 0L;
512 if ((deadline = System.nanoTime() + nanos) == 0L)
513 deadline = 1L;
514 if ((next = acquireWrite(true, deadline)) != INTERRUPTED)
515 return next;
516 }
517 throw new InterruptedException();
518 }
519
520 /**
521 * Exclusively acquires the lock, blocking if necessary
522 * until available or the current thread is interrupted.
523 * Behavior under interruption matches that specified
524 * for method {@link Lock#lockInterruptibly()}.
525 *
526 * @return a write stamp that can be used to unlock or convert mode
527 * @throws InterruptedException if the current thread is interrupted
528 * before acquiring the lock
529 */
530 @ReservedStackAccess
531 public long writeLockInterruptibly() throws InterruptedException {
532 long next;
533 if (!Thread.interrupted() &&
534 (next = acquireWrite(true, 0L)) != INTERRUPTED)
535 return next;
536 throw new InterruptedException();
537 }
538
539 /**
540 * Non-exclusively acquires the lock, blocking if necessary
541 * until available.
542 *
543 * @return a read stamp that can be used to unlock or convert mode
544 */
545 @ReservedStackAccess
546 public long readLock() {
547 long s, next;
548 // bypass acquireRead on common uncontended case
549 return (whead == wtail
550 && ((s = state) & ABITS) < RFULL
551 && casState(s, next = s + RUNIT))
552 ? next
553 : acquireRead(false, 0L);
554 }
555
556 /**
557 * Non-exclusively acquires the lock if it is immediately available.
558 *
559 * @return a read stamp that can be used to unlock or convert mode,
560 * or zero if the lock is not available
561 */
562 @ReservedStackAccess
563 public long tryReadLock() {
564 long s, m, next;
565 while ((m = (s = state) & ABITS) != WBIT) {
566 if (m < RFULL) {
567 if (casState(s, next = s + RUNIT))
568 return next;
569 }
570 else if ((next = tryIncReaderOverflow(s)) != 0L)
571 return next;
572 }
573 return 0L;
574 }
575
576 /**
577 * Non-exclusively acquires the lock if it is available within the
578 * given time and the current thread has not been interrupted.
579 * Behavior under timeout and interruption matches that specified
580 * for method {@link Lock#tryLock(long,TimeUnit)}.
581 *
582 * @param time the maximum time to wait for the lock
583 * @param unit the time unit of the {@code time} argument
584 * @return a read stamp that can be used to unlock or convert mode,
585 * or zero if the lock is not available
586 * @throws InterruptedException if the current thread is interrupted
587 * before acquiring the lock
588 */
589 @ReservedStackAccess
590 public long tryReadLock(long time, TimeUnit unit)
591 throws InterruptedException {
592 long s, m, next, deadline;
593 long nanos = unit.toNanos(time);
594 if (!Thread.interrupted()) {
595 if ((m = (s = state) & ABITS) != WBIT) {
596 if (m < RFULL) {
597 if (casState(s, next = s + RUNIT))
598 return next;
599 }
600 else if ((next = tryIncReaderOverflow(s)) != 0L)
601 return next;
602 }
603 if (nanos <= 0L)
604 return 0L;
605 if ((deadline = System.nanoTime() + nanos) == 0L)
606 deadline = 1L;
607 if ((next = acquireRead(true, deadline)) != INTERRUPTED)
608 return next;
609 }
610 throw new InterruptedException();
611 }
612
613 /**
614 * Non-exclusively acquires the lock, blocking if necessary
615 * until available or the current thread is interrupted.
616 * Behavior under interruption matches that specified
617 * for method {@link Lock#lockInterruptibly()}.
618 *
619 * @return a read stamp that can be used to unlock or convert mode
620 * @throws InterruptedException if the current thread is interrupted
621 * before acquiring the lock
622 */
623 @ReservedStackAccess
624 public long readLockInterruptibly() throws InterruptedException {
625 long s, next;
626 if (!Thread.interrupted()
627 // bypass acquireRead on common uncontended case
628 && ((whead == wtail
629 && ((s = state) & ABITS) < RFULL
630 && casState(s, next = s + RUNIT))
631 ||
632 (next = acquireRead(true, 0L)) != INTERRUPTED))
633 return next;
634 throw new InterruptedException();
635 }
636
637 /**
638 * Returns a stamp that can later be validated, or zero
639 * if exclusively locked.
640 *
641 * @return a valid optimistic read stamp, or zero if exclusively locked
642 */
643 public long tryOptimisticRead() {
644 long s;
645 return (((s = state) & WBIT) == 0L) ? (s & SBITS) : 0L;
646 }
647
648 /**
649 * Returns true if the lock has not been exclusively acquired
650 * since issuance of the given stamp. Always returns false if the
651 * stamp is zero. Always returns true if the stamp represents a
652 * currently held lock. Invoking this method with a value not
653 * obtained from {@link #tryOptimisticRead} or a locking method
654 * for this lock has no defined effect or result.
655 *
656 * @param stamp a stamp
657 * @return {@code true} if the lock has not been exclusively acquired
658 * since issuance of the given stamp; else false
659 */
660 public boolean validate(long stamp) {
661 VarHandle.acquireFence();
662 return (stamp & SBITS) == (state & SBITS);
663 }
664
665 /**
666 * Returns an unlocked state, incrementing the version and
667 * avoiding special failure value 0L.
668 *
669 * @param s a write-locked state (or stamp)
670 */
671 private static long unlockWriteState(long s) {
672 return ((s += WBIT) == 0L) ? ORIGIN : s;
673 }
674
675 private long unlockWriteInternal(long s) {
676 long next; WNode h;
677 STATE.setVolatile(this, next = unlockWriteState(s));
678 if ((h = whead) != null && h.status != 0)
679 release(h);
680 return next;
681 }
682
683 /**
684 * If the lock state matches the given stamp, releases the
685 * exclusive lock.
686 *
687 * @param stamp a stamp returned by a write-lock operation
688 * @throws IllegalMonitorStateException if the stamp does
689 * not match the current state of this lock
690 */
691 @ReservedStackAccess
692 public void unlockWrite(long stamp) {
693 if (state != stamp || (stamp & WBIT) == 0L)
694 throw new IllegalMonitorStateException();
695 unlockWriteInternal(stamp);
696 }
697
698 /**
699 * If the lock state matches the given stamp, releases the
700 * non-exclusive lock.
701 *
702 * @param stamp a stamp returned by a read-lock operation
703 * @throws IllegalMonitorStateException if the stamp does
704 * not match the current state of this lock
705 */
706 @ReservedStackAccess
707 public void unlockRead(long stamp) {
708 long s, m; WNode h;
709 while (((s = state) & SBITS) == (stamp & SBITS)
710 && (stamp & RBITS) > 0L
711 && ((m = s & RBITS) > 0L)) {
712 if (m < RFULL) {
713 if (casState(s, s - RUNIT)) {
714 if (m == RUNIT && (h = whead) != null && h.status != 0)
715 release(h);
716 return;
717 }
718 }
719 else if (tryDecReaderOverflow(s) != 0L)
720 return;
721 }
722 throw new IllegalMonitorStateException();
723 }
724
725 /**
726 * If the lock state matches the given stamp, releases the
727 * corresponding mode of the lock.
728 *
729 * @param stamp a stamp returned by a lock operation
730 * @throws IllegalMonitorStateException if the stamp does
731 * not match the current state of this lock
732 */
733 @ReservedStackAccess
734 public void unlock(long stamp) {
735 if ((stamp & WBIT) != 0L)
736 unlockWrite(stamp);
737 else
738 unlockRead(stamp);
739 }
740
741 /**
742 * If the lock state matches the given stamp, atomically performs one of
743 * the following actions. If the stamp represents holding a write
744 * lock, returns it. Or, if a read lock, if the write lock is
745 * available, releases the read lock and returns a write stamp.
746 * Or, if an optimistic read, returns a write stamp only if
747 * immediately available. This method returns zero in all other
748 * cases.
749 *
750 * @param stamp a stamp
751 * @return a valid write stamp, or zero on failure
752 */
753 public long tryConvertToWriteLock(long stamp) {
754 long a = stamp & ABITS, m, s, next;
755 while (((s = state) & SBITS) == (stamp & SBITS)) {
756 if ((m = s & ABITS) == 0L) {
757 if (a != 0L)
758 break;
759 if ((next = tryWriteLock(s)) != 0L)
760 return next;
761 }
762 else if (m == WBIT) {
763 if (a != m)
764 break;
765 return stamp;
766 }
767 else if (m == RUNIT && a != 0L) {
768 if (casState(s, next = s - RUNIT + WBIT)) {
769 VarHandle.storeStoreFence();
770 return next;
771 }
772 }
773 else
774 break;
775 }
776 return 0L;
777 }
778
779 /**
780 * If the lock state matches the given stamp, atomically performs one of
781 * the following actions. If the stamp represents holding a write
782 * lock, releases it and obtains a read lock. Or, if a read lock,
783 * returns it. Or, if an optimistic read, acquires a read lock and
784 * returns a read stamp only if immediately available. This method
785 * returns zero in all other cases.
786 *
787 * @param stamp a stamp
788 * @return a valid read stamp, or zero on failure
789 */
790 public long tryConvertToReadLock(long stamp) {
791 long a, s, next; WNode h;
792 while (((s = state) & SBITS) == (stamp & SBITS)) {
793 if ((a = stamp & ABITS) >= WBIT) {
794 // write stamp
795 if (s != stamp)
796 break;
797 STATE.setVolatile(this, next = unlockWriteState(s) + RUNIT);
798 if ((h = whead) != null && h.status != 0)
799 release(h);
800 return next;
801 }
802 else if (a == 0L) {
803 // optimistic read stamp
804 if ((s & ABITS) < RFULL) {
805 if (casState(s, next = s + RUNIT))
806 return next;
807 }
808 else if ((next = tryIncReaderOverflow(s)) != 0L)
809 return next;
810 }
811 else {
812 // already a read stamp
813 if ((s & ABITS) == 0L)
814 break;
815 return stamp;
816 }
817 }
818 return 0L;
819 }
820
821 /**
822 * If the lock state matches the given stamp then, atomically, if the stamp
823 * represents holding a lock, releases it and returns an
824 * observation stamp. Or, if an optimistic read, returns it if
825 * validated. This method returns zero in all other cases, and so
826 * may be useful as a form of "tryUnlock".
827 *
828 * @param stamp a stamp
829 * @return a valid optimistic read stamp, or zero on failure
830 */
831 public long tryConvertToOptimisticRead(long stamp) {
832 long a, m, s, next; WNode h;
833 VarHandle.acquireFence();
834 while (((s = state) & SBITS) == (stamp & SBITS)) {
835 if ((a = stamp & ABITS) >= WBIT) {
836 // write stamp
837 if (s != stamp)
838 break;
839 return unlockWriteInternal(s);
840 }
841 else if (a == 0L)
842 // already an optimistic read stamp
843 return stamp;
844 else if ((m = s & ABITS) == 0L) // invalid read stamp
845 break;
846 else if (m < RFULL) {
847 if (casState(s, next = s - RUNIT)) {
848 if (m == RUNIT && (h = whead) != null && h.status != 0)
849 release(h);
850 return next & SBITS;
851 }
852 }
853 else if ((next = tryDecReaderOverflow(s)) != 0L)
854 return next & SBITS;
855 }
856 return 0L;
857 }
858
859 /**
860 * Releases the write lock if it is held, without requiring a
861 * stamp value. This method may be useful for recovery after
862 * errors.
863 *
864 * @return {@code true} if the lock was held, else false
865 */
866 @ReservedStackAccess
867 public boolean tryUnlockWrite() {
868 long s;
869 if (((s = state) & WBIT) != 0L) {
870 unlockWriteInternal(s);
871 return true;
872 }
873 return false;
874 }
875
876 /**
877 * Releases one hold of the read lock if it is held, without
878 * requiring a stamp value. This method may be useful for recovery
879 * after errors.
880 *
881 * @return {@code true} if the read lock was held, else false
882 */
883 @ReservedStackAccess
884 public boolean tryUnlockRead() {
885 long s, m; WNode h;
886 while ((m = (s = state) & ABITS) != 0L && m < WBIT) {
887 if (m < RFULL) {
888 if (casState(s, s - RUNIT)) {
889 if (m == RUNIT && (h = whead) != null && h.status != 0)
890 release(h);
891 return true;
892 }
893 }
894 else if (tryDecReaderOverflow(s) != 0L)
895 return true;
896 }
897 return false;
898 }
899
900 // status monitoring methods
901
902 /**
903 * Returns combined state-held and overflow read count for given
904 * state s.
905 */
906 private int getReadLockCount(long s) {
907 long readers;
908 if ((readers = s & RBITS) >= RFULL)
909 readers = RFULL + readerOverflow;
910 return (int) readers;
1116 return tryWriteLock(time, unit) != 0L;
1117 }
1118 public void unlock() { unstampedUnlockWrite(); }
1119 public Condition newCondition() {
1120 throw new UnsupportedOperationException();
1121 }
1122 }
1123
1124 final class ReadWriteLockView implements ReadWriteLock {
1125 public Lock readLock() { return asReadLock(); }
1126 public Lock writeLock() { return asWriteLock(); }
1127 }
1128
1129 // Unlock methods without stamp argument checks for view classes.
1130 // Needed because view-class lock methods throw away stamps.
1131
1132 final void unstampedUnlockWrite() {
1133 long s;
1134 if (((s = state) & WBIT) == 0L)
1135 throw new IllegalMonitorStateException();
1136 unlockWriteInternal(s);
1137 }
1138
1139 final void unstampedUnlockRead() {
1140 long s, m; WNode h;
1141 while ((m = (s = state) & RBITS) > 0L) {
1142 if (m < RFULL) {
1143 if (casState(s, s - RUNIT)) {
1144 if (m == RUNIT && (h = whead) != null && h.status != 0)
1145 release(h);
1146 return;
1147 }
1148 }
1149 else if (tryDecReaderOverflow(s) != 0L)
1150 return;
1151 }
1152 throw new IllegalMonitorStateException();
1153 }
1154
1155 private void readObject(java.io.ObjectInputStream s)
1156 throws java.io.IOException, ClassNotFoundException {
1157 s.defaultReadObject();
1158 STATE.setVolatile(this, ORIGIN); // reset to unlocked state
1159 }
1160
1161 // internals
1162
1163 /**
1164 * Tries to increment readerOverflow by first setting state
1165 * access bits value to RBITS, indicating hold of spinlock,
1166 * then updating, then releasing.
1167 *
1168 * @param s a reader overflow stamp: (s & ABITS) >= RFULL
1169 * @return new stamp on success, else zero
1170 */
1171 private long tryIncReaderOverflow(long s) {
1172 // assert (s & ABITS) >= RFULL;
1173 if ((s & ABITS) == RFULL) {
1174 if (casState(s, s | RBITS)) {
1175 ++readerOverflow;
1176 STATE.setVolatile(this, s);
1177 return s;
1178 }
1179 }
1180 else if ((LockSupport.nextSecondarySeed() & OVERFLOW_YIELD_RATE) == 0)
1181 Thread.yield();
1182 else
1183 Thread.onSpinWait();
1184 return 0L;
1185 }
1186
1187 /**
1188 * Tries to decrement readerOverflow.
1189 *
1190 * @param s a reader overflow stamp: (s & ABITS) >= RFULL
1191 * @return new stamp on success, else zero
1192 */
1193 private long tryDecReaderOverflow(long s) {
1194 // assert (s & ABITS) >= RFULL;
1195 if ((s & ABITS) == RFULL) {
1196 if (casState(s, s | RBITS)) {
1197 int r; long next;
1198 if ((r = readerOverflow) > 0) {
1199 readerOverflow = r - 1;
1200 next = s;
1201 }
1202 else
1203 next = s - RUNIT;
1204 STATE.setVolatile(this, next);
1205 return next;
1206 }
1207 }
1208 else if ((LockSupport.nextSecondarySeed() & OVERFLOW_YIELD_RATE) == 0)
1209 Thread.yield();
1210 else
1211 Thread.onSpinWait();
1212 return 0L;
1213 }
1214
1215 /**
1216 * Wakes up the successor of h (normally whead). This is normally
1217 * just h.next, but may require traversal from wtail if next
1218 * pointers are lagging. This may fail to wake up an acquiring
1219 * thread when one or more have been cancelled, but the cancel
1220 * methods themselves provide extra safeguards to ensure liveness.
1221 */
1222 private void release(WNode h) {
1223 if (h != null) {
1224 WNode q; Thread w;
1225 WSTATUS.compareAndSet(h, WAITING, 0);
1226 if ((q = h.next) == null || q.status == CANCELLED) {
1227 for (WNode t = wtail; t != null && t != h; t = t.prev)
1228 if (t.status <= 0)
1229 q = t;
1230 }
1231 if (q != null && (w = q.thread) != null)
1232 LockSupport.unpark(w);
1233 }
1234 }
1235
    /**
     * See above for explanation.
     *
     * <p>Stage 1 spins briefly then enqueues a WMODE node; stage 2
     * spins while at the head of the CLH queue, otherwise parks until
     * the write lock is acquired, cancelled, or (if interruptible)
     * interrupted.
     *
     * @param interruptible true if should check interrupts and if so
     * return INTERRUPTED
     * @param deadline if nonzero, the System.nanoTime value to timeout
     * at (and return zero)
     * @return next state, or INTERRUPTED
     */
    private long acquireWrite(boolean interruptible, long deadline) {
        WNode node = null, p;
        for (int spins = -1;;) { // spin while enqueuing
            long m, s, ns;
            if ((m = (s = state) & ABITS) == 0L) {
                if ((ns = tryWriteLock(s)) != 0L)
                    return ns;
            }
            else if (spins < 0)
                // spinning is only worthwhile if we would be first in queue
                spins = (m == WBIT && wtail == whead) ? SPINS : 0;
            else if (spins > 0) {
                --spins;
                Thread.onSpinWait();
            }
            else if ((p = wtail) == null) { // initialize queue
                WNode hd = new WNode(WMODE, null);
                if (WHEAD.weakCompareAndSet(this, null, hd))
                    wtail = hd;
            }
            else if (node == null)
                node = new WNode(WMODE, p);
            else if (node.prev != p)
                node.prev = p;
            else if (WTAIL.weakCompareAndSet(this, p, node)) {
                p.next = node;
                break; // enqueued; fall through to head-wait loop
            }
        }

        boolean wasInterrupted = false;
        for (int spins = -1;;) {
            WNode h, np, pp; int ps;
            if ((h = whead) == p) { // node is first in queue
                if (spins < 0)
                    spins = HEAD_SPINS;
                else if (spins < MAX_HEAD_SPINS)
                    spins <<= 1; // exponential backoff up to cap
                for (int k = spins; k > 0; --k) { // spin at head
                    long s, ns;
                    if (((s = state) & ABITS) == 0L) {
                        if ((ns = tryWriteLock(s)) != 0L) {
                            whead = node; // node becomes the new head
                            node.prev = null;
                            if (wasInterrupted)
                                Thread.currentThread().interrupt(); // restore status
                            return ns;
                        }
                    }
                    else
                        Thread.onSpinWait();
                }
            }
            else if (h != null) { // help release stale waiters
                WNode c; Thread w;
                while ((c = h.cowait) != null) {
                    if (WCOWAIT.weakCompareAndSet(h, c, c.cowait) &&
                        (w = c.thread) != null)
                        LockSupport.unpark(w);
                }
            }
            if (whead == h) { // head unchanged; safe to adjust links / park
                if ((np = node.prev) != p) {
                    if (np != null)
                        (p = np).next = node; // stale
                }
                else if ((ps = p.status) == 0)
                    // mark predecessor WAITING so it signals us on release
                    WSTATUS.compareAndSet(p, 0, WAITING);
                else if (ps == CANCELLED) {
                    if ((pp = p.prev) != null) { // unlink cancelled predecessor
                        node.prev = pp;
                        pp.next = node;
                    }
                }
                else {
                    long time; // 0 argument to park means no timeout
                    if (deadline == 0L)
                        time = 0L;
                    else if ((time = deadline - System.nanoTime()) <= 0L)
                        return cancelWaiter(node, node, false); // timed out
                    Thread wt = Thread.currentThread();
                    node.thread = wt;
                    // recheck queue/lock stability before committing to park
                    if (p.status < 0 && (p != h || (state & ABITS) != 0L) &&
                        whead == h && node.prev == p) {
                        if (time == 0L)
                            LockSupport.park(this);
                        else
                            LockSupport.parkNanos(this, time);
                    }
                    node.thread = null;
                    if (Thread.interrupted()) {
                        if (interruptible)
                            return cancelWaiter(node, node, true);
                        wasInterrupted = true; // remember; restore on return
                    }
                }
            }
        }
    }
1343
    /**
     * See above for explanation.
     *
     * <p>Readers either join (cowait on) the reader group at the queue
     * tail, or enqueue their own RMODE node; a reader whose node
     * reaches the head acquires the lock and then unparks all of its
     * cowaiters.
     *
     * @param interruptible true if should check interrupts and if so
     * return INTERRUPTED
     * @param deadline if nonzero, the System.nanoTime value to timeout
     * at (and return zero)
     * @return next state, or INTERRUPTED
     */
    private long acquireRead(boolean interruptible, long deadline) {
        boolean wasInterrupted = false;
        WNode node = null, p;
        for (int spins = -1;;) {
            WNode h;
            if ((h = whead) == (p = wtail)) { // no intermediate queued nodes
                for (long m, s, ns;;) {
                    // try to bump reader count (or its overflow word)
                    if ((m = (s = state) & ABITS) < RFULL ?
                        casState(s, ns = s + RUNIT) :
                        (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) {
                        if (wasInterrupted)
                            Thread.currentThread().interrupt(); // restore status
                        return ns;
                    }
                    else if (m >= WBIT) { // write-locked: spin, then enqueue
                        if (spins > 0) {
                            --spins;
                            Thread.onSpinWait();
                        }
                        else {
                            if (spins == 0) {
                                WNode nh = whead, np = wtail;
                                if ((nh == h && np == p) || (h = nh) != (p = np))
                                    break; // give up spinning; go enqueue
                            }
                            spins = SPINS;
                        }
                    }
                }
            }
            if (p == null) { // initialize queue
                WNode hd = new WNode(WMODE, null);
                if (WHEAD.weakCompareAndSet(this, null, hd))
                    wtail = hd;
            }
            else if (node == null)
                node = new WNode(RMODE, p);
            else if (h == p || p.mode != RMODE) {
                // cannot join a reader group; append as a new queue node
                if (node.prev != p)
                    node.prev = p;
                else if (WTAIL.weakCompareAndSet(this, p, node)) {
                    p.next = node;
                    break; // enqueued; fall through to head-wait loop
                }
            }
            else if (!WCOWAIT.compareAndSet(p, node.cowait = p.cowait, node))
                node.cowait = null; // failed to join group; retry outer loop
            else {
                // joined tail reader group as a cowaiter
                for (;;) {
                    WNode pp, c; Thread w;
                    if ((h = whead) != null && (c = h.cowait) != null &&
                        WCOWAIT.compareAndSet(h, c, c.cowait) &&
                        (w = c.thread) != null) // help release
                        LockSupport.unpark(w);
                    if (Thread.interrupted()) {
                        if (interruptible)
                            return cancelWaiter(node, p, true);
                        wasInterrupted = true;
                    }
                    if (h == (pp = p.prev) || h == p || pp == null) {
                        // group is at (or next to) head: try to acquire
                        long m, s, ns;
                        do {
                            if ((m = (s = state) & ABITS) < RFULL ?
                                casState(s, ns = s + RUNIT) :
                                (m < WBIT &&
                                 (ns = tryIncReaderOverflow(s)) != 0L)) {
                                if (wasInterrupted)
                                    Thread.currentThread().interrupt();
                                return ns;
                            }
                        } while (m < WBIT);
                    }
                    if (whead == h && p.prev == pp) { // recheck stability
                        long time;
                        if (pp == null || h == p || p.status > 0) {
                            node = null; // throw away
                            break;
                        }
                        if (deadline == 0L)
                            time = 0L;
                        else if ((time = deadline - System.nanoTime()) <= 0L) {
                            if (wasInterrupted)
                                Thread.currentThread().interrupt();
                            return cancelWaiter(node, p, false); // timed out
                        }
                        Thread wt = Thread.currentThread();
                        node.thread = wt;
                        // park only if still blocked and queue is stable
                        if ((h != pp || (state & ABITS) == WBIT) &&
                            whead == h && p.prev == pp) {
                            if (time == 0L)
                                LockSupport.park(this);
                            else
                                LockSupport.parkNanos(this, time);
                        }
                        node.thread = null;
                    }
                }
            }
        }

        // node is an enqueued RMODE node; wait to become (then act as) head
        for (int spins = -1;;) {
            WNode h, np, pp; int ps;
            if ((h = whead) == p) {
                if (spins < 0)
                    spins = HEAD_SPINS;
                else if (spins < MAX_HEAD_SPINS)
                    spins <<= 1; // exponential backoff up to cap
                for (int k = spins;;) { // spin at head
                    long m, s, ns;
                    if ((m = (s = state) & ABITS) < RFULL ?
                        casState(s, ns = s + RUNIT) :
                        (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) {
                        WNode c; Thread w;
                        whead = node;
                        node.prev = null;
                        // lock acquired: release all cowaiting readers too
                        while ((c = node.cowait) != null) {
                            if (WCOWAIT.compareAndSet(node, c, c.cowait) &&
                                (w = c.thread) != null)
                                LockSupport.unpark(w);
                        }
                        if (wasInterrupted)
                            Thread.currentThread().interrupt();
                        return ns;
                    }
                    else if (m >= WBIT && --k <= 0)
                        break;
                    else
                        Thread.onSpinWait();
                }
            }
            else if (h != null) { // help release stale cowaiters
                WNode c; Thread w;
                while ((c = h.cowait) != null) {
                    if (WCOWAIT.compareAndSet(h, c, c.cowait) &&
                        (w = c.thread) != null)
                        LockSupport.unpark(w);
                }
            }
            if (whead == h) { // head unchanged; safe to adjust links / park
                if ((np = node.prev) != p) {
                    if (np != null)
                        (p = np).next = node; // stale
                }
                else if ((ps = p.status) == 0)
                    // mark predecessor WAITING so it signals us on release
                    WSTATUS.compareAndSet(p, 0, WAITING);
                else if (ps == CANCELLED) {
                    if ((pp = p.prev) != null) { // unlink cancelled predecessor
                        node.prev = pp;
                        pp.next = node;
                    }
                }
                else {
                    long time;
                    if (deadline == 0L)
                        time = 0L;
                    else if ((time = deadline - System.nanoTime()) <= 0L)
                        return cancelWaiter(node, node, false); // timed out
                    Thread wt = Thread.currentThread();
                    node.thread = wt;
                    // recheck queue/lock stability before committing to park
                    if (p.status < 0 &&
                        (p != h || (state & ABITS) == WBIT) &&
                        whead == h && node.prev == p) {
                        if (time == 0L)
                            LockSupport.park(this);
                        else
                            LockSupport.parkNanos(this, time);
                    }
                    node.thread = null;
                    if (Thread.interrupted()) {
                        if (interruptible)
                            return cancelWaiter(node, node, true);
                        wasInterrupted = true; // remember; restore on return
                    }
                }
            }
        }
    }
1530
    /**
     * If node non-null, forces cancel status and unsplices it from
     * queue if possible and wakes up any cowaiters (of the node, or
     * group, as applicable), and in any case helps release current
     * first waiter if lock is free. (Calling with null arguments
     * serves as a conditional form of release, which is not currently
     * needed but may be needed under possible future cancellation
     * policies). This is a variant of cancellation methods in
     * AbstractQueuedSynchronizer (see its detailed explanation in AQS
     * internal documentation).
     *
     * @param node if non-null, the waiter
     * @param group either node or the group node is cowaiting with
     * @param interrupted if already interrupted
     * @return INTERRUPTED if interrupted or Thread.interrupted, else zero
     */
    private long cancelWaiter(WNode node, WNode group, boolean interrupted) {
        if (node != null && group != null) {
            Thread w;
            node.status = CANCELLED;
            // unsplice cancelled nodes from group
            for (WNode p = group, q; (q = p.cowait) != null;) {
                if (q.status == CANCELLED) {
                    WCOWAIT.compareAndSet(p, q, q.cowait);
                    p = group; // restart
                }
                else
                    p = q;
            }
            if (group == node) { // node is a queue node, not just a cowaiter
                for (WNode r = group.cowait; r != null; r = r.cowait) {
                    if ((w = r.thread) != null)
                        LockSupport.unpark(w); // wake up uncancelled co-waiters
                }
                for (WNode pred = node.prev; pred != null; ) { // unsplice
                    WNode succ, pp;        // find valid successor
                    while ((succ = node.next) == null ||
                           succ.status == CANCELLED) {
                        WNode q = null;    // find successor the slow way
                        for (WNode t = wtail; t != null && t != node; t = t.prev)
                            if (t.status != CANCELLED)
                                q = t;     // don't link if succ cancelled
                        if (succ == q ||   // ensure accurate successor
                            WNEXT.compareAndSet(node, succ, succ = q)) {
                            if (succ == null && node == wtail)
                                WTAIL.compareAndSet(this, node, pred);
                            break;
                        }
                    }
                    if (pred.next == node) // unsplice pred link
                        WNEXT.compareAndSet(pred, node, succ);
                    if (succ != null && (w = succ.thread) != null) {
                        // wake up succ to observe new pred
                        succ.thread = null;
                        LockSupport.unpark(w);
                    }
                    if (pred.status != CANCELLED || (pp = pred.prev) == null)
                        break;
                    node.prev = pp;        // repeat if new pred wrong/cancelled
                    WNEXT.compareAndSet(pp, pred, succ);
                    pred = pp;
                }
            }
        }
        WNode h; // Possibly release first waiter
        while ((h = whead) != null) {
            long s; WNode q; // similar to release() but check eligibility
            if ((q = h.next) == null || q.status == CANCELLED) {
                for (WNode t = wtail; t != null && t != h; t = t.prev)
                    if (t.status <= 0)
                        q = t;
            }
            if (h == whead) { // head stable; signal at most once, then stop
                if (q != null && h.status == 0 &&
                    ((s = state) & ABITS) != WBIT && // waiter is eligible
                    (s == 0L || q.mode == RMODE))
                    release(h);
                break;
            }
        }
        return (interrupted || Thread.interrupted()) ? INTERRUPTED : 0L;
    }
1613
    // VarHandle mechanics
    private static final VarHandle STATE;   // lock state word
    private static final VarHandle WHEAD;   // queue head
    private static final VarHandle WTAIL;   // queue tail
    private static final VarHandle WNEXT;   // WNode.next
    private static final VarHandle WSTATUS; // WNode.status
    private static final VarHandle WCOWAIT; // WNode.cowait
    static {
        try {
            MethodHandles.Lookup l = MethodHandles.lookup();
            STATE = l.findVarHandle(StampedLock.class, "state", long.class);
            WHEAD = l.findVarHandle(StampedLock.class, "whead", WNode.class);
            WTAIL = l.findVarHandle(StampedLock.class, "wtail", WNode.class);
            WSTATUS = l.findVarHandle(WNode.class, "status", int.class);
            WNEXT = l.findVarHandle(WNode.class, "next", WNode.class);
            WCOWAIT = l.findVarHandle(WNode.class, "cowait", WNode.class);
        } catch (ReflectiveOperationException e) {
            // Failure to resolve a handle is unrecoverable at class init.
            throw new ExceptionInInitializerError(e);
        }
    }
1634 }
|
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 */
24
25 /*
26 * This file is available under and governed by the GNU General Public
27 * License version 2 only, as published by the Free Software Foundation.
28 * However, the following notice accompanied the original version of this
29 * file:
30 *
31 * Written by Doug Lea with assistance from members of JCP JSR-166
32 * Expert Group and released to the public domain, as explained at
33 * http://creativecommons.org/publicdomain/zero/1.0/
34 */
35
36 package java.util.concurrent.locks;
37
38 import java.util.concurrent.TimeUnit;
39 import jdk.internal.misc.Unsafe;
40 import jdk.internal.vm.annotation.ReservedStackAccess;
41
42 /**
43 * A capability-based lock with three modes for controlling read/write
44 * access. The state of a StampedLock consists of a version and mode.
45 * Lock acquisition methods return a stamp that represents and
46 * controls access with respect to a lock state; "try" versions of
47 * these methods may instead return the special value zero to
48 * represent failure to acquire access. Lock release and conversion
49 * methods require stamps as arguments, and fail if they do not match
50 * the state of the lock. The three modes are:
51 *
52 * <ul>
53 *
54 * <li><b>Writing.</b> Method {@link #writeLock} possibly blocks
55 * waiting for exclusive access, returning a stamp that can be used
56 * in method {@link #unlockWrite} to release the lock. Untimed and
57 * timed versions of {@code tryWriteLock} are also provided. When
58 * the lock is held in write mode, no read locks may be obtained,
59 * and all optimistic read validations will fail.
114 * <p>Like {@link java.util.concurrent.Semaphore Semaphore}, but unlike most
115 * {@link Lock} implementations, StampedLocks have no notion of ownership.
116 * Locks acquired in one thread can be released or converted in another.
117 *
118 * <p>The scheduling policy of StampedLock does not consistently
119 * prefer readers over writers or vice versa. All "try" methods are
120 * best-effort and do not necessarily conform to any scheduling or
121 * fairness policy. A zero return from any "try" method for acquiring
122 * or converting locks does not carry any information about the state
123 * of the lock; a subsequent invocation may succeed.
124 *
125 * <p>Because it supports coordinated usage across multiple lock
126 * modes, this class does not directly implement the {@link Lock} or
127 * {@link ReadWriteLock} interfaces. However, a StampedLock may be
128 * viewed {@link #asReadLock()}, {@link #asWriteLock()}, or {@link
129 * #asReadWriteLock()} in applications requiring only the associated
130 * set of functionality.
131 *
132 * <p><b>Memory Synchronization.</b> Methods with the effect of
133 * successfully locking in any mode have the same memory
134 * synchronization effects as a <em>Lock</em> action, as described in
135 * Chapter 17 of <cite>The Java™ Language Specification</cite>.
136 * Methods successfully unlocking in write mode have the same memory
137 * synchronization effects as an <em>Unlock</em> action. In optimistic
138 * read usages, actions prior to the most recent write mode unlock action
139 * are guaranteed to happen-before those following a tryOptimisticRead
140 * only if a later validate returns true; otherwise there is no guarantee
141 * that the reads between tryOptimisticRead and validate obtain a
142 * consistent snapshot.
143 *
144 * <p><b>Sample Usage.</b> The following illustrates some usage idioms
145 * in a class that maintains simple two-dimensional points. The sample
146 * code illustrates some try/catch conventions even though they are
147 * not strictly needed here because no exceptions can occur in their
148 * bodies.
149 *
150 * <pre> {@code
151 * class Point {
152 * private double x, y;
153 * private final StampedLock sl = new StampedLock();
154 *
155 * // an exclusively locked method
218 * try {
219 * while (x == 0.0 && y == 0.0) {
220 * long ws = sl.tryConvertToWriteLock(stamp);
221 * if (ws != 0L) {
222 * stamp = ws;
223 * x = newX;
224 * y = newY;
225 * break;
226 * }
227 * else {
228 * sl.unlockRead(stamp);
229 * stamp = sl.writeLock();
230 * }
231 * }
232 * } finally {
233 * sl.unlock(stamp);
234 * }
235 * }
236 * }}</pre>
237 *
238 * @jls 17.4 Memory Model
239 * @since 1.8
240 * @author Doug Lea
241 */
242 public class StampedLock implements java.io.Serializable {
243 /*
244 * Algorithmic notes:
245 *
246 * The design employs elements of Sequence locks
247 * (as used in linux kernels; see Lameter's
248 * http://www.lameter.com/gelato2005.pdf
249 * and elsewhere; see
250 * Boehm's http://www.hpl.hp.com/techreports/2012/HPL-2012-68.html)
251 * and Ordered RW locks (see Shirako et al
252 * http://dl.acm.org/citation.cfm?id=2312015)
253 *
254 * Conceptually, the primary state of the lock includes a sequence
255 * number that is odd when write-locked and even otherwise.
256 * However, this is offset by a reader count that is non-zero when
257 * read-locked. The read count is ignored when validating
258 * "optimistic" seqlock-reader-style stamps. Because we must use
259 * a small finite number of bits (currently 7) for readers, a
260 * supplementary reader overflow word is used when the number of
261 * readers exceeds the count field. We do this by treating the max
262 * reader count value (RBITS) as a spinlock protecting overflow
263 * updates.
264 *
265 * Waiters use a modified form of CLH lock used in
266 * AbstractQueuedSynchronizer (AQS; see its internal documentation
267 * for a fuller account), where each node is either a ReaderNode
268 * or WriterNode. Implementation of queued Writer mode is
269 * identical to AQS except for lock-state operations. Sets of
270 * waiting readers are grouped (linked) under a common node (field
271 * cowaiters) so act as a single node with respect to most CLH
272 * mechanics. This simplifies the scheduling policy to a
273 * mainly-FIFO scheme that incorporates elements of Phase-Fair
274 * locks (see Brandenburg & Anderson, especially
275 * http://www.cs.unc.edu/~bbb/diss/). Method release does not
276 * itself wake up cowaiters. This is done by the primary thread,
277 * but helped by other cowaiters as they awaken.
278 *
279 * These rules apply to threads actually queued. Threads may also
280 * try to acquire locks before or in the process of enqueueing
281 * regardless of preference rules, and so may "barge" their way
282 * in. Methods writeLock and readLock (but not the other variants
283 * of each) first unconditionally try to CAS state, falling back
284 * to test-and-test-and-set retries on failure, slightly shrinking
285 * race windows on initial attempts, thus making success more
286 * likely. Also, when some threads cancel (via interrupt or
287 * timeout), phase-fairness is at best roughly approximated.
288 *
289 * Nearly all of these mechanics are carried out in methods
290 * acquireWrite and acquireRead, that, as typical of such code,
291 * sprawl out because actions and retries rely on consistent sets
292 * of locally cached reads.
293 *
294 * For an explanation of the use of acquireFence, see
295 * http://gee.cs.oswego.edu/dl/html/j9mm.html as well as Boehm's
296 * paper (above). Note that sequence validation (mainly method
297 * validate()) requires stricter ordering rules than apply to
298 * normal volatile reads (of "state"). To ensure that writeLock
299 * acquisitions strictly precede subsequent writes in cases where
300 * this is not already forced, we use a storeStoreFence.
301 *
302 * The memory layout keeps lock state and queue pointers together
303 * (normally on the same cache line). This usually works well for
304 * read-mostly loads. In most other cases, the natural tendency of
305 * CLH locks to reduce memory contention lessens motivation to
306 * further spread out contended locations, but might be subject to
307 * future improvements.
308 */
309
    private static final long serialVersionUID = -6001602636862214147L;

    /** The number of bits to use for reader count before overflowing */
    private static final int LG_READERS = 7; // 127 readers

    // Values for lock state and stamp operations
    private static final long RUNIT = 1L;              // one reader hold
    private static final long WBIT = 1L << LG_READERS; // the writer bit
    private static final long RBITS = WBIT - 1L;       // reader-count mask
    private static final long RFULL = RBITS - 1L;      // max inline reader count
    private static final long ABITS = RBITS | WBIT;    // all lock-mode bits
    private static final long SBITS = ~RBITS; // note overlap with ABITS
    // not writing and conservatively non-overflowing
    private static final long RSAFE = ~(3L << (LG_READERS - 1));

    /*
     * 3 stamp modes can be distinguished by examining (m = stamp & ABITS):
     * write mode: m == WBIT
     * optimistic read mode: m == 0L (even when read lock is held)
     * read mode: m > 0L && m <= RFULL (the stamp is a copy of state, but the
     * read hold count in the stamp is unused other than to determine mode)
     *
     * This differs slightly from the encoding of state:
     * (state & ABITS) == 0L indicates the lock is currently unlocked.
     * (state & ABITS) == RBITS is a special transient value
     * indicating spin-locked to manipulate reader bits overflow.
     */

    /** Initial value for lock state; avoids failure value zero. */
    private static final long ORIGIN = WBIT << 1;

    // Special value from cancelled acquire methods so caller can throw IE
    private static final long INTERRUPTED = 1L;

    // Bits for Node.status
    static final int WAITING = 1;
    static final int CANCELLED = 0x80000000; // must be negative
    /** CLH nodes */
    abstract static class Node {
        volatile Node prev;  // initially attached via casTail
        volatile Node next;  // visibly nonnull when signallable
        Thread waiter;       // visibly nonnull when enqueued
        volatile int status; // written by owner, atomic bit ops by others

        // methods for atomic operations
        final boolean casPrev(Node c, Node v) { // for cleanQueue
            return U.weakCompareAndSetReference(this, PREV, c, v);
        }
        final boolean casNext(Node c, Node v) { // for cleanQueue
            return U.weakCompareAndSetReference(this, NEXT, c, v);
        }
        final int getAndUnsetStatus(int v) {    // for signalling
            return U.getAndBitwiseAndInt(this, STATUS, ~v);
        }
        final void setPrevRelaxed(Node p) {     // for off-queue assignment
            U.putReference(this, PREV, p);
        }
        final void setStatusRelaxed(int s) {    // for off-queue assignment
            U.putInt(this, STATUS, s);
        }
        final void clearStatus() {              // for reducing unneeded signals
            U.putIntOpaque(this, STATUS, 0);
        }

        // Raw field offsets used by the Unsafe-based atomics above.
        private static final long STATUS
            = U.objectFieldOffset(Node.class, "status");
        private static final long NEXT
            = U.objectFieldOffset(Node.class, "next");
        private static final long PREV
            = U.objectFieldOffset(Node.class, "prev");
    }
382
    /** CLH node for a thread waiting to acquire in write mode. */
    static final class WriterNode extends Node { // node for writers
    }
385
    /** CLH node for readers; carries the list of linked cowaiting readers. */
    static final class ReaderNode extends Node { // node for readers
        volatile ReaderNode cowaiters; // list of linked readers
        final boolean casCowaiters(ReaderNode c, ReaderNode v) {
            return U.weakCompareAndSetReference(this, COWAITERS, c, v);
        }
        final void setCowaitersRelaxed(ReaderNode p) { // plain (non-atomic) write
            U.putReference(this, COWAITERS, p);
        }
        private static final long COWAITERS
            = U.objectFieldOffset(ReaderNode.class, "cowaiters");
    }
397
    /** Head of CLH queue */
    private transient volatile Node head;
    /** Tail (last) of CLH queue */
    private transient volatile Node tail;

    // views (see asReadLock/asWriteLock/asReadWriteLock in class javadoc)
    transient ReadLockView readLockView;
    transient WriteLockView writeLockView;
    transient ReadWriteLockView readWriteLockView;

    /** Lock sequence/state (see mode-encoding comment near the constants) */
    private transient volatile long state;
    /** extra reader count when state read count saturated */
    private transient int readerOverflow;
412
    /**
     * Creates a new lock, initially in unlocked state.
     */
    public StampedLock() {
        state = ORIGIN; // nonzero so valid stamps never equal failure value 0
    }
419
    // internal lock methods

    /** CASes the lock state word from expect to update. */
    private boolean casState(long expect, long update) {
        return U.compareAndSetLong(this, STATE, expect, update);
    }
425
    /**
     * Single attempt to CAS from unlocked to write-locked state.
     *
     * @return the next state (a write stamp) on success, else zero
     */
    @ReservedStackAccess
    private long tryAcquireWrite() {
        long s, nextState;
        if (((s = state) & ABITS) == 0L && casState(s, nextState = s | WBIT)) {
            // Ensure acquisition strictly precedes subsequent writes
            // (see the algorithmic notes on storeStoreFence).
            U.storeStoreFence();
            return nextState;
        }
        return 0L;
    }
435
    /**
     * Attempts to increment the reader count, retrying on CAS failure
     * but returning zero if write-locked.
     *
     * @return the next state (a read stamp) on success, else zero
     */
    @ReservedStackAccess
    private long tryAcquireRead() {
        for (long s, m, nextState;;) {
            if ((m = (s = state) & ABITS) < RFULL) {
                if (casState(s, nextState = s + RUNIT))
                    return nextState;
            }
            else if (m == WBIT) // write-locked; caller decides whether to queue
                return 0L;
            else if ((nextState = tryIncReaderOverflow(s)) != 0L)
                return nextState; // count saturated: use overflow word
        }
    }
449
450 /**
451 * Returns an unlocked state, incrementing the version and
452 * avoiding special failure value 0L.
453 *
454 * @param s a write-locked state (or stamp)
455 */
456 private static long unlockWriteState(long s) {
457 return ((s += WBIT) == 0L) ? ORIGIN : s;
458 }
459
    /**
     * Releases the write lock: installs the next (unlocked) state,
     * then wakes up the queue successor, if any.
     *
     * @param s the current write-locked state
     * @return the next state
     */
    private long releaseWrite(long s) {
        long nextState = state = unlockWriteState(s);
        signalNext(head); // must follow the state write so waker sees unlock
        return nextState;
    }
465
    /**
     * Exclusively acquires the lock, blocking if necessary
     * until available.
     *
     * @return a write stamp that can be used to unlock or convert mode
     */
    @ReservedStackAccess
    public long writeLock() {
        // try unconditional CAS confirming weak read
        // (masking off ABITS guesses the lock is free; the CAS verifies it)
        long s = U.getLongOpaque(this, STATE) & ~ABITS, nextState;
        if (casState(s, nextState = s | WBIT)) {
            U.storeStoreFence();
            return nextState;
        }
        return acquireWrite(false, false, 0L); // slow path: spin/queue/park
    }
482
    /**
     * Exclusively acquires the lock if it is immediately available.
     *
     * @return a write stamp that can be used to unlock or convert mode,
     * or zero if the lock is not available
     */
    public long tryWriteLock() {
        // single CAS attempt; never blocks or enqueues
        return tryAcquireWrite();
    }
492
493 /**
494 * Exclusively acquires the lock if it is available within the
495 * given time and the current thread has not been interrupted.
496 * Behavior under timeout and interruption matches that specified
497 * for method {@link Lock#tryLock(long,TimeUnit)}.
498 *
499 * @param time the maximum time to wait for the lock
500 * @param unit the time unit of the {@code time} argument
501 * @return a write stamp that can be used to unlock or convert mode,
502 * or zero if the lock is not available
503 * @throws InterruptedException if the current thread is interrupted
504 * before acquiring the lock
505 */
506 public long tryWriteLock(long time, TimeUnit unit)
507 throws InterruptedException {
508 long nanos = unit.toNanos(time);
509 if (!Thread.interrupted()) {
510 long nextState;
511 if ((nextState = tryAcquireWrite()) != 0L)
512 return nextState;
513 if (nanos <= 0L)
514 return 0L;
515 nextState = acquireWrite(true, true, System.nanoTime() + nanos);
516 if (nextState != INTERRUPTED)
517 return nextState;
518 }
519 throw new InterruptedException();
520 }
521
522 /**
523 * Exclusively acquires the lock, blocking if necessary
524 * until available or the current thread is interrupted.
525 * Behavior under interruption matches that specified
526 * for method {@link Lock#lockInterruptibly()}.
527 *
528 * @return a write stamp that can be used to unlock or convert mode
529 * @throws InterruptedException if the current thread is interrupted
530 * before acquiring the lock
531 */
532 public long writeLockInterruptibly() throws InterruptedException {
533 long nextState;
534 if (!Thread.interrupted() &&
535 ((nextState = tryAcquireWrite()) != 0L ||
536 (nextState = acquireWrite(true, false, 0L)) != INTERRUPTED))
537 return nextState;
538 throw new InterruptedException();
539 }
540
    /**
     * Non-exclusively acquires the lock, blocking if necessary
     * until available.
     *
     * @return a read stamp that can be used to unlock or convert mode
     */
    @ReservedStackAccess
    public long readLock() {
        // unconditionally optimistically try non-overflow case once;
        // masking with RSAFE guesses a not-write-locked, non-overflowing
        // state, so s + RUNIT is a plausible next state the CAS verifies
        long s = U.getLongOpaque(this, STATE) & RSAFE, nextState;
        if (casState(s, nextState = s + RUNIT))
            return nextState;
        else
            return acquireRead(false, false, 0L); // slow path: spin/queue/park
    }
556
    /**
     * Non-exclusively acquires the lock if it is immediately available.
     *
     * @return a read stamp that can be used to unlock or convert mode,
     * or zero if the lock is not available
     */
    public long tryReadLock() {
        // retries CAS under contention but never blocks or enqueues
        return tryAcquireRead();
    }
566
567 /**
568 * Non-exclusively acquires the lock if it is available within the
569 * given time and the current thread has not been interrupted.
570 * Behavior under timeout and interruption matches that specified
571 * for method {@link Lock#tryLock(long,TimeUnit)}.
572 *
573 * @param time the maximum time to wait for the lock
574 * @param unit the time unit of the {@code time} argument
575 * @return a read stamp that can be used to unlock or convert mode,
576 * or zero if the lock is not available
577 * @throws InterruptedException if the current thread is interrupted
578 * before acquiring the lock
579 */
580 public long tryReadLock(long time, TimeUnit unit)
581 throws InterruptedException {
582 long nanos = unit.toNanos(time);
583 if (!Thread.interrupted()) {
584 long nextState;
585 if (tail == head && (nextState = tryAcquireRead()) != 0L)
586 return nextState;
587 if (nanos <= 0L)
588 return 0L;
589 nextState = acquireRead(true, true, System.nanoTime() + nanos);
590 if (nextState != INTERRUPTED)
591 return nextState;
592 }
593 throw new InterruptedException();
594 }
595
596 /**
597 * Non-exclusively acquires the lock, blocking if necessary
598 * until available or the current thread is interrupted.
599 * Behavior under interruption matches that specified
600 * for method {@link Lock#lockInterruptibly()}.
601 *
602 * @return a read stamp that can be used to unlock or convert mode
603 * @throws InterruptedException if the current thread is interrupted
604 * before acquiring the lock
605 */
606 public long readLockInterruptibly() throws InterruptedException {
607 long nextState;
608 if (!Thread.interrupted() &&
609 ((nextState = tryAcquireRead()) != 0L ||
610 (nextState = acquireRead(true, false, 0L)) != INTERRUPTED))
611 return nextState;
612 throw new InterruptedException();
613 }
614
615 /**
616 * Returns a stamp that can later be validated, or zero
617 * if exclusively locked.
618 *
619 * @return a valid optimistic read stamp, or zero if exclusively locked
620 */
621 public long tryOptimisticRead() {
622 long s;
623 return (((s = state) & WBIT) == 0L) ? (s & SBITS) : 0L;
624 }
625
    /**
     * Returns true if the lock has not been exclusively acquired
     * since issuance of the given stamp. Always returns false if the
     * stamp is zero. Always returns true if the stamp represents a
     * currently held lock. Invoking this method with a value not
     * obtained from {@link #tryOptimisticRead} or a locking method
     * for this lock has no defined effect or result.
     *
     * @param stamp a stamp
     * @return {@code true} if the lock has not been exclusively acquired
     * since issuance of the given stamp; else false
     */
    public boolean validate(long stamp) {
        // Validation needs stricter ordering than a plain volatile read
        // of state provides (see the algorithmic notes), hence loadFence.
        U.loadFence();
        return (stamp & SBITS) == (state & SBITS);
    }
642
    /**
     * If the lock state matches the given stamp, releases the
     * exclusive lock.
     *
     * @param stamp a stamp returned by a write-lock operation
     * @throws IllegalMonitorStateException if the stamp does
     * not match the current state of this lock
     */
    @ReservedStackAccess
    public void unlockWrite(long stamp) {
        // a write stamp is valid only if it equals the entire state word
        // and actually carries the write bit
        if (state != stamp || (stamp & WBIT) == 0L)
            throw new IllegalMonitorStateException();
        releaseWrite(stamp);
    }
657
    /**
     * If the lock state matches the given stamp, releases the
     * non-exclusive lock.
     *
     * @param stamp a stamp returned by a read-lock operation
     * @throws IllegalMonitorStateException if the stamp does
     * not match the current state of this lock
     */
    @ReservedStackAccess
    public void unlockRead(long stamp) {
        long s, m;
        if ((stamp & RBITS) != 0L) {
            // retry while the version still matches and readers are present
            while (((s = state) & SBITS) == (stamp & SBITS) &&
                   ((m = s & RBITS) != 0L)) {
                if (m < RFULL) {
                    if (casState(s, s - RUNIT)) {
                        if (m == RUNIT) // last reader: wake queue successor
                            signalNext(head);
                        return;
                    }
                }
                else if (tryDecReaderOverflow(s) != 0L)
                    return; // count saturated: decrement via overflow word
            }
        }
        throw new IllegalMonitorStateException();
    }
685
686 /**
687 * If the lock state matches the given stamp, releases the
688 * corresponding mode of the lock.
689 *
690 * @param stamp a stamp returned by a lock operation
691 * @throws IllegalMonitorStateException if the stamp does
692 * not match the current state of this lock
693 */
694 public void unlock(long stamp) {
695 if ((stamp & WBIT) != 0L)
696 unlockWrite(stamp);
697 else
698 unlockRead(stamp);
699 }
700
701 /**
702 * If the lock state matches the given stamp, atomically performs one of
703 * the following actions. If the stamp represents holding a write
704 * lock, returns it. Or, if a read lock, if the write lock is
705 * available, releases the read lock and returns a write stamp.
706 * Or, if an optimistic read, returns a write stamp only if
707 * immediately available. This method returns zero in all other
708 * cases.
709 *
710 * @param stamp a stamp
711 * @return a valid write stamp, or zero on failure
712 */
713 public long tryConvertToWriteLock(long stamp) {
714 long a = stamp & ABITS, m, s, nextState;
715 while (((s = state) & SBITS) == (stamp & SBITS)) {
716 if ((m = s & ABITS) == 0L) {
717 if (a != 0L)
718 break;
719 if (casState(s, nextState = s | WBIT)) {
720 U.storeStoreFence();
721 return nextState;
722 }
723 } else if (m == WBIT) {
724 if (a != m)
725 break;
726 return stamp;
727 } else if (m == RUNIT && a != 0L) {
728 if (casState(s, nextState = s - RUNIT + WBIT))
729 return nextState;
730 } else
731 break;
732 }
733 return 0L;
734 }
735
736 /**
737 * If the lock state matches the given stamp, atomically performs one of
738 * the following actions. If the stamp represents holding a write
739 * lock, releases it and obtains a read lock. Or, if a read lock,
740 * returns it. Or, if an optimistic read, acquires a read lock and
741 * returns a read stamp only if immediately available. This method
742 * returns zero in all other cases.
743 *
744 * @param stamp a stamp
745 * @return a valid read stamp, or zero on failure
746 */
747 public long tryConvertToReadLock(long stamp) {
748 long a, s, nextState;
749 while (((s = state) & SBITS) == (stamp & SBITS)) {
750 if ((a = stamp & ABITS) >= WBIT) {
751 if (s != stamp) // write stamp
752 break;
753 nextState = state = unlockWriteState(s) + RUNIT;
754 signalNext(head);
755 return nextState;
756 } else if (a == 0L) { // optimistic read stamp
757 if ((s & ABITS) < RFULL) {
758 if (casState(s, nextState = s + RUNIT))
759 return nextState;
760 } else if ((nextState = tryIncReaderOverflow(s)) != 0L)
761 return nextState;
762 } else { // already a read stamp
763 if ((s & ABITS) == 0L)
764 break;
765 return stamp;
766 }
767 }
768 return 0L;
769 }
770
771 /**
772 * If the lock state matches the given stamp then, atomically, if the stamp
773 * represents holding a lock, releases it and returns an
774 * observation stamp. Or, if an optimistic read, returns it if
775 * validated. This method returns zero in all other cases, and so
776 * may be useful as a form of "tryUnlock".
777 *
778 * @param stamp a stamp
779 * @return a valid optimistic read stamp, or zero on failure
780 */
781 public long tryConvertToOptimisticRead(long stamp) {
782 long a, m, s, nextState;
783 U.loadFence();
784 while (((s = state) & SBITS) == (stamp & SBITS)) {
785 if ((a = stamp & ABITS) >= WBIT) {
786 if (s != stamp) // write stamp
787 break;
788 return releaseWrite(s);
789 } else if (a == 0L) { // already an optimistic read stamp
790 return stamp;
791 } else if ((m = s & ABITS) == 0L) { // invalid read stamp
792 break;
793 } else if (m < RFULL) {
794 if (casState(s, nextState = s - RUNIT)) {
795 if (m == RUNIT)
796 signalNext(head);
797 return nextState & SBITS;
798 }
799 } else if ((nextState = tryDecReaderOverflow(s)) != 0L)
800 return nextState & SBITS;
801 }
802 return 0L;
803 }
804
805 /**
806 * Releases the write lock if it is held, without requiring a
807 * stamp value. This method may be useful for recovery after
808 * errors.
809 *
810 * @return {@code true} if the lock was held, else false
811 */
812 @ReservedStackAccess
813 public boolean tryUnlockWrite() {
814 long s;
815 if (((s = state) & WBIT) != 0L) {
816 releaseWrite(s);
817 return true;
818 }
819 return false;
820 }
821
822 /**
823 * Releases one hold of the read lock if it is held, without
824 * requiring a stamp value. This method may be useful for recovery
825 * after errors.
826 *
827 * @return {@code true} if the read lock was held, else false
828 */
829 @ReservedStackAccess
830 public boolean tryUnlockRead() {
831 long s, m;
832 while ((m = (s = state) & ABITS) != 0L && m < WBIT) {
833 if (m < RFULL) {
834 if (casState(s, s - RUNIT)) {
835 if (m == RUNIT)
836 signalNext(head);
837 return true;
838 }
839 }
840 else if (tryDecReaderOverflow(s) != 0L)
841 return true;
842 }
843 return false;
844 }
845
846 // status monitoring methods
847
848 /**
849 * Returns combined state-held and overflow read count for given
850 * state s.
851 */
852 private int getReadLockCount(long s) {
853 long readers;
854 if ((readers = s & RBITS) >= RFULL)
855 readers = RFULL + readerOverflow;
856 return (int) readers;
1062 return tryWriteLock(time, unit) != 0L;
1063 }
1064 public void unlock() { unstampedUnlockWrite(); }
1065 public Condition newCondition() {
1066 throw new UnsupportedOperationException();
1067 }
1068 }
1069
    // View presenting this StampedLock's stamp-free lock adapters as a
    // java.util.concurrent.locks.ReadWriteLock pair.
    final class ReadWriteLockView implements ReadWriteLock {
        public Lock readLock() { return asReadLock(); }
        public Lock writeLock() { return asWriteLock(); }
    }
1074
1075 // Unlock methods without stamp argument checks for view classes.
1076 // Needed because view-class lock methods throw away stamps.
1077
1078 final void unstampedUnlockWrite() {
1079 long s;
1080 if (((s = state) & WBIT) == 0L)
1081 throw new IllegalMonitorStateException();
1082 releaseWrite(s);
1083 }
1084
    // Releases one read hold without a stamp-match check, for view classes
    // whose lock methods discard stamps.
    final void unstampedUnlockRead() {
        long s, m;
        while ((m = (s = state) & RBITS) > 0L) {
            if (m < RFULL) {
                if (casState(s, s - RUNIT)) {
                    if (m == RUNIT)        // released last read hold
                        signalNext(head);
                    return;
                }
                // CAS lost a race; re-read state and retry
            }
            else if (tryDecReaderOverflow(s) != 0L) // count is in overflow field
                return;
        }
        throw new IllegalMonitorStateException(); // no read hold to release
    }
1100
    /**
     * Reconstitutes this lock from a stream; the deserialized instance
     * is always in the unlocked state regardless of the state at
     * serialization time.
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        state = ORIGIN; // reset to unlocked state
    }
1106
1107 // overflow handling methods
1108
1109 /**
1110 * Tries to increment readerOverflow by first setting state
1111 * access bits value to RBITS, indicating hold of spinlock,
1112 * then updating, then releasing.
1113 *
1114 * @param s a reader overflow stamp: (s & ABITS) >= RFULL
1115 * @return new stamp on success, else zero
1116 */
1117 private long tryIncReaderOverflow(long s) {
1118 // assert (s & ABITS) >= RFULL;
1119 if ((s & ABITS) != RFULL)
1120 Thread.onSpinWait();
1121 else if (casState(s, s | RBITS)) {
1122 ++readerOverflow;
1123 return state = s;
1124 }
1125 return 0L;
1126 }
1127
1128 /**
1129 * Tries to decrement readerOverflow.
1130 *
1131 * @param s a reader overflow stamp: (s & ABITS) >= RFULL
1132 * @return new stamp on success, else zero
1133 */
1134 private long tryDecReaderOverflow(long s) {
1135 // assert (s & ABITS) >= RFULL;
1136 if ((s & ABITS) != RFULL)
1137 Thread.onSpinWait();
1138 else if (casState(s, s | RBITS)) {
1139 int r; long nextState;
1140 if ((r = readerOverflow) > 0) {
1141 readerOverflow = r - 1;
1142 nextState = s;
1143 }
1144 else
1145 nextState = s - RUNIT;
1146 return state = nextState;
1147 }
1148 return 0L;
1149 }
1150
1151 // release methods
1152
1153 /**
1154 * Wakes up the successor of given node, if one exists, and unsets its
1155 * WAITING status to avoid park race. This may fail to wake up an
1156 * eligible thread when one or more have been cancelled, but
1157 * cancelAcquire ensures liveness.
1158 */
1159 static final void signalNext(Node h) {
1160 Node s;
1161 if (h != null && (s = h.next) != null && s.status > 0) {
1162 s.getAndUnsetStatus(WAITING);
1163 LockSupport.unpark(s.waiter);
1164 }
1165 }
1166
1167 /**
1168 * Removes and unparks all cowaiters of node, if it exists.
1169 */
1170 private static void signalCowaiters(ReaderNode node) {
1171 if (node != null) {
1172 for (ReaderNode c; (c = node.cowaiters) != null; ) {
1173 if (node.casCowaiters(c, c.cowaiters))
1174 LockSupport.unpark(c.waiter);
1175 }
1176 }
1177 }
1178
    // queue link methods

    /** CASes the tail field from c to v; used when appending waiter nodes. */
    private boolean casTail(Node c, Node v) {
        return U.compareAndSetReference(this, TAIL, c, v);
    }
1183
    /** tries once to CAS a new dummy node for head */
    private void tryInitializeHead() {
        Node h = new WriterNode();
        // Only the CAS winner installs the dummy as both head and tail;
        // losers simply retry their enqueue against the new tail.
        if (U.compareAndSetReference(this, HEAD, null, h))
            tail = h;
    }
1190
1191 /**
1192 * For explanation, see above and AbstractQueuedSynchronizer
1193 * internal documentation.
1194 *
1195 * @param interruptible true if should check interrupts and if so
1196 * return INTERRUPTED
1197 * @param timed if true use timed waits
1198 * @param time the System.nanoTime value to timeout at (and return zero)
1199 * @return next state, or INTERRUPTED
1200 */
1201 private long acquireWrite(boolean interruptible, boolean timed, long time) {
1202 byte spins = 0, postSpins = 0; // retries upon unpark of first thread
1203 boolean interrupted = false, first = false;
1204 WriterNode node = null;
1205 Node pred = null;
1206 for (long s, nextState;;) {
1207 if (!first && (pred = (node == null) ? null : node.prev) != null &&
1208 !(first = (head == pred))) {
1209 if (pred.status < 0) {
1210 cleanQueue(); // predecessor cancelled
1211 continue;
1212 } else if (pred.prev == null) {
1213 Thread.onSpinWait(); // ensure serialization
1214 continue;
1215 }
1216 }
1217 if ((first || pred == null) && ((s = state) & ABITS) == 0L &&
1218 casState(s, nextState = s | WBIT)) {
1219 U.storeStoreFence();
1220 if (first) {
1221 node.prev = null;
1222 head = node;
1223 pred.next = null;
1224 node.waiter = null;
1225 if (interrupted)
1226 Thread.currentThread().interrupt();
1227 }
1228 return nextState;
1229 } else if (node == null) { // retry before enqueuing
1230 node = new WriterNode();
1231 } else if (pred == null) { // try to enqueue
1232 Node t = tail;
1233 node.setPrevRelaxed(t);
1234 if (t == null)
1235 tryInitializeHead();
1236 else if (!casTail(t, node))
1237 node.setPrevRelaxed(null); // back out
1238 else
1239 t.next = node;
1240 } else if (first && spins != 0) { // reduce unfairness
1241 --spins;
1242 Thread.onSpinWait();
1243 } else if (node.status == 0) { // enable signal
1244 if (node.waiter == null)
1245 node.waiter = Thread.currentThread();
1246 node.status = WAITING;
1247 } else {
1248 long nanos;
1249 spins = postSpins = (byte)((postSpins << 1) | 1);
1250 if (!timed)
1251 LockSupport.park(this);
1252 else if ((nanos = time - System.nanoTime()) > 0L)
1253 LockSupport.parkNanos(this, nanos);
1254 else
1255 break;
1256 node.clearStatus();
1257 if ((interrupted |= Thread.interrupted()) && interruptible)
1258 break;
1259 }
1260 }
1261 return cancelAcquire(node, interrupted);
1262 }
1263
1264 /**
1265 * See above for explanation.
1266 *
1267 * @param interruptible true if should check interrupts and if so
1268 * return INTERRUPTED
1269 * @param timed if true use timed waits
1270 * @param time the System.nanoTime value to timeout at (and return zero)
1271 * @return next state, or INTERRUPTED
1272 */
1273 private long acquireRead(boolean interruptible, boolean timed, long time) {
1274 boolean interrupted = false;
1275 ReaderNode node = null;
1276 /*
1277 * Loop:
1278 * if empty, try to acquire
1279 * if tail is Reader, try to cowait; restart if leader stale or cancels
1280 * else try to create and enqueue node, and wait in 2nd loop below
1281 */
1282 for (;;) {
1283 ReaderNode leader; long nextState;
1284 Node tailPred = null, t = tail;
1285 if ((t == null || (tailPred = t.prev) == null) &&
1286 (nextState = tryAcquireRead()) != 0L) // try now if empty
1287 return nextState;
1288 else if (t == null)
1289 tryInitializeHead();
1290 else if (tailPred == null || !(t instanceof ReaderNode)) {
1291 if (node == null)
1292 node = new ReaderNode();
1293 if (tail == t) {
1294 node.setPrevRelaxed(t);
1295 if (casTail(t, node)) {
1296 t.next = node;
1297 break; // node is leader; wait in loop below
1298 }
1299 node.setPrevRelaxed(null);
1300 }
1301 } else if ((leader = (ReaderNode)t) == tail) { // try to cowait
1302 for (boolean attached = false;;) {
1303 if (leader.status < 0 || leader.prev == null)
1304 break;
1305 else if (node == null)
1306 node = new ReaderNode();
1307 else if (node.waiter == null)
1308 node.waiter = Thread.currentThread();
1309 else if (!attached) {
1310 ReaderNode c = leader.cowaiters;
1311 node.setCowaitersRelaxed(c);
1312 attached = leader.casCowaiters(c, node);
1313 if (!attached)
1314 node.setCowaitersRelaxed(null);
1315 } else {
1316 long nanos = 0L;
1317 if (!timed)
1318 LockSupport.park(this);
1319 else if ((nanos = time - System.nanoTime()) > 0L)
1320 LockSupport.parkNanos(this, nanos);
1321 interrupted |= Thread.interrupted();
1322 if ((interrupted && interruptible) ||
1323 (timed && nanos <= 0L))
1324 return cancelCowaiter(node, leader, interrupted);
1325 }
1326 }
1327 if (node != null)
1328 node.waiter = null;
1329 long ns = tryAcquireRead();
1330 signalCowaiters(leader);
1331 if (interrupted)
1332 Thread.currentThread().interrupt();
1333 if (ns != 0L)
1334 return ns;
1335 else
1336 node = null; // restart if stale, missed, or leader cancelled
1337 }
1338 }
1339
1340 // node is leader of a cowait group; almost same as acquireWrite
1341 byte spins = 0, postSpins = 0; // retries upon unpark of first thread
1342 boolean first = false;
1343 Node pred = null;
1344 for (long nextState;;) {
1345 if (!first && (pred = node.prev) != null &&
1346 !(first = (head == pred))) {
1347 if (pred.status < 0) {
1348 cleanQueue(); // predecessor cancelled
1349 continue;
1350 } else if (pred.prev == null) {
1351 Thread.onSpinWait(); // ensure serialization
1352 continue;
1353 }
1354 }
1355 if ((first || pred == null) &&
1356 (nextState = tryAcquireRead()) != 0L) {
1357 if (first) {
1358 node.prev = null;
1359 head = node;
1360 pred.next = null;
1361 node.waiter = null;
1362 }
1363 signalCowaiters(node);
1364 if (interrupted)
1365 Thread.currentThread().interrupt();
1366 return nextState;
1367 } else if (first && spins != 0) {
1368 --spins;
1369 Thread.onSpinWait();
1370 } else if (node.status == 0) {
1371 if (node.waiter == null)
1372 node.waiter = Thread.currentThread();
1373 node.status = WAITING;
1374 } else {
1375 long nanos;
1376 spins = postSpins = (byte)((postSpins << 1) | 1);
1377 if (!timed)
1378 LockSupport.park(this);
1379 else if ((nanos = time - System.nanoTime()) > 0L)
1380 LockSupport.parkNanos(this, nanos);
1381 else
1382 break;
1383 node.clearStatus();
1384 if ((interrupted |= Thread.interrupted()) && interruptible)
1385 break;
1386 }
1387 }
1388 return cancelAcquire(node, interrupted);
1389 }
1390
1391 // Cancellation support
1392
1393 /**
1394 * Possibly repeatedly traverses from tail, unsplicing cancelled
1395 * nodes until none are found. Unparks nodes that may have been
1396 * relinked to be next eligible acquirer.
1397 */
1398 private void cleanQueue() {
1399 for (;;) { // restart point
1400 for (Node q = tail, s = null, p, n;;) { // (p, q, s) triples
1401 if (q == null || (p = q.prev) == null)
1402 return; // end of list
1403 if (s == null ? tail != q : (s.prev != q || s.status < 0))
1404 break; // inconsistent
1405 if (q.status < 0) { // cancelled
1406 if ((s == null ? casTail(q, p) : s.casPrev(q, p)) &&
1407 q.prev == p) {
1408 p.casNext(q, s); // OK if fails
1409 if (p.prev == null)
1410 signalNext(p);
1411 }
1412 break;
1413 }
1414 if ((n = p.next) != q) { // help finish
1415 if (n != null && q.prev == p && q.status >= 0) {
1416 p.casNext(n, q);
1417 if (p.prev == null)
1418 signalNext(p);
1419 }
1420 break;
1421 }
1422 s = q;
1423 q = q.prev;
1424 }
1425 }
1426 }
1427
1428 /**
1429 * If leader exists, possibly repeatedly traverses cowaiters,
1430 * unsplicing the given cancelled node until not found.
1431 */
1432 private void unlinkCowaiter(ReaderNode node, ReaderNode leader) {
1433 if (leader != null) {
1434 while (leader.prev != null && leader.status >= 0) {
1435 for (ReaderNode p = leader, q; ; p = q) {
1436 if ((q = p.cowaiters) == null)
1437 return;
1438 if (q == node) {
1439 p.casCowaiters(q, q.cowaiters);
1440 break; // recheck even if succeeded
1441 }
1442 }
1443 }
1444 }
1445 }
1446
1447 /**
1448 * If node non-null, forces cancel status and unsplices it from
1449 * queue, wakes up any cowaiters, and possibly wakes up successor
1450 * to recheck status.
1451 *
1452 * @param node the waiter (may be null if not yet enqueued)
1453 * @param interrupted if already interrupted
1454 * @return INTERRUPTED if interrupted or Thread.interrupted, else zero
1455 */
1456 private long cancelAcquire(Node node, boolean interrupted) {
1457 if (node != null) {
1458 node.waiter = null;
1459 node.status = CANCELLED;
1460 cleanQueue();
1461 if (node instanceof ReaderNode)
1462 signalCowaiters((ReaderNode)node);
1463 }
1464 return (interrupted || Thread.interrupted()) ? INTERRUPTED : 0L;
1465 }
1466
1467 /**
1468 * If node non-null, forces cancel status and unsplices from
1469 * leader's cowaiters list unless/until it is also cancelled.
1470 *
1471 * @param node if non-null, the waiter
1472 * @param leader if non-null, the node heading cowaiters list
1473 * @param interrupted if already interrupted
1474 * @return INTERRUPTED if interrupted or Thread.interrupted, else zero
1475 */
1476 private long cancelCowaiter(ReaderNode node, ReaderNode leader,
1477 boolean interrupted) {
1478 if (node != null) {
1479 node.waiter = null;
1480 node.status = CANCELLED;
1481 unlinkCowaiter(node, leader);
1482 }
1483 return (interrupted || Thread.interrupted()) ? INTERRUPTED : 0L;
1484 }
1485
    // Unsafe
    private static final Unsafe U = Unsafe.getUnsafe();
    // Raw field offsets used for CAS operations on the lock state and
    // on the wait-queue head/tail references.
    private static final long STATE
        = U.objectFieldOffset(StampedLock.class, "state");
    private static final long HEAD
        = U.objectFieldOffset(StampedLock.class, "head");
    private static final long TAIL
        = U.objectFieldOffset(StampedLock.class, "tail");
1494
    static {
        // Force LockSupport to be loaded eagerly so its first use while
        // parking does not trigger classloading at an inopportune time.
        Class<?> ensureLoaded = LockSupport.class;
    }
1498 }
|