/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
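//
//   As a sketch, the encoding looks roughly like this (see the SplitWord
//   union in mutex.hpp for the authoritative layout):
//
//     _LockWord.FullWord:  [ cxq head (an aligned ParkEvent *) ... | LockByte ]
//
//   so a single CAS on _LockWord can atomically push a thread onto the cxq
//   while observing -- and preserving -- the LockByte.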
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
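//
//   A minimal sketch of the MEMBAR-flavored unlock idiom used here
//   (cf. IUnlock() below):
//
//     OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop lock
//     OrderAccess::storeload();   // the MEMBAR -- pivot for succession checks
//     // ... now inspect cxq/EntryList and wake a successor if required ...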
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
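//
//   A sketch of that remedy (hypothetical -- no AcquireCounter, K,
//   DirectHandoff or CompetitiveUnlock exists in this file):
//
//     void unlock() {
//       if (--_AcquireCounter == 0) {
//         _AcquireCounter = K;                      // reset the fairness credit
//         DirectHandoff(DequeueHead(&_EntryList));  // abdicate, bypass competition
//       } else {
//         CompetitiveUnlock();                      // the normal policy, cf. IUnlock()
//       }
//     }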
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
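//
//   Drain sketch (illustrative only; the real logic is in IUnlock() below and
//   runs while holding the OnDeck inner lock; Reorder is notional):
//
//     intptr_t cxq = _LockWord.FullWord;
//     if (CASPTR(&_LockWord, cxq, cxq & _LBIT) == cxq) {    // detach the RATs
//       _EntryList = Reorder((ParkEvent *)(cxq & ~_LBIT));  // apply discipline
//     }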
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   --  For a given monitor there can be at most one OnDeck thread at any given
//       instant.  The OnDeck thread is contending for the lock, but has been
//       unlinked from the EntryList and cxq by some previous unlock() operations.
//       Once a thread has been designated the OnDeck thread it will remain so
//       until it manages to acquire the lock -- being OnDeck is a stable property.
//   --  Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//       having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//       OnDeck by CASing the field from null to non-null.  If successful, that thread
//       is then responsible for progress and succession and can use CAS to detach and
//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//       we allow multiple concurrent "push" operations but restrict detach concurrency
//       to at most one thread.  Having selected and detached a successor, the thread then
//       changes the OnDeck to refer to that successor, and then unparks the successor.
//       That successor will eventually acquire the lock and clear OnDeck.  Beware
//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//       and then the successor eventually "drops" OnDeck.  Note that there's never
//       any sense of contention on the inner lock, however.  Threads never contend
//       or wait for the inner lock.
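//
//       A compressed sketch of that asymmetric protocol (cf. IUnlock() below):
//
//         if (CASPTR(&_OnDeck, NULL, _LBIT) == UNS(NULL)) { // trylock inner lock
//           ParkEvent * w = ... select and unlink a successor ...;
//           _OnDeck = w;       // pass the inner lock to the successor
//           w->unpark();       // w clears _OnDeck after taking the outer lock
//         }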
//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong or stronger than the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
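//
//   A sketch of that alternative (hypothetical -- these helpers don't exist):
//
//     ParkEvent * e = Self->PopEventCache();      // thread-local, no locking
//     if (e == NULL) e = ParkEvent::Allocate(Self);
//     ... enqueue on cxq|WaitSet and park() on e ...
//     Self->PushEventCache(e);                    // return e to the local stack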
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from a jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a, c, s)  \
  intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
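// For example, CASPTR(&_LockWord, 0, _LBIT) attempts to install _LBIT in
// _LockWord iff the current value is 0.  CASPTR always returns the value
// observed in the target word immediately before the operation, so the CAS
// succeeded iff the return value equals the comparand.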
#define UNS(x) (uintptr_t(x))
#define TRACE(m)                   \
  {                                \
    static volatile int ctr = 0;   \
    int x = ++ctr;                 \
    if ((x & (x - 1)) == 0) {      \
      ::printf("%d:%s\n", x, #m);  \
      ::fflush(stdout);            \
    }                              \
  }

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work over
// the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock())    return 1;
  if (!os::is_MP()) return 0;

  int Probes  = 0;
  int Delay   = 0;
  int Steps   = 0;
  int SpinMax = NativeMonitorSpinLimit;
  int flgs    = NativeMonitorFlags;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1;
      }
      continue;
    }

    if ((flgs & 8) == 0) {
      SpinPause();
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue;

    // Consider checking _owner's schedctl state; if OFFPROC, abort the spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay;
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt;
  }
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
      if (u == v) return 1;        // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
      if (u == v) return 0;        // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock(Thread * Self) {
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    return;
  }

  ParkEvent * const ESelf = Self->_MutexEvent;
  assert(_OnDeck != ESelf, "invariant");

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT.
  // synchronizer.cpp uses a similar optimization.
  if (TrySpin(Self)) goto Exeunt;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset();
  OrderAccess::fence();

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
    goto OnDeck_LOOP;
  }

  if (AcquireOrPush(ESelf)) goto Exeunt;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon(ESelf, 0);
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert(_OnDeck == ESelf, "invariant");
    if (TrySpin(Self)) break;
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt;
}

void Monitor::IUnlock(bool RelaxAssert) {
  assert(ILocked(), "invariant");
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload();
  ParkEvent * const w = _OnDeck;
  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate the two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark();
    return;
  }

  intptr_t cxq = _LockWord.FullWord;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return;
  }

  ParkEvent * List = _EntryList;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert(List == _EntryList, "invariant");
    ParkEvent * const w = List;
    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
    _EntryList = w->ListNext;
    // as a diagnostic measure consider setting w->ListNext = BAD
    assert(UNS(_OnDeck) == _LBIT, "invariant");
    _OnDeck = w;  // pass OnDeck to w.
                  // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload();
    cxq = _LockWord.FullWord;
    if (cxq & _LBIT) return;

    w->unpark();
    return;
  }

  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
      const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert(_EntryList == NULL, "invariant");
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
    assert(List != NULL, "invariant");
    goto WakeOne;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert(UNS(_OnDeck) == _LBIT, "invariant");
  _OnDeck = NULL;            // Release inner lock.
  OrderAccess::storeload();  // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession;         // potential race -- re-run succession
  }
  return;
}

bool Monitor::notify() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  if (_WaitSet == NULL) return true;
  NotifyCount++;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend it to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
  ParkEvent * nfy = _WaitSet;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext;
    assert(nfy->Notified == 0, "invariant");
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence();
    nfy->Notified = 1;
  }
  Thread::muxRelease(_WaitLock);
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark();
  }
  assert(ILocked(), "invariant");
  return true;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse
// transfer, but in practice notifyAll() for large numbers of threads is rare
// and not time-critical.  Beware too, that we invert the order of the waiters.
// Let's say that the waitset is "ABCD" and the cxq is "XYZ".  After a
// notifyAll() the waitset will be empty and the cxq will be "DCBAXYZ".
// This is benign, of course.

bool Monitor::notify_all() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  while (_WaitSet != NULL) notify();
  return true;
}

int Monitor::IWait(Thread * Self, jlong timo) {
  assert(ILocked(), "invariant");

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent;
  ESelf->Notified = 0;
  ESelf->reset();
  OrderAccess::fence();

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.
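  //
  // A sketch of the WaitEvent variant (hypothetical -- no _WaitEvent field
  // and no Unlink helper exist in this code base):
  //
  //   Self->_WaitEvent->ListNext = _WaitSet;  // the outer lock protects _WaitSet
  //   _WaitSet = Self->_WaitEvent;
  //   IUnlock(true);                          // release the outer lock
  //   ParkCommon(Self->_WaitEvent, timo);     // wait for notify or timeout
  //   ILock(Self);                            // reacquire the outer lock
  //   if (!notified) Unlink(Self->_WaitEvent, &_WaitSet);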

  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
  ESelf->ListNext = _WaitSet;
  _WaitSet = ESelf;
  Thread::muxRelease(_WaitLock);

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock(true);

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break;
    int err = ParkCommon(ESelf, timo);
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence();
  int WasOnWaitSet = 0;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
    if (ESelf->Notified == 0) {     // DCL idiom
      assert(_OnDeck != ESelf, "invariant");   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet;
      ParkEvent * q = NULL;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p;
        p = p->ListNext;
      }
      assert(p == ESelf, "invariant");
      if (p == _WaitSet) {      // found at head
        assert(q == NULL, "invariant");
        _WaitSet = p->ListNext;
      } else {                  // found in interior
        assert(q->ListNext == p, "invariant");
        q->ListNext = p->ListNext;
      }
      WasOnWaitSet = 1;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease(_WaitLock);
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert(_OnDeck != ESelf, "invariant");
    ILock(Self);
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break;
      ParkCommon(ESelf, 0);
    }
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
  }

  assert(ILocked(), "invariant");
  return WasOnWaitSet != 0;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where mutators were known blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant for eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock(Thread * Self) {
  // Ensure that the Monitor requires/allows safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_never,
         "This lock should never have a safepoint check: %s", name());

#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert(_owner != Self, "invariant");
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(owner() == NULL, "invariant");
    set_owner(Self);
    return;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt;
  }

  // Try a brief spin to avoid passing thru the thread state transition ...
  if (TrySpin(Self)) goto Exeunt;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm((JavaThread *) Self);
    ILock(Self);
  } else {
    // Mirabile dictu
    ILock(Self);
  }
  goto Exeunt;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check(Thread * Self) {
  // Ensure that the Monitor does not require or allow safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
         "This lock should always have a safepoint check: %s", name());
  assert(_owner != Self, "invariant");
  ILock(Self);
  assert(_owner == NULL, "invariant");
  set_owner(Self);
}

void Monitor::lock_without_safepoint_check() {
  lock_without_safepoint_check(Thread::current());
}


// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Does not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert(_owner == NULL, "invariant");
    set_owner(Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert(_owner == Thread::current(), "invariant");
  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
  set_owner(NULL);
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(_owner == NULL, "invariant");
    // This can potentially be called by non-Java threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return;
  }

  if (TrySpin(NULL)) goto Exeunt;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
  ESelf->reset();
  OrderAccess::storeload();

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release(ESelf);      // surrender the ParkEvent
    goto Exeunt;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break;
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;
  ParkEvent::Release(ESelf);      // surrender the ParkEvent
  goto Exeunt;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

bool Monitor::wait(bool no_safepoint_check, long timeout,
                   bool as_suspend_equivalent) {
  // Make sure safepoint checking is used properly.
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
         "This lock should never have a safepoint check: %s", name());
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
         "This lock should always have a safepoint check: %s", name());

  Thread * const Self = Thread::current();
  assert(_owner == Self, "invariant");
  assert(ILocked(), "invariant");

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
  // !no_safepoint_check logically implies java_thread
  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

  #ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
  #endif // ASSERT

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait(Self, timeout);
  } else {
    assert(Self->is_Java_thread(), "invariant");
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait(Self, timeout);

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert(ILocked(), "invariant");
      IUnlock(true);
      jt->java_suspend_self();
      ILock(Self);
      assert(ILocked(), "invariant");
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert(ILocked(), "invariant");
  assert(_owner == NULL, "invariant");
  set_owner(Self);
  return wait_status != 0;          // return true IFF timeout
}
1159 
1160 Monitor::~Monitor() {
1161   assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
1162 }
1163 
1164 void Monitor::ClearMonitor(Monitor * m, const char *name) {
1165   m->_owner             = NULL;
1166   m->_snuck             = false;
1167   if (name == NULL) {
1168     strcpy(m->_name, "UNKNOWN");
1169   } else {
1170     strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
1171     m->_name[MONITOR_NAME_LEN - 1] = '\0';
1172   }
1173   m->_LockWord.FullWord = 0;
1174   m->_EntryList         = NULL;
1175   m->_OnDeck            = NULL;
1176   m->_WaitSet           = NULL;
1177   m->_WaitLock[0]       = 0;
1178 }
1179 
1180 Monitor::Monitor() { ClearMonitor(this); }
1181 
1182 Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
1183                  SafepointCheckRequired safepoint_check_required) {
1184   ClearMonitor(this, name);
1185 #ifdef ASSERT
1186   _allow_vm_block  = allow_vm_block;
1187   _rank            = Rank;
1188   NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1189 #endif
1190 }
1191 
1192 Mutex::~Mutex() {
  assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0,
         "mutex destroyed while in use");
1194 }
1195 
1196 Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
1197              SafepointCheckRequired safepoint_check_required) {
1198   ClearMonitor((Monitor *) this, name);
1199 #ifdef ASSERT
1200   _allow_vm_block   = allow_vm_block;
1201   _rank             = Rank;
1202   NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1203 #endif
1204 }
1205 
1206 bool Monitor::owned_by_self() const {
1207   bool ret = _owner == Thread::current();
1208   assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
1209   return ret;
1210 }
1211 
1212 void Monitor::print_on_error(outputStream* st) const {
1213   st->print("[" PTR_FORMAT, p2i(this));
1214   st->print("] %s", _name);
1215   st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
1216 }
1217 
1221 // ----------------------------------------------------------------------------------
1222 // Non-product code
1223 
1224 #ifndef PRODUCT
1225 void Monitor::print_on(outputStream* st) const {
1226   st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
1227                p2i(this), _LockWord.FullWord, _name, p2i(_owner));
1228 }
1229 #endif
1230 
1231 #ifndef PRODUCT
1232 #ifdef ASSERT
1233 Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
1234   Monitor *res, *tmp;
1235   for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
1236     if (tmp->rank() < res->rank()) {
1237       res = tmp;
1238     }
1239   }
1240   if (!SafepointSynchronize::is_at_safepoint()) {
1241     // In this case, we expect the held locks to be
1242     // in increasing rank order (modulo any native ranks)
1243     for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1244       if (tmp->next() != NULL) {
1245         assert(tmp->rank() == Mutex::native ||
1246                tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1247       }
1248     }
1249   }
1250   return res;
1251 }
1252 
1253 Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
1254   Monitor *res, *tmp;
1255   for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
1256     if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
1257       res = tmp;
1258     }
1259   }
1260   if (!SafepointSynchronize::is_at_safepoint()) {
1261     // In this case, we expect the held locks to be
1262     // in increasing rank order (modulo any native ranks)
1263     for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1264       if (tmp->next() != NULL) {
1265         assert(tmp->rank() == Mutex::native ||
1266                tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1267       }
1268     }
1269   }
1270   return res;
1271 }
1272 
1273 
1274 bool Monitor::contains(Monitor* locks, Monitor * lock) {
1275   for (; locks != NULL; locks = locks->next()) {
1276     if (locks == lock) {
1277       return true;
1278     }
1279   }
1280   return false;
1281 }
#endif // ASSERT
1283 
1284 // Called immediately after lock acquisition or release as a diagnostic
1285 // to track the lock-set of the thread and test for rank violations that
1286 // might indicate exposure to deadlock.
1287 // Rather like an EventListener for _owner (:>).
1288 
1289 void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another -- ownership must pass through NULL
  // as an intermediate state.
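  //
  // Allowed _owner transitions (illustrative sketch):
  //   NULL -> T1    T1 acquires the lock
  //   T1   -> NULL  T1 releases the lock
  //   T1   -> T2    illegal; must go through the NULL state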
1299 
1300   if (new_owner != NULL) {
1301     // the thread is acquiring this lock
1302 
    assert(new_owner == Thread::current(), "the owner may only be set to the current thread");
1304     assert(_owner == NULL, "setting the owner thread of an already owned mutex");
1305     _owner = new_owner; // set the owner
1306 
1307     // link "this" into the owned locks list
1308 
1309 #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
1310     Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
1311     // Mutex::set_owner_implementation is a friend of Thread
1312 
1313     assert(this->rank() >= 0, "bad lock rank");
1314 
    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order. For example, if m1 is the lowest-ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
1322     // Here are some further notes relating to mutex acquisition anomalies:
1323     // . under Solaris, the interrupt lock gets acquired when doing
1324     //   profiling, so any lock could be held.
1325     // . it is also ok to acquire Safepoint_lock at the very end while we
1326     //   already hold Terminator_lock - may happen because of periodic safepoints
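    // Worked example (a sketch): if the least-ranked lock currently held has
    // rank Mutex::nonleaf, then acquiring a Mutex::leaf lock is permitted
    // (leaf < nonleaf), whereas acquiring another nonleaf lock would trip
    // the check below, since locks->rank() <= this->rank() would hold.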
1327     if (this->rank() != Mutex::native &&
1328         this->rank() != Mutex::suspend_resume &&
1329         locks != NULL && locks->rank() <= this->rank() &&
1330         !SafepointSynchronize::is_at_safepoint() &&
1331         this != Interrupt_lock && this != ProfileVM_lock &&
1332         !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
1333         SafepointSynchronize::is_synchronizing())) {
1334       new_owner->print_owned_locks();
1335       fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
1336             "possible deadlock", this->name(), this->rank(),
1337             locks->name(), locks->rank());
1338     }
1339 
1340     this->_next = new_owner->_owned_locks;
1341     new_owner->_owned_locks = this;
#endif // ASSERT
1343 
1344   } else {
1345     // the thread is releasing this lock
1346 
1347     Thread* old_owner = _owner;
1348     debug_only(_last_owner = old_owner);
1349 
1350     assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of a mutex not owned by the current thread");
1352 
1353     _owner = NULL; // set the owner
1354 
1355 #ifdef ASSERT
1356     Monitor *locks = old_owner->owned_locks();
1357 
1358     // remove "this" from the owned locks list
1359 
1360     Monitor *prev = NULL;
1361     bool found = false;
1362     for (; locks != NULL; prev = locks, locks = locks->next()) {
1363       if (locks == this) {
1364         found = true;
1365         break;
1366       }
1367     }
1368     assert(found, "Removing a lock not owned");
1369     if (prev == NULL) {
1370       old_owner->_owned_locks = _next;
1371     } else {
1372       prev->_next = _next;
1373     }
1374     _next = NULL;
#endif // ASSERT
1376   }
1377 }
1378 
1379 
// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock().
1381 void Monitor::check_prelock_state(Thread *thread) {
1382   assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
1383          || rank() == Mutex::special, "wrong thread state for using locks");
1384   if (StrictSafepointChecks) {
1385     if (thread->is_VM_thread() && !allow_vm_block()) {
1386       fatal("VM thread using lock %s (not allowed to block on)", name());
1387     }
1388     debug_only(if (rank() != Mutex::special) \
1389                thread->check_for_valid_safepoint_state(false);)
1390   }
1391   if (thread->is_Watcher_thread()) {
1392     assert(!WatcherThread::watcher_thread()->has_crash_protection(),
1393            "locking not allowed when crash protection is set");
1394   }
1395 }
1396 
1397 void Monitor::check_block_state(Thread *thread) {
1398   if (!_allow_vm_block && thread->is_VM_thread()) {
1399     warning("VM thread blocked on lock");
1400     print();
1401     BREAKPOINT;
1402   }
1403   assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
1404 }
1405 
1406 #endif // PRODUCT