/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
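//
//   An illustrative sketch of that colocation as consumed by the code below
//   (assuming, as the code does, that ParkEvents are sufficiently aligned
//   that the low-order lock bit is free):
//
//     head   = (ParkEvent *) (_LockWord.FullWord & ~_LBIT) ;  // cxq head
//     locked = (_LockWord.FullWord & _LBIT) != 0 ;            // LockByte set?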
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
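//
//   Concretely, the uncontended unlock() path in IUnlock() below issues
//
//     OrderAccess::release_store (&_LockWord.Bytes[_LSBINDEX], 0) ;
//     OrderAccess::storeload () ;    // the MEMBAR in question
//
//   rather than CASing the LockByte back to zero.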
//
//   See the following for a discussion of the relative cost of atomics (CAS)
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
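//
//   A hypothetical sketch of that remedy (AcquireCounter and K are
//   illustrative names only; no such field exists in the current Monitor):
//
//     ILock (Self) ;
//     if (--_AcquireCounter <= 0) {
//        _AcquireCounter = K ;   // reset the abdication interval
//        // abdicate: pass the lock directly to the EntryList head and
//        // requeue Self at the tail of the EntryList ...
//     }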
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   --  For a given monitor there can be at most one OnDeck thread at any given
//       instant.  The OnDeck thread is contending for the lock, but has been
//       unlinked from the EntryList and cxq by some previous unlock() operations.
//       Once a thread has been designated the OnDeck thread it will remain so
//       until it manages to acquire the lock -- being OnDeck is a stable property.
//   --  Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//       having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//       OnDeck by CASing the field from null to non-null.  If successful, that thread
//       is then responsible for progress and succession and can use CAS to detach and
//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//       we allow multiple concurrent "push" operations but restrict detach concurrency
//       to at most one thread.  Having selected and detached a successor, the thread then
//       changes the OnDeck to refer to that successor, and then unparks the successor.
//       That successor will eventually acquire the lock and clear OnDeck.  Beware
//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//       and then the successor eventually "drops" OnDeck.  Note that there's never
//       any sense of contention on the inner lock, however.  Threads never contend
//       or wait for the inner lock.
//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as, or stronger than, the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
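//
//   A sketch of that hypothetical scheme (PopEventStack and PushEventStack
//   are illustrative names only -- no such per-thread stack exists today):
//
//     ParkEvent * ev = Self->PopEventStack() ;           // hit the local cache ...
//     if (ev == NULL) ev = ParkEvent::Allocate (Self) ;  // ... or provision one
//     // ... enqueue ev and park() on it ...
//     Self->PushEventStack (ev) ;                        // return it to the cache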
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work over
// the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock())    return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
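  // NativeMonitorFlags bits consulted in the loop below, decoded here for
  // convenience from the uses that follow:
  //   0x2 - skip the variable-delay stall between probes
  //   0x4 - do not abort the spin when a safepoint is pending
  //   0x8 - omit the SpinPause() between probes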
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
     timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
                            // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {      // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                  // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where mutators were known to be blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}

// Returns true if the thread succeeded in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-Java threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
1035     _owner = ThreadLocalStorage::thread();
1036     return ;
1037   }
1038 
1039   if (TrySpin(NULL)) goto Exeunt ;
1040 
1041   // slow-path - apparent contention
1042   // Allocate a ParkEvent for transient use.
1043   // The ParkEvent remains associated with this thread until
1044   // the time the thread manages to acquire the lock.
1045   ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
1046   ESelf->reset() ;
1047   OrderAccess::storeload() ;
1048 
1049   // Either Enqueue Self on cxq or acquire the outer lock.
1050   if (AcquireOrPush (ESelf)) {
1051     ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
1052     goto Exeunt ;
1053   }
1054 
1055   // At any given time there is at most one ondeck thread.
1056   // ondeck implies not resident on cxq and not resident on EntryList
1057   // Only the OnDeck thread can try to acquire -- contended for -- the lock.
1058   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
1059   for (;;) {
1060     if (_OnDeck == ESelf && TrySpin(NULL)) break ;
1061     ParkCommon (ESelf, 0) ;
1062   }
1063 
1064   assert (_OnDeck == ESelf, "invariant") ;
1065   _OnDeck = NULL ;
1066   ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
1067   goto Exeunt ;
1068 }
1069 
1070 void Monitor::jvm_raw_unlock() {
1071   // Nearly the same as Monitor::unlock() ...
1072   // directly set _owner instead of using set_owner(null)
1073   _owner = NULL ;
1074   if (_snuck) {         // ???
1075     assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1076     _snuck = false;
1077     return ;
1078   }
1079   IUnlock(false) ;
1080 }
1081 
1082 bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
1083   Thread * const Self = Thread::current() ;
1084   assert (_owner == Self, "invariant") ;
1085   assert (ILocked(), "invariant") ;
1086 
1087   // as_suspend_equivalent logically implies !no_safepoint_check
1088   guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
1089   // !no_safepoint_check logically implies java_thread
1090   guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
1091 
1092   #ifdef ASSERT
1093     Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
1094     assert(least != this, "Specification of get_least_... call above");
1095     if (least != NULL && least->rank() <= special) {
1096       tty->print("Attempting to wait on monitor %s/%d while holding"
1097                  " lock %s/%d -- possible deadlock",
1098                  name(), rank(), least->name(), least->rank());
1099       assert(false, "Shouldn't block(wait) while holding a lock of rank special");
1100     }
1101   #endif // ASSERT
1102 
1103   int wait_status ;
1104   // conceptually set the owner to NULL in anticipation of
1105   // abdicating the lock in wait
1106   set_owner(NULL);
1107   if (no_safepoint_check) {
1108     wait_status = IWait (Self, timeout) ;
1109   } else {
1110     assert (Self->is_Java_thread(), "invariant") ;
1111     JavaThread *jt = (JavaThread *)Self;
1112 
1113     // Enter safepoint region - ornate and Rococo ...
1114     ThreadBlockInVM tbivm(jt);
1115     OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
1116 
1117     if (as_suspend_equivalent) {
1118       jt->set_suspend_equivalent();
1119       // cleared by handle_special_suspend_equivalent_condition() or
1120       // java_suspend_self()
1121     }
1122 
1123     wait_status = IWait (Self, timeout) ;
1124 
1125     // were we externally suspended while we were waiting?
1126     if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
1127       // Our event wait has finished and we own the lock, but
1128       // while we were waiting another thread suspended us. We don't
1129       // want to hold the lock while suspended because that
1130       // would surprise the thread that suspended us.
1131       assert (ILocked(), "invariant") ;
1132       IUnlock (true) ;
1133       jt->java_suspend_self();
1134       ILock (Self) ;
1135       assert (ILocked(), "invariant") ;
1136     }
1137   }
1138 
1139   // Conceptually reestablish ownership of the lock.
1140   // The "real" lock -- the LockByte -- was reacquired by IWait().
1141   assert (ILocked(), "invariant") ;
1142   assert (_owner == NULL, "invariant") ;
1143   set_owner (Self) ;
1144   return wait_status != 0 ;          // return true IFF timeout
1145 }
1146 
1147 Monitor::~Monitor() {
1148   assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
1149 }
1150 
1151 void Monitor::ClearMonitor (Monitor * m, const char *name) {
1152   m->_owner             = NULL ;
1153   m->_snuck             = false ;
1154   if (name == NULL) {
1155     strcpy(m->_name, "UNKNOWN") ;
1156   } else {
1157     strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
1158     m->_name[MONITOR_NAME_LEN - 1] = '\0';
1159   }
1160   m->_LockWord.FullWord = 0 ;
1161   m->_EntryList         = NULL ;
1162   m->_OnDeck            = NULL ;
1163   m->_WaitSet           = NULL ;
1164   m->_WaitLock[0]       = 0 ;
1165 }
1166 
1167 Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0,
          "mutex destroyed while owned, contended, or with waiters") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block   = allow_vm_block;
  _rank             = Rank ;
#endif
}
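
// A minimal construction sketch (the lock name below is illustrative; the
// rank constants come from the Mutex rank enum declared in mutex.hpp):
//
//   Monitor * Example_lock = new Monitor(Mutex::nonleaf, "Example_lock",
//                                        false /* allow_vm_block */);
//
// In ASSERT builds the rank feeds the deadlock-avoidance checks in
// set_owner_implementation() below; in product builds it is discarded.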

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
               this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
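// Walk the thread's list of owned locks and return the one with the lowest
// rank.  Outside a safepoint the list is also verified to be sorted by
// increasing rank (modulo native-ranked locks).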
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

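// As get_least_ranked_lock() above, but skip "this" monitor -- used by
// wait() to examine the locks a thread holds besides the one it is about
// to block on.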
Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

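// Linear search: returns true iff "lock" appears on the "locks" list.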
bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif // ASSERT

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.
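  //
  // Legal owner transitions, sketched:
  //   NULL -> T      thread T acquires the lock
  //   T    -> NULL   thread T releases the lock
  // A direct T1 -> T2 handoff is illegal and trips the asserts below.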

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "the acquiring thread must be the current thread");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
                    // Mutex::set_owner_implementation is a friend of Thread

      assert(this->rank() >= 0, "bad lock rank");

      // Deadlock avoidance rules require us to acquire Mutexes only in
      // a global total order.  For example, if m1 is the lowest-ranked mutex
      // that the thread holds and m2 is the mutex the thread is trying
      // to acquire, then deadlock avoidance rules require that the rank
      // of m2 be less than the rank of m1.
      // The rank Mutex::native is an exception in that it is not subject
      // to the verification rules.
      // Here are some further notes relating to mutex acquisition anomalies:
      // . under Solaris, the interrupt lock gets acquired when doing
      //   profiling, so any lock could be held.
      // . it is also OK to acquire Safepoint_lock at the very end while we
      //   already hold Terminator_lock - this may happen because of periodic safepoints
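      // For example, a thread that holds only a nonleaf-ranked lock may then
      // acquire a leaf-ranked lock (leaf < nonleaf), but attempting the
      // reverse order fails the check below.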
      if (this->rank() != Mutex::native &&
          this->rank() != Mutex::suspend_resume &&
          locks != NULL && locks->rank() <= this->rank() &&
          !SafepointSynchronize::is_at_safepoint() &&
          this != Interrupt_lock && this != ProfileVM_lock &&
          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
            SafepointSynchronize::is_synchronizing())) {
        new_owner->print_owned_locks();
        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                      "possible deadlock", this->name(), this->rank(),
                      locks->name(), locks->rank()));
      }

      this->_next = new_owner->_owned_locks;
      new_owner->_owned_locks = this;
    #endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of a mutex owned by another thread");

    _owner = NULL; // clear the owner

    #ifdef ASSERT
      Monitor *locks = old_owner->owned_locks();

      // remove "this" from the owned locks list

      Monitor *prev = NULL;
      bool found = false;
      for (; locks != NULL; prev = locks, locks = locks->next()) {
        if (locks == this) {
          found = true;
          break;
        }
      }
      assert(found, "Removing a lock not owned");
      if (prev == NULL) {
        old_owner->_owned_locks = _next;
      } else {
        prev->_next = _next;
      }
      _next = NULL;
    #endif
  }
}

// Factored-out common sanity checks for locking mutexes.  Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special)
      thread->check_for_valid_safepoint_state(false);)
  }
}

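// Diagnose a VM thread blocking on a lock that was not created with
// allow_vm_block, and assert that a thread never blocks on a monitor it
// already owns.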
void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT