/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
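//
//   To make the _LockWord encoding concrete, the sketch below mirrors the
//   idea (the authoritative union lives in mutex.hpp; the name LockWordSketch
//   and the annotations here are illustrative only):
//
//     union LockWordSketch {                      // hypothetical mirror
//       intptr_t FullWord ;                       // (cxq head | ... | LockByte)
//       volatile jbyte Bytes [sizeof(intptr_t)] ;
//     } ;
//     // uncontended acquire: CAS FullWord from 0 to _LBIT
//     // uncontended release:  byte-store 0 into Bytes[_LSBINDEX]; then MEMBAR
//
//   Because the LockByte and the cxq head share one word, a single CAS can
//   atomically acquire the lock or push onto the cxq against a consistent
//   snapshot of both, which is what precludes the races mentioned above.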
//
// See the following for a discussion of the relative cost of atomics (CAS),
// MEMBAR, and ways to eliminate such instructions from the common-case paths:
// -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
// -- http://blogs.sun.com/dave/resource/MustangSync.pdf
// -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
// -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.  (A sketch of this idea appears below).
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
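//
//   To make the AcquireCounter remedy above concrete: no such field exists
//   in this implementation.  The fragment below is purely hypothetical, and
//   every name in it except _EntryList is invented for illustration:
//
//     // hypothetical epilog run after each contended acquisition
//     void FairAcquireEpilog (Monitor * m, Thread * Self) {
//       if (--m->AcquireCounter <= 0) {            // our allowance is exhausted
//         m->AcquireCounter = FairnessInterval ;   // reset the allowance
//         if (m->_EntryList != NULL) {
//           DirectHandoff (m, m->_EntryList) ;     // abdicate to a waiting thread
//           EnqueueAtTail (m, Self) ;              // and requeue ourselves at the tail
//         }
//       }
//     }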
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck.  Note that there's never
//      any sense of contention on the inner lock, however.  Threads never contend
//      or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
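//
//   To summarize the asymmetric inner-lock protocol described above, the
//   unlock() side behaves roughly as follows.  This is an illustrative sketch
//   only -- the real logic is inlined in IUnlock() below, and
//   PickAndUnlinkSuccessor is a hypothetical helper standing in for the
//   EntryList/cxq manipulation:
//
//     // trylock the inner lock; if it's held, succession is someone else's job
//     if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) return ;
//     ParkEvent * w = PickAndUnlinkSuccessor () ;  // hypothetical
//     _OnDeck = w ;        // pass the inner lock to the successor ...
//     w->unpark () ;
//     // ... and the successor drops it in ILock() after taking the outer lock:
//     //     _OnDeck = NULL ;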
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.  (A sketch
//   appears below).
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
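//
//   The per-thread ParkEvent cache suggested in the Thread:: bullet above
//   might look roughly like the following.  This is a hypothetical sketch --
//   Thread has no _FreeEvents field today -- intended only to make the
//   proposal concrete:
//
//     // hypothetical thread-local free list; thread-local, so no races
//     ParkEvent * Thread::PopEvent () {
//       ParkEvent * e = _FreeEvents ;
//       if (e != NULL) { _FreeEvents = e->ListNext ; return e ; }
//       return ParkEvent::Allocate (this) ;     // cache miss - global allocator
//     }
//     void Thread::PushEvent (ParkEvent * e) {
//       e->ListNext = _FreeEvents ;             // return event to the local cache
//       _FreeEvents = e ;
//     }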
//
// It's unfortunate that native mutexes and thread state transitions were convolved.
// They're really separate concerns and should have remained that way.  Melding
// them together was facile -- a bit too facile.  The current implementation badly
// conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock())    return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT.
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked.
    // CONSIDER: check the lockbyte; if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.
  // Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate the two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
                            // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList.
    // Detach RATs segment with CAS and then merge into EntryList.
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry.
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance, let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend it to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large numbers of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet.
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // that is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock.
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout.
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet.
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList.
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {       // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;    // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;          // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {            // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                        // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;              // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck.
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators,
// but where the mutators were known to be blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}


// Returns true if the thread succeeded in grabbing the lock, otherwise false.
bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check() --
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the ThreadLocalStorage
    // might return NULL.  Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;        // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {                       // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;          // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck    = NULL ;
  m->_WaitSet   = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
          SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
           "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT
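
// For reference, typical clients do not call lock()/unlock() directly but go
// through the RAII wrappers in mutexLocker.hpp.  The fragment below is an
// illustrative sketch of the canonical wait-loop idiom, assuming some
// Monitor* named SomeMonitor_lock and a predicate condition_of_interest
// declared elsewhere (both names are hypothetical):
//
//   {
//     MutexLockerEx ml (SomeMonitor_lock, Mutex::_no_safepoint_check_flag);
//     while (!condition_of_interest) {
//       // wait() drops the outer lock via IWait() and reacquires it before
//       // returning; spurious returns are possible, hence the loop.
//       SomeMonitor_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//     // ... act on the condition while still holding the lock ...
//   }  // ~MutexLockerEx calls unlock()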