/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.  (A sketch of the
//   acquisition CAS appears below).
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
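// For illustration only, a hedged sketch of the LockByte acquisition CAS
// described above.  It mirrors the logic of Monitor::TryLock() defined later
// in this file rather than introducing anything new:
//
//   intptr_t v = _LockWord.FullWord ;
//   for (;;) {
//     if ((v & _LBIT) != 0) break ;                     // LockByte already set
//     const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
//     if (u == v) break ;                               // acquired
//     v = u ;                                           // interference - retry
//   }
//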
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.  (A sketch of the push appears below).
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
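// For illustration only, a hedged sketch of the ABA-oblivious CAS push onto
// the cxq mentioned above; it mirrors the enqueue branch of AcquireOrPush()
// and the push in notify(), both defined later in this file (ESelf is the
// caller's ParkEvent):
//
//   // assuming the lock is held (LockByte set), push ESelf onto the cxq:
//   for (;;) {
//     const intptr_t v = _LockWord.FullWord ;
//     ESelf->ListNext = (ParkEvent *)(v & ~_LBIT) ;     // anticipate success
//     if (CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) == v) break ;
//     // interference -- the cxq head or the LockByte changed; just retry
//   }
//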
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck.  Note that there's never
//      any sense of contention on the inner lock, however.  Threads never contend
//      or wait for the inner lock.  (A sketch of the trylock appears below).
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
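// For illustration only, a hedged sketch of the asymmetric "trylock" on the
// inner OnDeck lock described above; it mirrors the succession path of
// IUnlock() defined later in this file:
//
//   // exiting thread: try to "lock" OnDeck by CASing it from null to _LBIT
//   if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
//     return ;      // someone else holds OnDeck -- they ensure succession
//   }
//   // ... detach RATs from the cxq, pick a successor w from the EntryList ...
//   _OnDeck = w ;   // pass the inner lock to the successor
//   w->unpark() ;   // the successor clears OnDeck after acquiring the outer lock
//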
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}
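
// For illustration only, a hedged sketch of the _LockWord encoding assumed
// throughout this file: the LockByte occupies the low-order byte (indexed by
// _LSBINDEX, as in _LockWord.Bytes[_LSBINDEX]) and the remaining bits hold
// the head of the cxq.
//
//   const intptr_t w   = _LockWord.FullWord ;
//   const bool held    = (w & _LBIT) != 0 ;          // is the LockByte set?
//   ParkEvent * cxq    = (ParkEvent *)(w & ~_LBIT) ; // head of the cxq, or NULL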

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock()) return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;      // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;      // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}
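
// For illustration only, a hedged sketch of how callers interpret
// AcquireOrPush()'s return value; this mirrors the slow path of ILock()
// below rather than defining new behavior:
//
//   ESelf->reset() ;
//   OrderAccess::fence() ;
//   if (AcquireOrPush (ESelf)) {
//     // returned 1: we acquired the outer lock outright
//   } else {
//     // returned 0: ESelf now resides on the cxq; park until designated OnDeck
//     while (_OnDeck != ESelf) ParkCommon (ESelf, 0) ;
//   }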

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT.
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked.
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate the two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;     // pass OnDeck to w.
                      // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {          // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse
// transfer, but in practice notifyAll() for large numbers of threads is rare
// and not time-critical.  Beware too, that we invert the order of the waiters.
// Let's say that the waitset is "ABCD" and the cxq is "XYZ".  After a
// notifyAll() the waitset will be empty and the cxq will be "DCBAXYZ".
// This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the inner lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList.
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;        // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {          // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                      // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;            // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck.
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and then
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked by
// mutators, but where the mutators are known to be blocked at the reentry thread
// state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}
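
// For illustration only, typical call-site usage of the two locking flavors.
// This is a hedged sketch: MutexLocker/MutexLockerEx are the scoped-locking
// jackets defined in mutexLocker.hpp (not in this file), and SomeGlobal_lock
// is a hypothetical Monitor:
//
//   { MutexLocker ml(SomeGlobal_lock);     // lock() -- with safepoint check
//     // ... critical section ...
//   }                                      // unlock() in the MutexLocker DTOR
//
//   { MutexLockerEx ml(SomeGlobal_lock, Mutex::_no_safepoint_check_flag);
//     // ... critical section that must not block at a safepoint ...
//   }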

// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Does not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the ThreadLocalStorage
    // might return NULL.  Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;        // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {                       // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}
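
// For illustration only, the canonical condition-wait loop that wait() and
// notify()/notify_all() below are designed to support.  This is a hedged
// sketch: SomeGlobal_lock and condition_is_true are hypothetical names, and
// wait() is shown with its default arguments:
//
//   SomeGlobal_lock->lock();
//   while (!condition_is_true) {
//     SomeGlobal_lock->wait();       // releases and reacquires the lock
//   }
//   // ... act on the condition while still holding the lock ...
//   SomeGlobal_lock->unlock();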

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;          // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck    = NULL ;
  m->_WaitSet   = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another -- it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
          SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
           "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT