/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero. Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq. Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms. We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS. If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
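// * A pictorial sketch of the _LockWord encoding that the code below relies
//   on. (Illustrative only -- a simplified preview of TryLock() and
//   AcquireOrPush(), which appear later in this file; field widths are
//   notional, not normative.)
//
//      _LockWord.FullWord:  [ cxq head pointer ................ | _LBIT ]
//
//   Acquire: CAS the LockByte from 0 to 1, preserving the cxq head bits:
//      v = _LockWord.FullWord;
//      if ((v & _LBIT) == 0 && CASPTR(&_LockWord, v, v|_LBIT) == v) ...  // acquired
//   Contend: push Self's ParkEvent onto the cxq, preserving the LockByte:
//      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
//      CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);                     // pushed
//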
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//      [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   We use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path. (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor. The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread. (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness. If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor. After a thread acquires
//   the lock it will decrement the AcquireCounter field. When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList. (A sketch of this idea appears below.)
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern. Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
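// * A sketch of the AcquireCounter remedy mentioned above. (Hypothetical --
//   not implemented; "_AcquireCounter", the reset constant K, and the helper
//   names are invented for illustration.) Conceptually, in unlock():
//
//      if (--_AcquireCounter > 0) {
//        // normal competitive-handoff release
//      } else {
//        _AcquireCounter = K;        // reset the abdication interval
//        GrantLockTo(_EntryList);    // direct handoff to the head ...
//        MoveSelfToEntryListTail();  // ... then requeue Self at the tail
//      }
//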
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread. This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList. (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list. If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely. Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant. The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null. If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList. By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread. Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck. Beware
//      that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck. Note that there's never
//      any sense of contention on the inner lock, however. Threads never contend
//      or wait for the inner lock. (This asymmetric handoff is sketched below.)
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
//
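// * The asymmetric OnDeck inner-lock life cycle, condensed. (Illustrative
//   only -- a simplified view of the real protocol in IUnlock() and ILock()
//   below, with error paths and optional optimizations elided.)
//
//      // exiting thread, in IUnlock(), after dropping the outer lock:
//      if (CASPTR(&_OnDeck, NULL, _LBIT) != UNS(NULL)) return;  // trylock only
//      w = detach-and-drain the cxq into the EntryList, unlink the head;
//      _OnDeck = w;        // pass the inner lock to the successor ...
//      w->unpark();        // ... and wake it
//
//      // successor, in ILock(), once it finally acquires the outer lock:
//      assert(_OnDeck == ESelf, "invariant");
//      _OnDeck = NULL;     // the *successor* drops the inner lock
//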
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet. Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse. (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes. The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction. See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark. Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark. The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133. That is, we
//   guarantee at least entry consistency, if not something stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand. The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release(). A thread would simply pop an element from the local stack before it
//   enqueued or park()ed. When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
//   (A sketch of this idea appears below.)
//
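// * A sketch of that per-thread ParkEvent cache. (Hypothetical -- not
//   implemented; "_FreeStack", "PopEvent" and "PushEvent" are invented names.)
//
//      ParkEvent * Thread::PopEvent() {
//        ParkEvent * e = _FreeStack;          // thread-local: no locking needed
//        if (e != NULL) { _FreeStack = e->FreeNext; return e; }
//        return ParkEvent::Allocate(this);    // cache miss -- fall back
//      }
//      void Thread::PushEvent(ParkEvent * e) {
//        e->FreeNext = _FreeStack;            // contention over: return the
//        _FreeStack = e;                      // event to the local cache
//      }
//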
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes. That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor. While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way. Melding
//   them together was facile -- a bit too facile. The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a, c, s)  \
  intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m)                   \
  {                                \
    static volatile int ctr = 0;   \
    int x = ++ctr;                 \
    if ((x & (x - 1)) == 0) {      \
      ::printf("%d:%s\n", x, #m);  \
      ::fflush(stdout);            \
    }                              \
  }
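
// Usage note (illustrative, matching how the rest of this file calls it):
// CASPTR(addr, cmp, set) atomically installs "set" at "addr" iff the current
// value equals "cmp", and always returns the value it observed. Callers
// therefore test for success by comparing the return value against "cmp":
//
//   const intptr_t v = _LockWord.FullWord;
//   if (CASPTR(&_LockWord, v, v|_LBIT) == v) {
//     // success -- this thread set the LockByte
//   }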

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless. (At worst it'll slightly retard
// acquisition times). The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock()) return 1;
  if (!os::is_MP()) return 0;

  int Probes = 0;
  int Delay = 0;
  int Steps = 0;
  int SpinMax = NativeMonitorSpinLimit;
  int flgs = NativeMonitorFlags;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR(&_LockWord, v, v|_LBIT) == v) {
        return 1;
      }
      continue;
    }

    if ((flgs & 8) == 0) {
      SpinPause();
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay;
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}
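
// To make the backoff schedule in TrySpin() concrete: Delay is updated once
// every 8 probes via Delay = ((Delay << 1)|1) & 0x7FF, so it walks the
// sequence 1, 3, 7, 15, ..., 1023, 2047 and then saturates at 0x7FF
// iterations between successive polls of the LockWord. (Worked example only;
// the constants are the empirically derived values used above.)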

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt;
  }
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
      if (u == v) return 1;      // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
      if (u == v) return 0;      // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions. The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock(Thread * Self) {
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    return;
  }

  ParkEvent * const ESelf = Self->_MutexEvent;
  assert(_OnDeck != ESelf, "invariant");

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT.
  // synchronizer.cpp uses a similar optimization.
  if (TrySpin(Self)) goto Exeunt;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset();
  OrderAccess::fence();

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR(&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP;
  }

  if (AcquireOrPush(ESelf)) goto Exeunt;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon(ESelf, 0);
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert(_OnDeck == ESelf, "invariant");
    if (TrySpin(Self)) break;
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock. (Option (B) is sketched just below.)
  goto Exeunt;
}
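
// A sketch of option (B) from the comment above -- direct handoff of the
// inner lock. (Hypothetical -- not implemented; this would replace the
// "_OnDeck = NULL" in ILock()'s slow-path epilogue.)
//
//   ParkEvent * h = _EntryList;
//   if (h != NULL) {
//     _EntryList = h->ListNext;    // constant-time unlink of the head
//     _OnDeck = h;                 // bequeath the inner lock to the next thread
//   } else {
//     _OnDeck = NULL;              // no heir -- just drop the inner lock
//   }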

void Monitor::IUnlock(bool RelaxAssert) {
  assert(ILocked(), "invariant");
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock. Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible. That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock. As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock. In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0);  // drop outer lock

  OrderAccess::storeload();
  ParkEvent * const w = _OnDeck;
  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession. The LSBit of
    // OnDeck allows us to discriminate the two cases. If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive. This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation. Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock. In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark();
    return;
  }

  intptr_t cxq = _LockWord.FullWord;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it. Succession is now that thread's responsibility.
    return;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck. That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR(&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return;
  }

  ParkEvent * List = _EntryList;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert(List == _EntryList, "invariant");
    ParkEvent * const w = List;
    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
    _EntryList = w->ListNext;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert(UNS(_OnDeck) == _LBIT, "invariant");
    _OnDeck = w;     // pass OnDeck to w.
                     // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload();
    cxq = _LockWord.FullWord;
    if (cxq & _LBIT) return;

    w->unpark();
    return;
  }

  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
      const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable. In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable. For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above. Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // (See the sketch after this function.)
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority. See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert(_EntryList == NULL, "invariant");
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
    assert(List != NULL, "invariant");
    goto WakeOne;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert(UNS(_OnDeck) == _LBIT, "invariant");
  _OnDeck = NULL;            // Release inner lock.
  OrderAccess::storeload();  // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock. Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession;   // potential race -- re-run succession
  }
  return;
}
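
// A sketch of the cxq suffix-detach idea mentioned in the retry loop above,
// using its "PQRABCD" example. (Hypothetical -- not implemented; A denotes
// the head observed by the first fetch, before "PQR" were pushed.)
//
//   ParkEvent * Suffix = A->ListNext;   // "BCD" -- the stable interior
//   A->ListNext = NULL;                 // truncate: the cxq is now "PQRA"
//   assert(_EntryList == NULL, "invariant");
//   _EntryList = Suffix;                // transfer "BCD" without a 2nd CAS
//                                       // on the high-traffic LockWord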

bool Monitor::notify() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  if (_WaitSet == NULL) return true;
  NotifyCount++;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
  ParkEvent * nfy = _WaitSet;
  if (nfy != NULL) {     // DCL idiom
    _WaitSet = nfy->ListNext;
    assert(nfy->Notified == 0, "invariant");
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR(&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence();
    nfy->Notified = 1;
  }
  Thread::muxRelease(_WaitLock);
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark();
  }
  assert(ILocked(), "invariant");
  return true;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq. This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters. Let's say that the
// waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.

bool Monitor::notify_all() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  while (_WaitSet != NULL) notify();
  return true;
}

int Monitor::IWait(Thread * Self, jlong timo) {
  assert(ILocked(), "invariant");

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent;
  ESelf->Notified = 0;
  ESelf->reset();
  OrderAccess::fence();

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet. Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class. The WaitSet would be composed of WaitEvents. Only the
  // owner of the outer lock would manipulate the WaitSet. A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock. More precisely,
  // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.
  // (The WaitEvent alternative is sketched just below.)
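  //
  // A sketch of that WaitEvent alternative. (Hypothetical -- not implemented;
  // "_WaitEvent" is an invented field.) Note the absence of any WaitLock:
  //
  //    ParkEvent * const WSelf = Self->_WaitEvent;
  //    WSelf->ListNext = _WaitSet; _WaitSet = WSelf;    // outer lock is held
  //    IUnlock(true);                                   // release outer lock
  //    while (!WSelf->Notified) ParkCommon(WSelf, timo);
  //    ILock(Self);                                     // reacquire outer lock
  //    if (WSelf is still on _WaitSet) unlink it;       // safe: lock is held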

  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
  ESelf->ListNext = _WaitSet;
  _WaitSet = ESelf;
  Thread::muxRelease(_WaitLock);

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
  // and then finds *itself* on the cxq. During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock(true);

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break;
    int err = ParkCommon(ESelf, timo);
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence();
  int WasOnWaitSet = 0;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
    if (ESelf->Notified == 0) {     // DCL idiom
      assert(_OnDeck != ESelf, "invariant");   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet;
      ParkEvent * q = NULL;     // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p;
        p = p->ListNext;
      }
      assert(p == ESelf, "invariant");
      if (p == _WaitSet) {      // found at head
        assert(q == NULL, "invariant");
        _WaitSet = p->ListNext;
      } else {                  // found in interior
        assert(q->ListNext == p, "invariant");
        q->ListNext = p->ListNext;
      }
      WasOnWaitSet = 1;     // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease(_WaitLock);
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout. ESelf is not resident on any list and is not OnDeck.
    assert(_OnDeck != ESelf, "invariant");
    ILock(Self);
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break;
      ParkCommon(ESelf, 0);
    }
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
  }

  assert(ILocked(), "invariant");
  return WasOnWaitSet != 0;     // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity. It's possible that the need for sneaking could be obviated
// as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section. The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where mutators were known blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking. We'd
// decouple lock acquisition and parking. The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
// (A sketch of the narrow jacket appears below.)
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.
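
// A sketch of the narrow TBIVM jacket suggested above. (Hypothetical -- not
// implemented; the point is that the thread enqueues and parks *without*
// holding the physical lock, and the state transition covers only park().)
//
//   if (AcquireOrPush(ESelf)) return;   // acquired outright -- never parked
//   while (_OnDeck != ESelf) {
//     ThreadBlockInVM tbivm(jt);        // narrow jacket around park() only;
//     ParkCommon(ESelf, 0);             // the TBIVM DTOR may recurse into
//   }                                   // lock(), hence the N>=2 ParkEvent
//                                       // stack mentioned above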
883 // 884 // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly 885 // wrapped calls to park(), then we could likely do away with sneaking. We'd 886 // decouple lock acquisition and parking. The critical invariant to eliminating 887 // sneaking is to ensure that we never "physically" acquire the lock while TBIVM. 888 // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket. 889 // One difficulty with this approach is that the TBIVM wrapper could recurse and 890 // call lock() deep from within a lock() call, while the MutexEvent was already enqueued. 891 // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem. 892 // 893 // But of course the proper ultimate approach is to avoid schemes that require explicit 894 // sneaking or dependence on any any clever invariants or subtle implementation properties 895 // of Mutex-Monitor and instead directly address the underlying design flaw. 896 897 void Monitor::lock(Thread * Self) { 898 // Ensure that the Monitor requires/allows safepoint checks. 899 assert(_safepoint_check_required != Monitor::_safepoint_check_never, 900 err_msg("This lock should never have a safepoint check: %s", 901 name())); 902 903 #ifdef CHECK_UNHANDLED_OOPS 904 // Clear unhandled oops so we get a crash right away. Only clear for non-vm 905 // or GC threads. 906 if (Self->is_Java_thread()) { 907 Self->clear_unhandled_oops(); 908 } 909 #endif // CHECK_UNHANDLED_OOPS 910 911 debug_only(check_prelock_state(Self)); 912 assert(_owner != Self, "invariant"); 913 assert(_OnDeck != Self->_MutexEvent, "invariant"); 914 915 if (TryFast()) { 916 Exeunt: 917 assert(ILocked(), "invariant"); 918 assert(owner() == NULL, "invariant"); 919 set_owner(Self); 920 return; 921 } 922 923 // The lock is contended ... 924 925 bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint(); 926 if (can_sneak && _owner == NULL) { 927 // a java thread has locked the lock but has not entered the 928 // critical region -- let's just pretend we've locked the lock 929 // and go on. we note this with _snuck so we can also 930 // pretend to unlock when the time comes. 931 _snuck = true; 932 goto Exeunt; 933 } 934 935 // Try a brief spin to avoid passing thru thread state transition ... 936 if (TrySpin(Self)) goto Exeunt; 937 938 check_block_state(Self); 939 if (Self->is_Java_thread()) { 940 // Horrible dictu - we suffer through a state transition 941 assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex"); 942 ThreadBlockInVM tbivm((JavaThread *) Self); 943 ILock(Self); 944 } else { 945 // Mirabile dictu 946 ILock(Self); 947 } 948 goto Exeunt; 949 } 950 951 void Monitor::lock() { 952 this->lock(Thread::current()); 953 } 954 955 // Lock without safepoint check - a degenerate variant of lock(). 956 // Should ONLY be used by safepoint code and other code 957 // that is guaranteed not to block while running inside the VM. If this is called with 958 // thread state set to be in VM, the safepoint synchronization code will deadlock! 959 960 void Monitor::lock_without_safepoint_check(Thread * Self) { 961 // Ensure that the Monitor does not require or allow safepoint checks. 
  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
         err_msg("This lock should always have a safepoint check: %s",
                 name()));
  assert(_owner != Self, "invariant");
  ILock(Self);
  assert(_owner == NULL, "invariant");
  set_owner(Self);
}

void Monitor::lock_without_safepoint_check() {
  lock_without_safepoint_check(Thread::current());
}


// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self);   // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert(_owner == NULL, "invariant");
    set_owner(Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert(_owner == Thread::current(), "invariant");
  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
  set_owner(NULL);
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
// over a pthread_mutex_t would work equally as well, but require more platform-specific
// code -- a "PlatformMutex". (A sketch of such a layer appears below.) Alternatively,
// a simple layer over muxAcquire-muxRelease would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available. Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention. That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list. This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test. I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...
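//
// A sketch of the "PlatformMutex" layer mentioned above. (Hypothetical -- not
// part of HotSpot; shown only to make the alternative concrete. A per-OS
// variant would be needed; this one assumes POSIX.)
//
//   #include <pthread.h>
//
//   class PlatformMutex {
//     pthread_mutex_t _mx;
//    public:
//     PlatformMutex()  { pthread_mutex_init(&_mx, NULL); }
//     ~PlatformMutex() { pthread_mutex_destroy(&_mx); }
//     void lock()      { pthread_mutex_lock(&_mx); }
//     void unlock()    { pthread_mutex_unlock(&_mx); }
//   };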

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return;
  }

  if (TrySpin(NULL)) goto Exeunt;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
  ESelf->reset();
  OrderAccess::storeload();

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush(ESelf)) {
    ParkEvent::Release(ESelf);      // surrender the ParkEvent
    goto Exeunt;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break;
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;
  ParkEvent::Release(ESelf);        // surrender the ParkEvent
  goto Exeunt;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

bool Monitor::wait(bool no_safepoint_check, long timeout,
                   bool as_suspend_equivalent) {
  // Make sure safepoint checking is used properly.
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
         err_msg("This lock should never have a safepoint check: %s", name()));
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
         err_msg("This lock should always have a safepoint check: %s", name()));

  Thread * const Self = Thread::current();
  assert(_owner == Self, "invariant");
  assert(ILocked(), "invariant");

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
  // !no_safepoint_check logically implies java_thread
  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait(Self, timeout);
  } else {
    assert(Self->is_Java_thread(), "invariant");
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait(Self, timeout);

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert(ILocked(), "invariant");
      IUnlock(true);
      jt->java_suspend_self();
      ILock(Self);
      assert(ILocked(), "invariant");
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert(ILocked(), "invariant");
  assert(_owner == NULL, "invariant");
  set_owner(Self);
  return wait_status != 0;      // return true IFF timeout
}

Monitor::~Monitor() {
  assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
}

void Monitor::ClearMonitor(Monitor * m, const char *name) {
  m->_owner = NULL;
  m->_snuck = false;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN");
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0;
  m->_EntryList = NULL;
  m->_OnDeck = NULL;
  m->_WaitSet = NULL;
  m->_WaitLock[0] = 0;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
                 SafepointCheckRequired safepoint_check_required) {
  ClearMonitor(this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

Mutex::~Mutex() {
  assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
}

Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) {
  ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock) {
      return true;
    }
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner;       // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order. For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1. (See the worked example below.)
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
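    //
    // A worked example of the rule above (illustrative only; the ranks are
    // made up): suppose the thread holds locks of ranks {7, 9}, so m1 has
    // rank 7, and now tries to acquire m2. If rank(m2) is 5 the acquisition
    // is permitted (5 < 7, the order is preserved); if rank(m2) is 8 the
    // fatal() below fires, because another thread acquiring the same two
    // locks in rank order could deadlock with us.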
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
          SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL;        // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
           "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT