/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
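//
// * LockWord layout (a sketch only; see mutex.hpp for the actual declaration):
//   the lock word is a union with a separately addressable low-order byte,
//   roughly:
//
//     union SplitWord {          // full word with an addressable LockByte
//       volatile intptr_t FullWord;                 // (cxq head | LockByte)
//       volatile void *   Address;
//       volatile jbyte    Bytes[sizeof(intptr_t)];  // Bytes[_LSBINDEX] = LockByte
//     };
//
//   Acquisition CASes the FullWord so the LockByte and the cxq head update
//   atomically as a unit; the uncontended release stores 0 into just the
//   LockByte.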
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself
//   to the tail of the EntryList.  (A sketch appears below.)
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from the ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
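//
// * Sketch of the AcquireCounter remedy mentioned above.  This is purely
//   hypothetical -- no such field exists in the current implementation:
//
//     // in lock(), immediately after acquiring the outer lock:
//     if (--_AcquireCounter <= 0) {     // _AcquireCounter: assumed new field
//       _AcquireCounter = K;            // K: assumed tuning constant
//       // abdicate: hand the lock directly to the EntryList head and
//       // requeue Self at the tail of the EntryList.
//     }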
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck.  Note that there's never
//      any sense of contention on the inner lock, however.  Threads never contend
//      or wait for the inner lock.  (A sketch of this trylock idiom appears below.)
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
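//
// * In outline, the OnDeck "trylock" idiom sketched above reduces to the
//   following (illustrative only; see IUnlock() below for the real code):
//
//     if (Atomic::cmpxchg_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
//       // This thread now owns succession duty: detach the cxq with CAS,
//       // merge the RATs into the EntryList, select a successor, publish
//       // it via _OnDeck, and unpark it.
//     }
//     // On failure, simply return -- succession is another thread's problem.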
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.  That is, we
//   guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack
//   before it enqueued or park()ed.  When the contention was over the thread
//   would push the no-longer-needed ParkEvent back onto its stack.
//   (A sketch appears below.)
//
// * A slightly reduced form of ILock() and IUnlock() have been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case
//   it will call SafepointSynchronize::block(), which in turn may call
//   Safepoint_lock->lock(), etc.  In that particular case a call to lock() for a
//   given Monitor can end up recursively calling lock() on another monitor.
//   While distasteful, this is largely benign as the calls come from the jacket
//   that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
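//
// * A sketch of the per-thread ParkEvent stack floated above.  _FreeEventStack
//   is a hypothetical field -- Thread:: has no such member today:
//
//     ParkEvent * ev = Self->_FreeEventStack;      // pop from the local cache
//     if (ev != NULL) Self->_FreeEventStack = ev->ListNext;
//     else ev = ParkEvent::Allocate(Self);         // cache miss - hit the allocator
//     ... enqueue or park() on ev ...
//     ev->ListNext = Self->_FreeEventStack;        // push back when contention ends
//     Self->_FreeEventStack = ev;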
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

#define UNS(x) (uintptr_t(x))
#define TRACE(m)                   \
  {                                \
    static volatile int ctr = 0;   \
    int x = ++ctr;                 \
    if ((x & (x - 1)) == 0) {      \
      ::printf("%d:%s\n", x, #m);  \
      ::fflush(stdout);            \
    }                              \
  }

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = Atomic::cmpxchg((intptr_t)_LBIT, &_LockWord.FullWord, (intptr_t)0);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.
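//
// The Delay update in the loop below, Delay = ((Delay << 1)|1) & 0x7FF, walks
// the sequence 1, 3, 7, 15, ... and saturates at 0x7FF (2047) iterations:
// geometric growth with a hard clamp, applied once every 8 probes.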

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock()) return 1;
  if (!os::is_MP()) return 0;

  int Probes  = 0;
  int Delay   = 0;
  int Steps   = 0;
  int SpinMax = NativeMonitorSpinLimit;
  int flgs    = NativeMonitorFlags;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v) == v) {
        return 1;
      }
      continue;
    }

    if ((flgs & 8) == 0) {
      SpinPause();
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay;
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt;
  }
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 1;        // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 0;        // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.
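//
// Typical internal usage (a sketch): callers perform any required thread-state
// transition first, then pair ILock/IUnlock directly:
//
//   ILock(Self);        // blocks via park(); never touches _owner
//   ... critical section ...
//   IUnlock(false);
//
// The public lock()/unlock() wrappers below layer _owner bookkeeping and
// safepoint interaction on top of this pair.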

void Monitor::ILock(Thread * Self) {
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    return;
  }

  ParkEvent * const ESelf = Self->_MutexEvent;
  assert(_OnDeck != ESelf, "invariant");

  // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin(Self)) goto Exeunt;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset();
  OrderAccess::fence();

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && Atomic::cmpxchg_if_null(ESelf, &_OnDeck)) {
    goto OnDeck_LOOP;
  }

  if (AcquireOrPush(ESelf)) goto Exeunt;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
    ParkCommon(ESelf, 0);
  }

  // Self is now in the OnDeck position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert(_OnDeck == ESelf, "invariant");
    if (TrySpin(Self)) break;
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next OnDeck thread from the EntryList.
  //    If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt;
}

void Monitor::IUnlock(bool RelaxAssert) {
  assert(ILocked(), "invariant");
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
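  //
  // In outline, the exit protocol below is the classic Dekker-duality idiom
  // (a sketch of the code that follows):
  //
  //   release_store(&LockByte, 0);   // ST - drop the outer lock
  //   storeload();                   // pivot - order the ST before the LDs
  //   examine _OnDeck and cxq ...    // LD - decide whether succession is needed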
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock
  // store provides for progress conditions and succession and is _not related
  // to exclusion safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0));  // drop outer lock

  OrderAccess::storeload();
  ParkEvent * const w = _OnDeck;  // raw load as we will just return if non-NULL
  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the OnDeck thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark();
    return;
  }

  intptr_t cxq = _LockWord.FullWord;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (!Atomic::cmpxchg_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
    return;
  }

  ParkEvent * List = _EntryList;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert(List == _EntryList, "invariant");
    ParkEvent * const w = List;
    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
    _EntryList = w->ListNext;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert(UNS(_OnDeck) == _LBIT, "invariant");

    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
    // w will clear _OnDeck once it acquires the outer lock.
    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
    // with its critical section and then enter this code to unlock the mutex. So
    // you can have multiple threads active in IUnlock at the same time.
    OrderAccess::release_store(&_OnDeck, w);

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload();
    cxq = _LockWord.FullWord;
    if (cxq & _LBIT) return;

    w->unpark();
    return;
  }

  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
      const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert(_EntryList == NULL, "invariant");
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
    assert(List != NULL, "invariant");
    goto WakeOne;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert(UNS(_OnDeck) == _LBIT, "invariant");
  _OnDeck = NULL;            // Release inner lock.
  OrderAccess::storeload();  // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession;         // potential race -- re-run succession
  }
  return;
}

bool Monitor::notify() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  if (_WaitSet == NULL) return true;
  NotifyCount++;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend it to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
  ParkEvent * nfy = _WaitSet;
  if (nfy != NULL) {         // DCL idiom
    _WaitSet = nfy->ListNext;
    assert(nfy->Notified == 0, "invariant");
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence();
    nfy->Notified = 1;
  }
  Thread::muxRelease(_WaitLock);
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark();
  }
  assert(ILocked(), "invariant");
  return true;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse
// transfer (see the sketch below), but in practice notifyAll() for large #s of
// threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  while (_WaitSet != NULL) notify();
  return true;
}
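
// A sketch of the bulk en-masse transfer alternative mentioned above
// (hypothetical, as the current code simply loops over notify()):
//
//   Thread::muxAcquire(_WaitLock, "notify_all:WaitLock");
//   ParkEvent * all = _WaitSet;    // detach the entire WaitSet at once
//   _WaitSet = NULL;
//   Thread::muxRelease(_WaitLock);
//   // splice "all" onto the cxq with a single CAS loop, then fence() and
//   // set Notified on each transferred element, as notify() does.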

int Monitor::IWait(Thread * Self, jlong timo) {
  assert(ILocked(), "invariant");

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent;
  ESelf->Notified = 0;
  ESelf->reset();
  OrderAccess::fence();

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
  ESelf->ListNext = _WaitSet;
  _WaitSet = ESelf;
  Thread::muxRelease(_WaitLock);

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock(true);

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break;
    int err = ParkCommon(ESelf, timo);
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence();
  int WasOnWaitSet = 0;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
    if (ESelf->Notified == 0) {     // DCL idiom
      assert(_OnDeck != ESelf, "invariant");  // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet;
      ParkEvent * q = NULL;         // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p;
        p = p->ListNext;
      }
      assert(p == ESelf, "invariant");
      if (p == _WaitSet) {          // found at head
        assert(q == NULL, "invariant");
        _WaitSet = p->ListNext;
      } else {                      // found in interior
        assert(q->ListNext == p, "invariant");
        q->ListNext = p->ListNext;
      }
      WasOnWaitSet = 1;  // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease(_WaitLock);
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert(_OnDeck != ESelf, "invariant");
    ILock(Self);
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
      ParkCommon(ESelf, 0);
    }
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
  }

  assert(ILocked(), "invariant");
  return WasOnWaitSet != 0;         // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows
// the VM thread to logically acquire locks that have already been physically
// locked by mutators, but where the mutators are known to be blocked at the
// reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock(Thread * Self) {
  // Ensure that the Monitor requires/allows safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_never,
         "This lock should never have a safepoint check: %s", name());

#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert(_owner != Self, "invariant");
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(owner() == NULL, "invariant");
    set_owner(Self);
    return;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin(Self)) goto Exeunt;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horrible dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm((JavaThread *) Self);
    ILock(Self);
  } else {
    // Mirabile dictu
    ILock(Self);
  }
  goto Exeunt;
}

void Monitor::lock() {
  this->lock(Thread::current());
}
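
// Typical call sites pair lock() with unlock() (defined below), either
// directly (a sketch; "SomeMonitor_lock" is a hypothetical Monitor*):
//
//   SomeMonitor_lock->lock();     // may safepoint-check for JavaThreads
//   ...
//   SomeMonitor_lock->unlock();
//
// or via the MutexLocker RAII wrapper (see mutexLocker.hpp).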

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is
// called with thread state set to be in VM, the safepoint synchronization
// code will deadlock!

void Monitor::lock_without_safepoint_check(Thread * Self) {
  // Ensure that the Monitor does not require or allow safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
         "This lock should always have a safepoint check: %s", name());
  assert(_owner != Self, "invariant");
  ILock(Self);
  assert(_owner == NULL, "invariant");
  set_owner(Self);
}

void Monitor::lock_without_safepoint_check() {
  lock_without_safepoint_check(Thread::current());
}


// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self);  // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert(_owner == NULL, "invariant");
    set_owner(Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert(_owner == Thread::current(), "invariant");
  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
  set_owner(NULL);
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the Thread::current_or_null()
    // might return NULL.  Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = Thread::current_or_null();
    return;
  }

  if (TrySpin(NULL)) goto Exeunt;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
  ESelf->reset();
  OrderAccess::storeload();

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush(ESelf)) {
    ParkEvent::Release(ESelf);  // surrender the ParkEvent
    goto Exeunt;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;
  ParkEvent::Release(ESelf);    // surrender the ParkEvent
  goto Exeunt;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL;
  if (_snuck) {                 // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}
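
// Canonical condition-wait pattern for Monitor::wait() below (a sketch;
// "ready" stands for a caller-defined predicate, and the call assumes the
// default arguments declared in mutex.hpp):
//
//   mon->lock();
//   while (!ready) {
//     mon->wait();    // re-test in a loop: timeouts and spurious
//   }                 // wakeups can propagate back to the caller
//   mon->unlock();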

bool Monitor::wait(bool no_safepoint_check, long timeout,
                   bool as_suspend_equivalent) {
  // Make sure safepoint checking is used properly.
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
         "This lock should never have a safepoint check: %s", name());
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
         "This lock should always have a safepoint check: %s", name());

  Thread * const Self = Thread::current();
  assert(_owner == Self, "invariant");
  assert(ILocked(), "invariant");

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
  // !no_safepoint_check logically implies java_thread
  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait(Self, timeout);
  } else {
    assert(Self->is_Java_thread(), "invariant");
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait(Self, timeout);

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert(ILocked(), "invariant");
      IUnlock(true);
      jt->java_suspend_self();
      ILock(Self);
      assert(ILocked(), "invariant");
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert(ILocked(), "invariant");
  assert(_owner == NULL, "invariant");
  set_owner(Self);
  return wait_status != 0;          // return true IFF timeout
}

Monitor::~Monitor() {
#ifdef ASSERT
  uintptr_t owner     = UNS(_owner);
  uintptr_t lockword  = UNS(_LockWord.FullWord);
  uintptr_t entrylist = UNS(_EntryList);
  uintptr_t waitset   = UNS(_WaitSet);
  uintptr_t ondeck    = UNS(_OnDeck);
  // Print _name with precision limit, in case failure is due to memory
  // corruption that also trashed _name.
  assert((owner|lockword|entrylist|waitset|ondeck) == 0,
         "%.*s: _owner(" INTPTR_FORMAT ")|_LockWord(" INTPTR_FORMAT ")|_EntryList(" INTPTR_FORMAT ")|_WaitSet("
         INTPTR_FORMAT ")|_OnDeck(" INTPTR_FORMAT ") != 0",
         MONITOR_NAME_LEN, _name, owner, lockword, entrylist, waitset, ondeck);
#endif
}

void Monitor::ClearMonitor(Monitor * m, const char *name) {
  m->_owner = NULL;
  m->_snuck = false;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN");
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0;
  m->_EntryList = NULL;
  m->_OnDeck    = NULL;
  m->_WaitSet   = NULL;
  m->_WaitLock[0] = 0;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
                 SafepointCheckRequired safepoint_check_required) {
  ClearMonitor(this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) {
  ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, p2i(this));
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
}
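
// A construction sketch for the constructors above ("MyMonitor_lock" is a
// hypothetical example; the rank and safepoint-check policy feed the
// assertions in lock()/wait() and the rank checking below):
//
//   Monitor * MyMonitor_lock = new Monitor(Mutex::leaf, "MyMonitor_lock",
//                                          true /* allow_vm_block */,
//                                          Monitor::_safepoint_check_always);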

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
               p2i(this), _LockWord.FullWord, _name, p2i(_owner));
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock) {
      return true;
    }
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner;       // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
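    // Example: a thread whose least-ranked held lock is nonleaf may acquire
    // a leaf-ranked lock (leaf < nonleaf); attempting the reverse order
    // trips the fatal() below.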
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
        SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
            "possible deadlock", this->name(), this->rank(),
            locks->name(), locks->rank());
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL;            // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal("VM thread using lock %s (not allowed to block on)", name());
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
         "locking not allowed when crash protection is set");
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT