/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
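//
//   Illustrative sketch (not compiled): the _LockWord packs the LockByte and
//   the cxq head pointer into a single word, so arrival is one CAS.  This is a
//   condensed form of the AcquireOrPush() push path defined later in this file:
//
//     // intptr_t v = _LockWord.FullWord;                // (cxq-head | LockByte)
//     // ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);    // link self behind head
//     // Atomic::cmpxchg(intptr_t(ESelf)|_LBIT,          // install self as head,
//     //                 &_LockWord.FullWord, v);        // preserving the lock bit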
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck.  Note that there's never
//      any sense of contention on the inner lock, however.  Threads never contend
//      or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
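//
//   Illustrative sketch (not compiled), condensed from Monitor::notify() below.
//   The notifier still holds the outer lock, so the transferred wakee can't
//   impale itself -- it simply waits its turn on the cxq:
//
//     // ParkEvent * nfy = _WaitSet;                    // unlink head of WaitSet
//     // _WaitSet = nfy->ListNext;
//     // nfy->ListNext = (ParkEvent *)(v & ~_LBIT);     // prepend nfy to the cxq
//     // Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v);
//     // nfy->Notified = 1;                             // unpark deferred to unlock()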
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong or stronger than the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.  (See the
//   illustrative sketch further below.)
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
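//
// * Illustrative sketch (not compiled) of the per-thread ParkEvent stack
//   suggested in the Thread:: bullet above; the _event_cache field and the
//   helper names are hypothetical:
//
//     // ParkEvent * Thread::borrow_event() {
//     //   ParkEvent * ev = _event_cache;          // pop from the local stack
//     //   if (ev != NULL) { _event_cache = ev->ListNext; return ev; }
//     //   return ParkEvent::Allocate(this);       // cold path - global allocator
//     // }
//     // void Thread::return_event(ParkEvent * ev) {
//     //   ev->ListNext = _event_cache;            // push back onto the stack
//     //   _event_cache = ev;
//     // }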
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

#define UNS(x) (uintptr_t(x))
#define TRACE(m)                   \
  {                                \
    static volatile int ctr = 0;   \
    int x = ++ctr;                 \
    if ((x & (x - 1)) == 0) {      \
      ::printf("%d:%s\n", x, #m);  \
      ::fflush(stdout);            \
    }                              \
  }

const intptr_t _LBIT = 1;

// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
#ifdef VM_LITTLE_ENDIAN
#define _LSBINDEX 0
#else
#define _LSBINDEX (sizeof(intptr_t)-1)
#endif

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = Atomic::cmpxchg(_LBIT, &_LockWord.FullWord, (intptr_t)0);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}
// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock()) return 1;
  if (!os::is_MP()) return 0;

  int Probes  = 0;
  int Delay   = 0;
  int Steps   = 0;
  int SpinMax = NativeMonitorSpinLimit;
  int flgs    = NativeMonitorFlags;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v) == v) {
        return 1;
      }
      continue;
    }

    if ((flgs & 8) == 0) {
      SpinPause();
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay;
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if ((flgs & 4) == 0 && SafepointMechanism::poll(Self)) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt;
  }
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 1;        // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 0;        // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord change - just retry
  }
}
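
// Illustrative sketch (not compiled): callers treat AcquireOrPush() as a
// combined fast path and enqueue operation -- 1 means the lock was acquired
// outright, 0 means ESelf is now on the cxq and the caller must park until
// promoted to the OnDeck position.  ILock() below follows exactly this shape:
//
//   // if (AcquireOrPush(ESelf)) return;                     // acquired outright
//   // while (OrderAccess::load_acquire(&_OnDeck) != ESelf)  // wait to be OnDeck
//   //   ParkCommon(ESelf, 0);
//   // ... contend for the lock, then clear _OnDeck ...
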
// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock(Thread * Self) {
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    return;
  }

  ParkEvent * const ESelf = Self->_MutexEvent;
  assert(_OnDeck != ESelf, "invariant");

  // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT.
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin(Self)) goto Exeunt;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset();
  OrderAccess::fence();

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
    goto OnDeck_LOOP;
  }

  if (AcquireOrPush(ESelf)) goto Exeunt;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
    ParkCommon(ESelf, 0);
  }

  // Self is now in the OnDeck position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert(_OnDeck == ESelf, "invariant");
    if (TrySpin(Self)) break;
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before_ having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next OnDeck thread from the EntryList.
  //    If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt;
}

void Monitor::IUnlock(bool RelaxAssert) {
  assert(ILocked(), "invariant");
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.
  // As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not_ related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock

  OrderAccess::storeload();
  ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the OnDeck thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is_ stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark();
    return;
  }

  intptr_t cxq = _LockWord.FullWord;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
    return;
  }

  ParkEvent * List = _EntryList;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert(List == _EntryList, "invariant");
    ParkEvent * const w = List;
    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
    _EntryList = w->ListNext;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert(intptr_t(_OnDeck) == _LBIT, "invariant");

    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
    // w will clear _OnDeck once it acquires the outer lock.
    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
    // with its critical section and then enter this code to unlock the mutex.  So
    // you can have multiple threads active in IUnlock at the same time.
    OrderAccess::release_store(&_OnDeck, w);

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload();
    cxq = _LockWord.FullWord;
    if (cxq & _LBIT) return;

    w->unpark();
    return;
  }

  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
      const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert(_EntryList == NULL, "invariant");
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
    assert(List != NULL, "invariant");
    goto WakeOne;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert(intptr_t(_OnDeck) == _LBIT, "invariant");
  _OnDeck = NULL;            // Release inner lock.
  OrderAccess::storeload();  // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession;         // potential race -- re-run succession
  }
  return;
}

bool Monitor::notify() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  if (_WaitSet == NULL) return true;
  NotifyCount++;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
  ParkEvent * nfy = _WaitSet;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext;
    assert(nfy->Notified == 0, "invariant");
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence();
    nfy->Notified = 1;
  }
  Thread::muxRelease(_WaitLock);
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark();
  }
  assert(ILocked(), "invariant");
  return true;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  while (_WaitSet != NULL) notify();
  return true;
}

int Monitor::IWait(Thread * Self, jlong timo) {
  assert(ILocked(), "invariant");

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent;
  ESelf->Notified = 0;
  ESelf->reset();
  OrderAccess::fence();

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before_ it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
  ESelf->ListNext = _WaitSet;
  _WaitSet = ESelf;
  Thread::muxRelease(_WaitLock);

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never_ find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock(true);

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break;
    int err = ParkCommon(ESelf, timo);
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence();
  int WasOnWaitSet = 0;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
    if (ESelf->Notified == 0) {     // DCL idiom
      assert(_OnDeck != ESelf, "invariant");   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet;
      ParkEvent * q = NULL;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p;
        p = p->ListNext;
      }
      assert(p == ESelf, "invariant");
      if (p == _WaitSet) {      // found at head
        assert(q == NULL, "invariant");
        _WaitSet = p->ListNext;
      } else {                  // found in interior
        assert(q->ListNext == p, "invariant");
        q->ListNext = p->ListNext;
      }
      WasOnWaitSet = 1;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease(_WaitLock);
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert(_OnDeck != ESelf, "invariant");
    ILock(Self);
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
      ParkCommon(ESelf, 0);
    }
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
  }

  assert(ILocked(), "invariant");
  return WasOnWaitSet != 0;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and then
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked by
// mutators but where the mutators are known to be blocked at the reentry thread
// state transition.
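//
// Illustrative interleaving (not compiled) of the sneaking scenario described
// above; the thread names are hypothetical:
//
//   // T_java:   ILock(Self)                 // "physical" lock now held
//   // T_java:   stalls in TBIVM reentry     // safepoint pending; _owner still NULL
//   // VMThread: lock(): can_sneak && _owner == NULL
//   //             -> _snuck = true          // logically "acquires" the held lock
//   // VMThread: unlock(): _snuck is set     // skips IUnlock(), clears _snuck
//   // T_java:   resumes after the safepoint, sets _owner, enters its critical section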
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock(Thread * Self) {
  // Ensure that the Monitor requires/allows safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_never,
         "This lock should never have a safepoint check: %s", name());

#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert(_owner != Self, "invariant");
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(owner() == NULL, "invariant");
    set_owner(Self);
    return;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin(Self)) goto Exeunt;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horrible dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm((JavaThread *) Self);
    ILock(Self);
  } else {
    // Mirabile dictu
    ILock(Self);
  }
  goto Exeunt;
}

void Monitor::lock() {
  this->lock(Thread::current());
}
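
// Illustrative usage sketch (not compiled): most VM code does not call
// lock()/unlock() directly but uses the RAII wrappers from mutexLocker.hpp.
// The lock name below is hypothetical:
//
//   // {
//   //   MutexLocker ml(SomeState_lock);  // lock(Thread::current()); Java threads
//   //                                    // pass through a ThreadBlockInVM jacket
//   //   ... mutate the shared state ...
//   // }                                  // destructor calls unlock()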

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check(Thread * Self) {
  // Ensure that the Monitor does not require or allow safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
         "This lock should always have a safepoint check: %s", name());
  assert(_owner != Self, "invariant");
  ILock(Self);
  assert(_owner == NULL, "invariant");
  set_owner(Self);
}

void Monitor::lock_without_safepoint_check() {
  lock_without_safepoint_check(Thread::current());
}


// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after_ the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert(_owner == NULL, "invariant");
    set_owner(Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert(_owner == Thread::current(), "invariant");
  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
  set_owner(NULL);
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the Thread::current_or_null()
    // might return NULL.  Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = Thread::current_or_null();
    return;
  }

  if (TrySpin(NULL)) goto Exeunt;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
  ESelf->reset();
  OrderAccess::storeload();

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release(ESelf);      // surrender the ParkEvent
    goto Exeunt;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;
  ParkEvent::Release(ESelf);        // surrender the ParkEvent
  goto Exeunt;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

bool Monitor::wait(bool no_safepoint_check, long timeout,
                   bool as_suspend_equivalent) {
  // Make sure safepoint checking is used properly.
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
         "This lock should never have a safepoint check: %s", name());
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
         "This lock should always have a safepoint check: %s", name());

  Thread * const Self = Thread::current();
  assert(_owner == Self, "invariant");
  assert(ILocked(), "invariant");

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
  // !no_safepoint_check logically implies java_thread
  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait(Self, timeout);
  } else {
    assert(Self->is_Java_thread(), "invariant");
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait(Self, timeout);

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert(ILocked(), "invariant");
      IUnlock(true);
      jt->java_suspend_self();
      ILock(Self);
      assert(ILocked(), "invariant");
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert(ILocked(), "invariant");
  assert(_owner == NULL, "invariant");
  set_owner(Self);
  return wait_status != 0;          // return true IFF timeout
}

Monitor::~Monitor() {
#ifdef ASSERT
  uintptr_t owner     = UNS(_owner);
  uintptr_t lockword  = UNS(_LockWord.FullWord);
  uintptr_t entrylist = UNS(_EntryList);
  uintptr_t waitset   = UNS(_WaitSet);
  uintptr_t ondeck    = UNS(_OnDeck);
  // Print _name with precision limit, in case failure is due to memory
  // corruption that also trashed _name.
  assert((owner|lockword|entrylist|waitset|ondeck) == 0,
         "%.*s: _owner(" INTPTR_FORMAT ")|_LockWord(" INTPTR_FORMAT ")|_EntryList(" INTPTR_FORMAT ")|_WaitSet("
         INTPTR_FORMAT ")|_OnDeck(" INTPTR_FORMAT ") != 0",
         MONITOR_NAME_LEN, _name, owner, lockword, entrylist, waitset, ondeck);
#endif
}

void Monitor::ClearMonitor(Monitor * m, const char *name) {
  m->_owner = NULL;
  m->_snuck = false;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN");
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0;
  m->_EntryList = NULL;
  m->_OnDeck    = NULL;
  m->_WaitSet   = NULL;
  m->_WaitLock[0] = 0;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
                 SafepointCheckRequired safepoint_check_required) {
  ClearMonitor(this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) {
  ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, p2i(this));
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
}
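
// Illustrative usage sketch (not compiled) of Monitor::wait() defined above:
// because wakeups may be spurious, callers should re-test their predicate in a
// loop.  The lock and predicate names are hypothetical:
//
//   // MutexLocker ml(Work_lock);
//   // while (!work_available()) {
//   //   Work_lock->wait(false /* no_safepoint_check */);  // blocks; may time out
//   // }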


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
               p2i(this), _LockWord.FullWord, _name, p2i(_owner));
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock) {
      return true;
    }
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
        SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
            "possible deadlock", this->name(), this->rank(),
            locks->name(), locks->rank());
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal("VM thread using lock %s (not allowed to block on)", name());
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
         "locking not allowed when crash protection is set");
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT