rev 47413 : Introduce SafepointMechanism
rev 47415 : Add Thread Local handshakes and thread local polling
1 /*
2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "runtime/atomic.hpp"
27 #include "runtime/interfaceSupport.hpp"
28 #include "runtime/mutex.hpp"
29 #include "runtime/orderAccess.inline.hpp"
30 #include "runtime/osThread.hpp"
31 #include "runtime/safepointMechanism.inline.hpp"
32 #include "runtime/thread.inline.hpp"
33 #include "utilities/events.hpp"
34 #include "utilities/macros.hpp"
35
36 // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
37 //
38 // Native Monitor-Mutex locking - theory of operations
39 //
40 // * Native Monitors are completely unrelated to Java-level monitors,
41 // although the "back-end" slow-path implementations share a common lineage.
42 // See objectMonitor:: in synchronizer.cpp.
43 // Native Monitors do *not* support nesting or recursion but otherwise
44 // they're basically Hoare-flavor monitors.
45 //
46 // * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
47 // in the _LockWord from zero to non-zero. Note that the _Owner field
48 // is advisory and is used only to verify that the thread calling unlock()
49 // is indeed the last thread to have acquired the lock.
50 //
51 // * Contending threads "push" themselves onto the front of the contention
52 // queue -- called the cxq -- with CAS and then spin/park.
53 // The _LockWord contains the LockByte as well as the pointer to the head
54 // of the cxq. Colocating the LockByte with the cxq precludes certain races.
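//
// Illustratively (hypothetical helpers, not members of this class):
// locked(v) == ((v & _LBIT) != 0) // low-order lock byte
// cxq_head(v) == (ParkEvent *)(v & ~_LBIT) // remaining bits: cxq head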
55 //
56 // * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
57 // idioms. We currently use MEMBAR in the uncontended unlock() path, as
58 // MEMBAR often has less latency than CAS. If warranted, we could switch to
59 // a CAS:0 mode, using timers to close the resultant race, as is done
60 // with Java Monitors in synchronizer.cpp.
61 //
62 // See the following for a discussion of the relative cost of atomics (CAS),
63 // MEMBAR, and ways to eliminate such instructions from the common-case paths:
64 // -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
65 // -- http://blogs.sun.com/dave/resource/MustangSync.pdf
66 // -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
67 // -- synchronizer.cpp
68 //
69 // * Overall goals - desiderata
70 // 1. Minimize context switching
71 // 2. Minimize lock migration
72 // 3. Minimize CPI -- affinity and locality
73 // 4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
74 // 5. Minimize outer lock hold times
75 // 6. Behave gracefully on a loaded system
76 //
77 // * Thread flow and list residency:
78 //
79 // Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
80 // [..resident on monitor list..]
81 // [...........contending..................]
82 //
83 // -- The contention queue (cxq) contains recently-arrived threads (RATs).
84 // Threads on the cxq eventually drain into the EntryList.
85 // -- Invariant: a thread appears on at most one list -- cxq, EntryList
86 // or WaitSet -- at any one time.
87 // -- For a given monitor there can be at most one "OnDeck" thread at any
88 // given time, but if need be this particular invariant could be relaxed.
89 //
90 // * The WaitSet and EntryList linked lists are composed of ParkEvents.
91 // I use ParkEvents instead of threads as ParkEvents are immortal and
92 // type-stable, meaning we can safely unpark() a possibly stale
93 // list element in the unlock()-path. (That's benign).
94 //
95 // * Succession policy - providing for progress:
96 //
97 // As necessary, the unlock()ing thread identifies, unlinks, and unparks
98 // an "heir presumptive" tentative successor thread from the EntryList.
99 // This becomes the so-called "OnDeck" thread, of which there can be only
100 // one at any given time for a given monitor. The wakee will recontend
101 // for ownership of the monitor.
102 //
103 // Succession is provided for by a policy of competitive handoff.
104 // The exiting thread does _not_ grant or pass ownership to the
105 // successor thread. (This is also referred to as "handoff succession").
106 // Instead the exiting thread releases ownership and possibly wakes
107 // a successor, so the successor can (re)compete for ownership of the lock.
108 //
109 // Competitive handoff provides excellent overall throughput at the expense
110 // of short-term fairness. If fairness is a concern then one remedy might
111 // be to add an AcquireCounter field to the monitor. After a thread acquires
112 // the lock it would decrement the AcquireCounter field. When the count
113 // reaches 0 the thread would reset the counter, abdicate the lock
114 // directly to some thread on the EntryList, and then move itself to the
115 // tail of the EntryList, as sketched below.
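//
// A hypothetical sketch of that remedy (Monitor has no _AcquireCounter
// field today; K is an arbitrary fairness interval):
//
// void Monitor::unlock_fairly() { // illustrative only
// if (--_AcquireCounter > 0) {
// IUnlock(false); // usual competitive handoff
// } else {
// _AcquireCounter = K; // restart the interval
// // Abdicate: pass the lock directly to the EntryList head and
// // requeue at the tail instead of letting threads recompete.
// }
// }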
116 //
117 // But in practice most threads engage or otherwise participate in resource
118 // bounded producer-consumer relationships, so lock domination is not usually
119 // a practical concern. Recall too, that in general it's easier to construct
120 // a fair lock from a fast lock, but not vice-versa.
121 //
122 // * The cxq can have multiple concurrent "pushers" but only one concurrent
123 // detaching thread. This mechanism is immune to ABA corruption.
124 // More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
125 // We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
126 // thread constraint.
127 //
128 // * Taken together, the cxq and the EntryList constitute or form a
129 // single logical queue of threads stalled trying to acquire the lock.
130 // We use two distinct lists to reduce heat on the list ends.
131 // Threads in lock() enqueue onto cxq while threads in unlock() will
132 // dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
133 // A key desideratum is to minimize queue & monitor metadata manipulation
134 // that occurs while holding the "outer" monitor lock -- that is, we want to
135 // minimize monitor lock hold times.
136 //
137 // The EntryList is ordered by the prevailing queue discipline and
138 // can be organized in any convenient fashion, such as a doubly-linked list or
139 // a circular doubly-linked list. If we need a priority queue then something akin
140 // to Solaris' sleepq would work nicely. Viz.,
141 // -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
142 // -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
143 // Queue discipline is enforced at ::unlock() time, when the unlocking thread
144 // drains the cxq into the EntryList, and orders or reorders the threads on the
145 // EntryList accordingly.
146 //
147 // Barring "lock barging", this mechanism provides fair cyclic ordering,
148 // somewhat similar to an elevator-scan.
149 //
150 // * OnDeck
151 // -- For a given monitor there can be at most one OnDeck thread at any given
152 // instant. The OnDeck thread is contending for the lock, but has been
153 // unlinked from the EntryList and cxq by some previous unlock() operations.
154 // Once a thread has been designated the OnDeck thread it will remain so
155 // until it manages to acquire the lock -- being OnDeck is a stable property.
156 // -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
157 // -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
158 // having cleared the LockByte and dropped the outer lock, attempt to "trylock"
159 // OnDeck by CASing the field from null to non-null. If successful, that thread
160 // is then responsible for progress and succession and can use CAS to detach and
161 // drain the cxq into the EntryList. By convention, only this thread, the holder of
162 // the OnDeck inner lock, can manipulate the EntryList or detach and drain the
163 // RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
164 // we allow multiple concurrent "push" operations but restrict detach concurrency
165 // to at most one thread. Having selected and detached a successor, the thread then
166 // changes the OnDeck to refer to that successor, and then unparks the successor.
167 // That successor will eventually acquire the lock and clear OnDeck. Beware
168 // that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
169 // "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
170 // and then the successor eventually "drops" OnDeck. Note that there's never
171 // any sense of contention on the inner lock, however. Threads never contend
172 // or wait for the inner lock.
173 // -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
174 // http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
175 // In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
176 // TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
177 //
178 // * Waiting threads reside on the WaitSet list -- wait() puts
179 // the caller onto the WaitSet. Notify() or notifyAll() simply
180 // transfers threads from the WaitSet to either the EntryList or cxq.
181 // Subsequent unlock() operations will eventually unpark the notifyee.
182 // Unparking a notifyee in notify() proper is inefficient - if we were to do so
183 // it's likely the notifyee would simply impale itself on the lock held
184 // by the notifier.
185 //
186 // * The mechanism is obstruction-free in that if the holder of the transient
187 // OnDeck lock in unlock() is preempted or otherwise stalls, other threads
188 // can still acquire and release the outer lock and continue to make progress.
189 // At worst, waking of already blocked contending threads may be delayed,
190 // but nothing worse. (We only use "trylock" operations on the inner OnDeck
191 // lock).
192 //
193 // * Note that thread-local storage must be initialized before a thread
194 // uses Native monitors or mutexes. The native monitor-mutex subsystem
195 // depends on Thread::current().
196 //
197 // * The monitor synchronization subsystem avoids the use of native
198 // synchronization primitives except for the narrow platform-specific
199 // park-unpark abstraction. See the comments in os_solaris.cpp regarding
200 // the semantics of park-unpark. Put another way, this monitor implementation
201 // depends only on atomic operations and park-unpark. The monitor subsystem
202 // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
203 // underlying OS manages the READY<->RUN transitions.
204 //
205 // * The memory consistency model provided by lock()-unlock() is at least as
206 // strong as the Java Memory Model defined by JSR-133, if not stronger.
207 // That is, we guarantee at least entry consistency.
208 // See http://g.oswego.edu/dl/jmm/cookbook.html.
209 //
210 // * Thread:: currently contains a set of purpose-specific ParkEvents:
211 // _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
212 // the purpose-specific ParkEvents and instead implement a general per-thread
213 // stack of available ParkEvents which we could provision on-demand. The
214 // stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
215 // and ::Release(). A thread would simply pop an element from the local stack before it
216 // enqueued or park()ed. When the contention was over the thread would
217 // push the no-longer-needed ParkEvent back onto its stack.
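//
// A hypothetical shape for that cache (Thread has no _EventFreeList
// field today; ParkEvent::FreeNext is borrowed for the links):
//
// ParkEvent * Thread::BorrowEvent() {
// ParkEvent * e = _EventFreeList; // pop from the local stack
// if (e == NULL) return ParkEvent::Allocate(this);
// _EventFreeList = e->FreeNext;
// return e;
// }
// void Thread::RecycleEvent(ParkEvent * e) {
// e->FreeNext = _EventFreeList; // push back for reuse
// _EventFreeList = e;
// }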
218 //
219 // * A slightly reduced form of ILock() and IUnlock() has been partially
220 // model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
221 // It'd be interesting to see if TLA/TLC could be useful as well.
222 //
223 // * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
224 // code should never call other code in the JVM that might itself need to
225 // acquire monitors or mutexes. That's true *except* in the case of the
226 // ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
227 // mutator reentry (ingress) by checking for a pending safepoint, in which case it will
228 // call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
229 // In that particular case a call to lock() for a given Monitor can end up recursively
230 // calling lock() on another monitor. While distasteful, this is largely benign
231 // as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
232 //
233 // It's unfortunate that native mutexes and thread state transitions were convolved.
234 // They're really separate concerns and should have remained that way. Melding
235 // them together was facile -- a bit too facile. The current implementation badly
236 // conflates the two concerns.
237 //
238 // * TODO-FIXME:
239 //
240 // -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
241 // We should also add DTRACE probes in the ParkEvent subsystem for
242 // Park-entry, Park-exit, and Unpark.
243 //
244 // -- We have an excess of mutex-like constructs in the JVM, namely:
245 // 1. objectMonitors for Java-level synchronization (synchronizer.cpp)
246 // 2. low-level muxAcquire and muxRelease
247 // 3. low-level spinAcquire and spinRelease
248 // 4. native Mutex:: and Monitor::
249 // 5. jvm_raw_lock() and _unlock()
250 // 6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
251 // similar name.
252 //
253 // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
254
255
256 // CASPTR() uses the canonical argument order that dominates in the literature.
257 // Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
258
259 #define CASPTR(a, c, s) \
260 intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
261 #define UNS(x) (uintptr_t(x))
262 #define TRACE(m) \
263 { \
264 static volatile int ctr = 0; \
265 int x = ++ctr; \
266 if ((x & (x - 1)) == 0) { \
267 ::printf("%d:%s\n", x, #m); \
268 ::fflush(stdout); \
269 } \
270 }
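
// TRACE reports only when its counter reaches a power of two -- the
// (x & (x - 1)) == 0 test -- so output is throttled geometrically on hot paths.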
271
272 // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
273 // Bijective except for the trailing mask operation.
274 // Useful for spin loops as the compiler can't optimize it away.
275
276 static inline jint MarsagliaXORV(jint x) {
277 if (x == 0) x = 1|os::random();
278 x ^= x << 6;
279 x ^= ((unsigned)x) >> 21;
280 x ^= x << 7;
281 return x & 0x7FFFFFFF;
282 }
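
// Note that the zero check above guards the xorshift fixed point: a state of 0
// would map to 0 forever, so it is reseeded from os::random() with the low bit
// forced on.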
283
284 static int Stall(int its) {
285 static volatile jint rv = 1;
286 volatile int OnFrame = 0;
287 jint v = rv ^ UNS(OnFrame);
288 while (--its >= 0) {
289 v = MarsagliaXORV(v);
290 }
291 // Make this impossible for the compiler to optimize away,
292 // but (mostly) avoid W coherency sharing on MP systems.
293 if (v == 0x12345) rv = v;
294 return v;
295 }
296
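// TryLock: attempt to set the LockByte with CAS.
// Returns 1 if the lock was acquired and 0 if the LockByte was already set.
// The loop retries only when the CAS fails because of concurrent cxq
// traffic while the LockByte remains clear, never because the lock is held.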
297 int Monitor::TryLock() {
298 intptr_t v = _LockWord.FullWord;
299 for (;;) {
300 if ((v & _LBIT) != 0) return 0;
301 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
302 if (v == u) return 1;
303 v = u;
304 }
305 }
306
307 int Monitor::TryFast() {
308 // Optimistic fast-path form ...
309 // Fast-path attempt for the common uncontended case.
310 // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
311 intptr_t v = CASPTR(&_LockWord, 0, _LBIT); // agro ...
312 if (v == 0) return 1;
313
314 for (;;) {
315 if ((v & _LBIT) != 0) return 0;
316 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
317 if (v == u) return 1;
318 v = u;
319 }
320 }
321
322 int Monitor::ILocked() {
323 const intptr_t w = _LockWord.FullWord & 0xFF;
324 assert(w == 0 || w == _LBIT, "invariant");
325 return w == _LBIT;
326 }
327
328 // Polite TATAS spinlock with exponential backoff - bounded spin.
329 // Ideally we'd use processor cycles, time or vtime to control
330 // the loop, but we currently use iterations.
331 // All the constants within were derived empirically but work over
332 // the spectrum of J2SE reference platforms.
333 // On Niagara-class systems the back-off is unnecessary but
334 // is relatively harmless. (At worst it'll slightly retard
335 // acquisition times). The back-off is critical for older SMP systems
336 // where constant fetching of the LockWord would otherwise impair
337 // scalability.
338 //
339 // Clamp spinning at approximately 1/2 of a context-switch round-trip.
340 // See synchronizer.cpp for details and rationale.
341
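// For reference, the NativeMonitorFlags bits tested in this file:
// 0x01 - IWait() returns after any park(), even without notification
// 0x02 - TrySpin() skips the backoff stall between probes
// 0x04 - TrySpin() keeps spinning across pending safepoints
// 0x08 - TrySpin() omits SpinPause() between probes
// 0x10 - notify() eagerly unparks the notifyee
// 0x20 - ILock() may barge directly on the inner OnDeck lock
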
342 int Monitor::TrySpin(Thread * const Self) {
343 if (TryLock()) return 1;
344 if (!os::is_MP()) return 0;
345
346 int Probes = 0;
347 int Delay = 0;
348 int Steps = 0;
349 int SpinMax = NativeMonitorSpinLimit;
350 int flgs = NativeMonitorFlags;
351 for (;;) {
352 intptr_t v = _LockWord.FullWord;
353 if ((v & _LBIT) == 0) {
354 if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
355 return 1;
356 }
357 continue;
358 }
359
360 if ((flgs & 8) == 0) {
361 SpinPause();
362 }
363
364 // Periodically increase Delay -- variable Delay form
365 // conceptually: delay *= 1 + 1/Exponent
366 ++Probes;
367 if (Probes > SpinMax) return 0;
368
369 if ((Probes & 0x7) == 0) {
370 Delay = ((Delay << 1)|1) & 0x7FF;
371 // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
372 }
373
374 if (flgs & 2) continue;
375
376 // Consider checking _owner's schedctl state, if OFFPROC abort spin.
377 // If the owner is OFFPROC then it's unlikely that the lock will be dropped
378 // in a timely fashion, which suggests that spinning would not be fruitful
379 // or profitable.
380
381 // Stall for "Delay" time units - iterations in the current implementation.
382 // Avoid generating coherency traffic while stalled.
383 // Possible ways to delay:
384 // PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
385 // wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
386 // Note that on Niagara-class systems we want to minimize STs in the
387 // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
388 // Furthermore, they don't have a W$ like traditional SPARC processors.
389 // We currently use a Marsaglia Shift-Xor RNG loop.
390 Steps += Delay;
391 if (Self != NULL) {
392 jint rv = Self->rng[0];
393 for (int k = Delay; --k >= 0;) {
394 rv = MarsagliaXORV(rv);
395 if ((flgs & 4) == 0 && SafepointMechanism::poll()) return 0;
396 }
397 Self->rng[0] = rv;
398 } else {
399 Stall(Delay);
400 }
401 }
402 }
403
404 static int ParkCommon(ParkEvent * ev, jlong timo) {
405 // Diagnostic support - periodically unwedge blocked threads
406 intx nmt = NativeMonitorTimeout;
407 if (nmt > 0 && (nmt < timo || timo <= 0)) {
408 timo = nmt;
409 }
410 int err = OS_OK;
411 if (0 == timo) {
412 ev->park();
413 } else {
414 err = ev->park(timo);
415 }
416 return err;
417 }
418
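// AcquireOrPush: grab the outer lock if it appears free; otherwise push
// ESelf onto the cxq with a single CAS. Returns 1 if the lock was acquired
// and 0 if ESelf was enqueued, in which case the caller must park.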
419 inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
420 intptr_t v = _LockWord.FullWord;
421 for (;;) {
422 if ((v & _LBIT) == 0) {
423 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
424 if (u == v) return 1; // indicate acquired
425 v = u;
426 } else {
427 // Anticipate success ...
428 ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
429 const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
430 if (u == v) return 0; // indicate pushed onto cxq
431 v = u;
432 }
433 // Interference - LockWord change - just retry
434 }
435 }
436
437 // ILock and IWait are the lowest level primitive internal blocking
438 // synchronization functions. The callers of IWait and ILock must have
439 // performed any needed state transitions beforehand.
440 // IWait and ILock may directly call park() without any concern for thread state.
441 // Note that ILock and IWait do *not* access _owner.
442 // _owner is a higher-level logical concept.
443
444 void Monitor::ILock(Thread * Self) {
445 assert(_OnDeck != Self->_MutexEvent, "invariant");
446
447 if (TryFast()) {
448 Exeunt:
449 assert(ILocked(), "invariant");
450 return;
451 }
452
453 ParkEvent * const ESelf = Self->_MutexEvent;
454 assert(_OnDeck != ESelf, "invariant");
455
456 // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT
457 // Synchronizer.cpp uses a similar optimization.
458 if (TrySpin(Self)) goto Exeunt;
459
460 // Slow-path - the lock is contended.
461 // Either Enqueue Self on cxq or acquire the outer lock.
462 // LockWord encoding = (cxq,LOCKBYTE)
463 ESelf->reset();
464 OrderAccess::fence();
465
466 // Optional optimization ... try barging on the inner lock
467 if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
468 goto OnDeck_LOOP;
469 }
470
471 if (AcquireOrPush(ESelf)) goto Exeunt;
472
473 // At any given time there is at most one ondeck thread.
474 // ondeck implies not resident on cxq and not resident on EntryList
475 // Only the OnDeck thread can try to acquire -- contend for -- the lock.
476 // CONSIDER: use Self->OnDeck instead of m->OnDeck.
477 // Deschedule Self so that others may run.
478 while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) {
479 ParkCommon(ESelf, 0);
480 }
481
482 // Self is now in the OnDeck position and will remain so until it
483 // manages to acquire the lock.
484 OnDeck_LOOP:
485 for (;;) {
486 assert(_OnDeck == ESelf, "invariant");
487 if (TrySpin(Self)) break;
488 // It's probably wise to spin only if we *actually* blocked
489 // CONSIDER: check the lockbyte, if it remains set then
490 // preemptively drain the cxq into the EntryList.
491 // The best place and time to perform queue operations -- lock metadata --
492 // is _before having acquired the outer lock, while waiting for the lock to drop.
493 ParkCommon(ESelf, 0);
494 }
495
496 assert(_OnDeck == ESelf, "invariant");
497 _OnDeck = NULL;
498
499 // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
500 // epilogue immediately after having acquired the outer lock.
501 // But instead we could consider the following optimizations:
502 // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
503 // This might avoid potential reacquisition of the inner lock in IUnlock().
504 // B. While still holding the inner lock, attempt to opportunistically select
505 // and unlink the next OnDeck thread from the EntryList.
506 // If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
507 // It's critical that the select-and-unlink operation run in constant-time as
508 // it executes when holding the outer lock and may artificially increase the
509 // effective length of the critical section.
510 // Note that (A) and (B) are tantamount to succession by direct handoff for
511 // the inner lock.
512 goto Exeunt;
513 }
514
515 void Monitor::IUnlock(bool RelaxAssert) {
516 assert(ILocked(), "invariant");
517 // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
518 // before the store that releases the lock. Crucially, all the stores and loads in the
519 // critical section must be globally visible before the store of 0 into the lock-word
520 // that releases the lock becomes globally visible. That is, memory accesses in the
521 // critical section should not be allowed to bypass or overtake the following ST that
522 // releases the lock. As such, to prevent accesses within the critical section
523 // from "leaking" out, we need a release fence between the critical section and the
524 // store that releases the lock. In practice that release barrier is elided on
525 // platforms with strong memory models such as TSO.
526 //
527 // Note that the OrderAccess::storeload() fence that appears after the unlock store
528 // provides for progress conditions and succession and is _not related to exclusion
529 // safety or lock release consistency.
530 OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock
531
532 OrderAccess::storeload();
533 ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
534 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
535 if (w != NULL) {
536 // Either we have a valid ondeck thread or ondeck is transiently "locked"
537 // by some exiting thread as it arranges for succession. The LSBit of
538 // OnDeck allows us to discriminate two cases. If the latter, the
539 // responsibility for progress and succession lies with that other thread.
540 // For good performance, we also depend on the fact that redundant unpark()
541 // operations are cheap. That is, repeated Unpark()ing of the OnDeck thread
542 // is inexpensive. This approach provides implicit futile wakeup throttling.
543 // Note that the referent "w" might be stale with respect to the lock.
544 // In that case the following unpark() is harmless and the worst that'll happen
545 // is a spurious return from a park() operation. Critically, if "w" _is stale,
546 // then progress is known to have occurred as that means the thread associated
547 // with "w" acquired the lock. In that case this thread need take no further
548 // action to guarantee progress.
549 if ((UNS(w) & _LBIT) == 0) w->unpark();
550 return;
551 }
552
553 intptr_t cxq = _LockWord.FullWord;
554 if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
555 return; // normal fast-path exit - cxq and EntryList both empty
556 }
557 if (cxq & _LBIT) {
558 // Optional optimization ...
559 // Some other thread acquired the lock in the window since this
560 // thread released it. Succession is now that thread's responsibility.
561 return;
562 }
563
564 Succession:
565 // Slow-path exit - this thread must ensure succession and progress.
566 // OnDeck serves as lock to protect cxq and EntryList.
567 // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
568 // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
569 // but only one concurrent consumer (detacher of RATs).
570 // Consider protecting this critical section with schedctl on Solaris.
571 // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
572 // picks a successor and marks that thread as OnDeck. That successor
573 // thread will then clear OnDeck once it eventually acquires the outer lock.
574 if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
575 return;
576 }
577
578 ParkEvent * List = _EntryList;
579 if (List != NULL) {
580 // Transfer the head of the EntryList to the OnDeck position.
581 // Once OnDeck, a thread stays OnDeck until it acquires the lock.
582 // For a given lock there is at most one OnDeck thread at any one instant.
583 WakeOne:
584 assert(List == _EntryList, "invariant");
585 ParkEvent * const w = List;
586 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
587 _EntryList = w->ListNext;
588 // as a diagnostic measure consider setting w->_ListNext = BAD
589 assert(UNS(_OnDeck) == _LBIT, "invariant");
590
591 // Pass OnDeck role to w, ensuring that _EntryList has been set first.
592 // w will clear _OnDeck once it acquires the outer lock.
593 // Note that once we set _OnDeck that thread can acquire the mutex, proceed
594 // with its critical section and then enter this code to unlock the mutex. So
595 // you can have multiple threads active in IUnlock at the same time.
596 OrderAccess::release_store_ptr(&_OnDeck, w);
597
598 // Another optional optimization ...
599 // For heavily contended locks it's not uncommon that some other
600 // thread acquired the lock while this thread was arranging succession.
601 // Try to defer the unpark() operation - Delegate the responsibility
602 // for unpark()ing the OnDeck thread to the current or subsequent owners.
603 // That is, the new owner is responsible for unparking the OnDeck thread.
604 OrderAccess::storeload();
605 cxq = _LockWord.FullWord;
606 if (cxq & _LBIT) return;
607
608 w->unpark();
609 return;
610 }
611
612 cxq = _LockWord.FullWord;
613 if ((cxq & ~_LBIT) != 0) {
614 // The EntryList is empty but the cxq is populated.
615 // Drain RATs from cxq into EntryList:
616 // detach the RAT segment with CAS and then merge it into the EntryList.
617 for (;;) {
618 // optional optimization - if locked, the owner is responsible for succession
619 if (cxq & _LBIT) goto Punt;
620 const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
621 if (vfy == cxq) break;
622 cxq = vfy;
623 // Interference - LockWord changed - Just retry
624 // We can see concurrent interference from contending threads
625 // pushing themselves onto the cxq or from lock-unlock operations.
626 // From the perspective of this thread, EntryList is stable and
627 // the cxq is prepend-only -- the head is volatile but the interior
628 // of the cxq is stable. In theory if we encounter interference from threads
629 // pushing onto cxq we could simply break off the original cxq suffix and
630 // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
631 // on the high-traffic LockWord variable. For instance, let's say the cxq is "ABCD"
632 // when we first fetch cxq above. Between the fetch -- where we observed "A"
633 // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
634 // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
635 // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
636 // Note too, that it's safe for this thread to traverse the cxq
637 // without taking any special concurrency precautions.
638 }
639
640 // We don't currently reorder the cxq segment as we move it onto
641 // the EntryList, but it might make sense to reverse the order
642 // or perhaps sort by thread priority. See the comments in
643 // synchronizer.cpp objectMonitor::exit().
644 assert(_EntryList == NULL, "invariant");
645 _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
646 assert(List != NULL, "invariant");
647 goto WakeOne;
648 }
649
650 // cxq|EntryList is empty.
651 // w == NULL implies that cxq|EntryList == NULL in the past.
652 // Possible race - rare inopportune interleaving.
653 // A thread could have added itself to cxq since this thread previously checked.
654 // Detect and recover by refetching cxq.
655 Punt:
656 assert(UNS(_OnDeck) == _LBIT, "invariant");
657 _OnDeck = NULL; // Release inner lock.
658 OrderAccess::storeload(); // Dekker duality - pivot point
659
660 // Resample LockWord/cxq to recover from possible race.
661 // For instance, while this thread T1 held OnDeck, some other thread T2 might
662 // acquire the outer lock. Another thread T3 might try to acquire the outer
663 // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
664 // outer lock, but skips succession as this thread T1 still holds OnDeck.
665 // T1 is and remains responsible for ensuring succession of T3.
666 //
667 // Note that we don't need to recheck EntryList, just cxq.
668 // If threads moved onto EntryList since we dropped OnDeck
669 // that implies some other thread forced succession.
670 cxq = _LockWord.FullWord;
671 if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
672 goto Succession; // potential race -- re-run succession
673 }
674 return;
675 }
676
677 bool Monitor::notify() {
678 assert(_owner == Thread::current(), "invariant");
679 assert(ILocked(), "invariant");
680 if (_WaitSet == NULL) return true;
681 NotifyCount++;
682
683 // Transfer one thread from the WaitSet to the EntryList or cxq.
684 // Currently we just unlink the head of the WaitSet and prepend to the cxq.
685 // And of course we could just unlink it and unpark it, too, but
686 // in that case it'd likely impale itself on the reentry.
687 Thread::muxAcquire(_WaitLock, "notify:WaitLock");
688 ParkEvent * nfy = _WaitSet;
689 if (nfy != NULL) { // DCL idiom
690 _WaitSet = nfy->ListNext;
691 assert(nfy->Notified == 0, "invariant");
692 // push nfy onto the cxq
693 for (;;) {
694 const intptr_t v = _LockWord.FullWord;
695 assert((v & 0xFF) == _LBIT, "invariant");
696 nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
697 if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
698 // interference - _LockWord changed -- just retry
699 }
700 // Note that setting Notified before pushing nfy onto the cxq is
701 // also legal and safe, but the safety properties are much more
702 // subtle, so for the sake of code stewardship ...
703 OrderAccess::fence();
704 nfy->Notified = 1;
705 }
706 Thread::muxRelease(_WaitLock);
707 if (nfy != NULL && (NativeMonitorFlags & 16)) {
708 // Experimental code ... light up the wakee in the hope that this thread (the owner)
709 // will drop the lock just about the time the wakee comes ONPROC.
710 nfy->unpark();
711 }
712 assert(ILocked(), "invariant");
713 return true;
714 }
715
716 // Currently notifyAll() transfers the waiters one-at-a-time from the waitset
717 // to the cxq. This could be done more efficiently with a single bulk transfer,
718 // but in practice notifyAll() for large #s of threads is rare and not time-critical.
719 // Beware too, that we invert the order of the waiters. Let's say that the
720 // waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
721 // will be empty and the cxq will be "DCBAXYZ". This is benign, of course.
722
723 bool Monitor::notify_all() {
724 assert(_owner == Thread::current(), "invariant");
725 assert(ILocked(), "invariant");
726 while (_WaitSet != NULL) notify();
727 return true;
728 }
729
730 int Monitor::IWait(Thread * Self, jlong timo) {
731 assert(ILocked(), "invariant");
732
733 // Phases:
734 // 1. Enqueue Self on WaitSet - currently prepend
735 // 2. unlock - drop the outer lock
736 // 3. wait for either notification or timeout
737 // 4. lock - reentry - reacquire the outer lock
738
739 ParkEvent * const ESelf = Self->_MutexEvent;
740 ESelf->Notified = 0;
741 ESelf->reset();
742 OrderAccess::fence();
743
744 // Add Self to WaitSet
745 // Ideally only the holder of the outer lock would manipulate the WaitSet -
746 // That is, the outer lock would implicitly protect the WaitSet.
747 // But if a thread in wait() encounters a timeout it will need to dequeue itself
748 // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
749 // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
750 // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
751 // on the WaitSet can't be allowed to compete for the lock until it has managed to
752 // unlink its ParkEvent from WaitSet. Thus the need for WaitLock.
753 // Contention on the WaitLock is minimal.
754 //
755 // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
756 // thread class. The WaitSet would be composed of WaitEvents. Only the
757 // owner of the outer lock would manipulate the WaitSet. A thread in wait()
758 // could then compete for the outer lock, and then, if necessary, unlink itself
759 // from the WaitSet only after having acquired the outer lock. More precisely,
760 // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
761 // on the WaitSet; release the outer lock; wait for either notification or timeout;
762 // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
763 //
764 // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
765 // One set would be for the WaitSet and one for the EntryList.
766 // We could also deconstruct the ParkEvent into a "pure" event and add a
767 // new immortal/TSM "ListElement" class that referred to ParkEvents.
768 // In that case we could have one ListElement on the WaitSet and another
769 // on the EntryList, with both referring to the same pure Event.
770
771 Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
772 ESelf->ListNext = _WaitSet;
773 _WaitSet = ESelf;
774 Thread::muxRelease(_WaitLock);
775
776 // Release the outer lock
777 // We call IUnlock (RelaxAssert=true) as a thread T1 might
778 // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
779 // and then stall before it can attempt to wake a successor.
780 // Some other thread T2 acquires the lock, and calls notify(), moving
781 // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
782 // and then finds *itself* on the cxq. During the course of a normal
783 // IUnlock() call a thread should _never find itself on the EntryList
784 // or cxq, but in the case of wait() it's possible.
785 // See synchronizer.cpp objectMonitor::wait().
786 IUnlock(true);
787
788 // Wait for either notification or timeout
789 // Beware that in some circumstances we might propagate
790 // spurious wakeups back to the caller.
791
792 for (;;) {
793 if (ESelf->Notified) break;
794 int err = ParkCommon(ESelf, timo);
795 if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
796 }
797
798 // Prepare for reentry - if necessary, remove ESelf from WaitSet
799 // ESelf can be:
800 // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
801 // 2. On the cxq or EntryList
802 // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
803
804 OrderAccess::fence();
805 int WasOnWaitSet = 0;
806 if (ESelf->Notified == 0) {
807 Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
808 if (ESelf->Notified == 0) { // DCL idiom
809 assert(_OnDeck != ESelf, "invariant"); // can't be both OnDeck and on WaitSet
810 // ESelf is resident on the WaitSet -- unlink it.
811 // A doubly-linked list would be better here so we can unlink in constant-time.
812 // We have to unlink before we potentially recontend as ESelf might otherwise
813 // end up on the cxq|EntryList -- it can't be on two lists at once.
814 ParkEvent * p = _WaitSet;
815 ParkEvent * q = NULL; // classic q chases p
816 while (p != NULL && p != ESelf) {
817 q = p;
818 p = p->ListNext;
819 }
820 assert(p == ESelf, "invariant");
821 if (p == _WaitSet) { // found at head
822 assert(q == NULL, "invariant");
823 _WaitSet = p->ListNext;
824 } else { // found in interior
825 assert(q->ListNext == p, "invariant");
826 q->ListNext = p->ListNext;
827 }
828 WasOnWaitSet = 1; // We were *not* notified but instead encountered a timeout
829 }
830 Thread::muxRelease(_WaitLock);
831 }
832
833 // Reentry phase - reacquire the lock
834 if (WasOnWaitSet) {
835 // ESelf was previously on the WaitSet but we just unlinked it above
836 // because of a timeout. ESelf is not resident on any list and is not OnDeck
837 assert(_OnDeck != ESelf, "invariant");
838 ILock(Self);
839 } else {
840 // A prior notify() operation moved ESelf from the WaitSet to the cxq.
841 // ESelf is now on the cxq, EntryList or at the OnDeck position.
842 // The following fragment is extracted from Monitor::ILock()
843 for (;;) {
844 if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
845 ParkCommon(ESelf, 0);
846 }
847 assert(_OnDeck == ESelf, "invariant");
848 _OnDeck = NULL;
849 }
850
851 assert(ILocked(), "invariant");
852 return WasOnWaitSet != 0; // return true IFF timeout
853 }
854
855
856 // ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
857 // In particular, there are certain types of global lock that may be held
858 // by a Java thread while it is blocked at a safepoint but before it has
859 // written the _owner field. These locks may be sneakily acquired by the
860 // VM thread during a safepoint to avoid deadlocks. Alternatively, one should
861 // identify all such locks, and ensure that Java threads never block at
862 // safepoints while holding them (_no_safepoint_check_flag). While it
863 // seems as though this could increase the time to reach a safepoint
864 // (or at least increase the mean, if not the variance), the latter
865 // approach might make for a cleaner, more maintainable JVM design.
866 //
867 // Sneaking is vile and reprehensible and should be excised at the 1st
868 // opportunity. It's possible that the need for sneaking could be obviated
869 // as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
870 // or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
871 // (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
872 // it'll stall at the TBIVM reentry state transition after having acquired the
873 // underlying lock, but before having set _owner and having entered the actual
874 // critical section. The lock-sneaking facility leverages that fact and allows the
875 // VM thread to logically acquire locks that have already been physically locked by mutators
876 // but where the mutators are known to be blocked at the reentry thread state transition.
877 //
878 // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
879 // wrapped calls to park(), then we could likely do away with sneaking. We'd
880 // decouple lock acquisition and parking. The critical invariant to eliminating
881 // sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
882 // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
883 // One difficulty with this approach is that the TBIVM wrapper could recurse and
884 // call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
885 // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
886 //
887 // But of course the proper ultimate approach is to avoid schemes that require explicit
888 // sneaking or dependence on any clever invariants or subtle implementation properties
889 // of Mutex-Monitor and instead directly address the underlying design flaw.
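//
// A sketch of that park-wrapping idea (hypothetical; today's ParkCommon()
// takes no Thread argument and performs no state transition):
//
// static int ParkCommon(Thread * Self, ParkEvent * ev, jlong timo) {
// if (Self->is_Java_thread()) {
// ThreadBlockInVM tbivm((JavaThread *) Self); // narrow TBIVM jacket
// return timo == 0 ? (ev->park(), OS_OK) : ev->park(timo);
// }
// return timo == 0 ? (ev->park(), OS_OK) : ev->park(timo);
// }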
890
891 void Monitor::lock(Thread * Self) {
892 // Ensure that the Monitor requires/allows safepoint checks.
893 assert(_safepoint_check_required != Monitor::_safepoint_check_never,
894 "This lock should never have a safepoint check: %s", name());
895
896 #ifdef CHECK_UNHANDLED_OOPS
897 // Clear unhandled oops so we get a crash right away. Only clear for Java
898 // threads, not for VM or GC threads.
899 if (Self->is_Java_thread()) {
900 Self->clear_unhandled_oops();
901 }
902 #endif // CHECK_UNHANDLED_OOPS
903
904 debug_only(check_prelock_state(Self));
905 assert(_owner != Self, "invariant");
906 assert(_OnDeck != Self->_MutexEvent, "invariant");
907
908 if (TryFast()) {
909 Exeunt:
910 assert(ILocked(), "invariant");
911 assert(owner() == NULL, "invariant");
912 set_owner(Self);
913 return;
914 }
915
916 // The lock is contended ...
917
918 bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
919 if (can_sneak && _owner == NULL) {
920 // a java thread has locked the lock but has not entered the
921 // critical region -- let's just pretend we've locked the lock
922 // and go on. we note this with _snuck so we can also
923 // pretend to unlock when the time comes.
924 _snuck = true;
925 goto Exeunt;
926 }
927
928 // Try a brief spin to avoid passing thru thread state transition ...
929 if (TrySpin(Self)) goto Exeunt;
930
931 check_block_state(Self);
932 if (Self->is_Java_thread()) {
933 // Horribile dictu - we suffer through a state transition
934 assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
935 ThreadBlockInVM tbivm((JavaThread *) Self);
936 ILock(Self);
937 } else {
938 // Mirabile dictu
939 ILock(Self);
940 }
941 goto Exeunt;
942 }
943
944 void Monitor::lock() {
945 this->lock(Thread::current());
946 }
947
948 // Lock without safepoint check - a degenerate variant of lock().
949 // Should ONLY be used by safepoint code and other code
950 // that is guaranteed not to block while running inside the VM. If this is called with
951 // thread state set to be in VM, the safepoint synchronization code will deadlock!
952
953 void Monitor::lock_without_safepoint_check(Thread * Self) {
954 // Ensure that the Monitor does not require or allow safepoint checks.
955 assert(_safepoint_check_required != Monitor::_safepoint_check_always,
956 "This lock should always have a safepoint check: %s", name());
957 assert(_owner != Self, "invariant");
958 ILock(Self);
959 assert(_owner == NULL, "invariant");
960 set_owner(Self);
961 }
962
963 void Monitor::lock_without_safepoint_check() {
964 lock_without_safepoint_check(Thread::current());
965 }
966
967
968 // Returns true if thread succeeds in grabbing the lock, otherwise false.
969
970 bool Monitor::try_lock() {
971 Thread * const Self = Thread::current();
972 debug_only(check_prelock_state(Self));
973 // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
974
975 // Special case, where all Java threads are stopped.
976 // The lock may have been acquired but _owner is not yet set.
977 // In that case the VM thread can safely grab the lock.
978 // It strikes me this should appear _after the TryLock() fails, below.
979 bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
980 if (can_sneak && _owner == NULL) {
981 set_owner(Self); // Do not need to be atomic, since we are at a safepoint
982 _snuck = true;
983 return true;
984 }
985
986 if (TryLock()) {
987 // We got the lock
988 assert(_owner == NULL, "invariant");
989 set_owner(Self);
990 return true;
991 }
992 return false;
993 }
994
995 void Monitor::unlock() {
996 assert(_owner == Thread::current(), "invariant");
997 assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
998 set_owner(NULL);
999 if (_snuck) {
1000 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1001 _snuck = false;
1002 return;
1003 }
1004 IUnlock(false);
1005 }
1006
1007 // Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
1008 // jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
1009 //
1010 // There's no expectation that JVM_RawMonitors will interoperate properly with the native
1011 // Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
1012 // native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
1013 // over a pthread_mutex_t would work equally well, but would require more platform-specific
1014 // code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
1015 // would work too.
1016 //
1017 // Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
1018 // instance available. Instead, we transiently allocate a ParkEvent on-demand if
1019 // we encounter contention. That ParkEvent remains associated with the thread
1020 // until it manages to acquire the lock, at which time we return the ParkEvent
1021 // to the global ParkEvent free list. This is correct and suffices for our purposes.
1022 //
1023 // Beware that the original jvm_raw_unlock() had a "_snuck" test but that
1024 // jvm_raw_lock() didn't have the corresponding test. I suspect that's an
1025 // oversight, but I've replicated the original suspect logic in the new code ...
1026
1027 void Monitor::jvm_raw_lock() {
1028 assert(rank() == native, "invariant");
1029
1030 if (TryLock()) {
1031 Exeunt:
1032 assert(ILocked(), "invariant");
1033 assert(_owner == NULL, "invariant");
1034 // This can potentially be called by non-Java threads. Thus, Thread::current_or_null()
1035 // might return NULL. Don't call set_owner since it will break on a NULL owner.
1036 // Consider installing a non-null "ANON" distinguished value instead of just NULL.
1037 _owner = Thread::current_or_null();
1038 return;
1039 }
1040
1041 if (TrySpin(NULL)) goto Exeunt;
1042
1043 // slow-path - apparent contention
1044 // Allocate a ParkEvent for transient use.
1045 // The ParkEvent remains associated with this thread until
1046 // the time the thread manages to acquire the lock.
1047 ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
1048 ESelf->reset();
1049 OrderAccess::storeload();
1050
1051 // Either Enqueue Self on cxq or acquire the outer lock.
1052 if (AcquireOrPush (ESelf)) {
1053 ParkEvent::Release(ESelf); // surrender the ParkEvent
1054 goto Exeunt;
1055 }
1056
1057 // At any given time there is at most one ondeck thread.
1058 // ondeck implies not resident on cxq and not resident on EntryList
1059 // Only the OnDeck thread can try to acquire -- contend for -- the lock.
1060 // CONSIDER: use Self->OnDeck instead of m->OnDeck.
1061 for (;;) {
1062 if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
1063 ParkCommon(ESelf, 0);
1064 }
1065
1066 assert(_OnDeck == ESelf, "invariant");
1067 _OnDeck = NULL;
1068 ParkEvent::Release(ESelf); // surrender the ParkEvent
1069 goto Exeunt;
1070 }
1071
1072 void Monitor::jvm_raw_unlock() {
1073 // Nearly the same as Monitor::unlock() ...
1074 // directly set _owner instead of using set_owner(null)
1075 _owner = NULL;
1076 if (_snuck) { // ???
1077 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1078 _snuck = false;
1079 return;
1080 }
1081 IUnlock(false);
1082 }
1083
1084 bool Monitor::wait(bool no_safepoint_check, long timeout,
1085 bool as_suspend_equivalent) {
1086 // Make sure safepoint checking is used properly.
1087 assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
1088 "This lock should never have a safepoint check: %s", name());
1089 assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
1090 "This lock should always have a safepoint check: %s", name());
1091
1092 Thread * const Self = Thread::current();
1093 assert(_owner == Self, "invariant");
1094 assert(ILocked(), "invariant");
1095
1096 // as_suspend_equivalent logically implies !no_safepoint_check
1097 guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
1098 // !no_safepoint_check logically implies java_thread
1099 guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
1100
1101 #ifdef ASSERT
1102 Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
1103 assert(least != this, "Specification of get_least_... call above");
1104 if (least != NULL && least->rank() <= special) {
1105 tty->print("Attempting to wait on monitor %s/%d while holding"
1106 " lock %s/%d -- possible deadlock",
1107 name(), rank(), least->name(), least->rank());
1108 assert(false, "Shouldn't block(wait) while holding a lock of rank special");
1109 }
1110 #endif // ASSERT
1111
1112 int wait_status;
1113 // conceptually set the owner to NULL in anticipation of
1114 // abdicating the lock in wait
1115 set_owner(NULL);
1116 if (no_safepoint_check) {
1117 wait_status = IWait(Self, timeout);
1118 } else {
1119 assert(Self->is_Java_thread(), "invariant");
1120 JavaThread *jt = (JavaThread *)Self;
1121
1122 // Enter safepoint region - ornate and Rococo ...
1123 ThreadBlockInVM tbivm(jt);
1124 OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
1125
1126 if (as_suspend_equivalent) {
1127 jt->set_suspend_equivalent();
1128 // cleared by handle_special_suspend_equivalent_condition() or
1129 // java_suspend_self()
1130 }
1131
1132 wait_status = IWait(Self, timeout);
1133
1134 // were we externally suspended while we were waiting?
1135 if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
1136 // Our event wait has finished and we own the lock, but
1137 // while we were waiting another thread suspended us. We don't
1138 // want to hold the lock while suspended because that
1139 // would surprise the thread that suspended us.
1140 assert(ILocked(), "invariant");
1141 IUnlock(true);
1142 jt->java_suspend_self();
1143 ILock(Self);
1144 assert(ILocked(), "invariant");
1145 }
1146 }
1147
1148 // Conceptually reestablish ownership of the lock.
1149 // The "real" lock -- the LockByte -- was reacquired by IWait().
1150 assert(ILocked(), "invariant");
1151 assert(_owner == NULL, "invariant");
1152 set_owner(Self);
1153 return wait_status != 0; // return true IFF timeout
1154 }
1155
1156 Monitor::~Monitor() {
1157 #ifdef ASSERT
1158 uintptr_t owner = UNS(_owner);
1159 uintptr_t lockword = UNS(_LockWord.FullWord);
1160 uintptr_t entrylist = UNS(_EntryList);
1161 uintptr_t waitset = UNS(_WaitSet);
1162 uintptr_t ondeck = UNS(_OnDeck);
1163 // Print _name with precision limit, in case failure is due to memory
1164 // corruption that also trashed _name.
1165 assert((owner|lockword|entrylist|waitset|ondeck) == 0,
1166 "%.*s: _owner(" INTPTR_FORMAT ")|_LockWord(" INTPTR_FORMAT ")|_EntryList(" INTPTR_FORMAT ")|_WaitSet("
1167 INTPTR_FORMAT ")|_OnDeck(" INTPTR_FORMAT ") != 0",
1168 MONITOR_NAME_LEN, _name, owner, lockword, entrylist, waitset, ondeck);
1169 #endif
1170 }
1171
1172 void Monitor::ClearMonitor(Monitor * m, const char *name) {
1173 m->_owner = NULL;
1174 m->_snuck = false;
1175 if (name == NULL) {
1176 strcpy(m->_name, "UNKNOWN");
1177 } else {
1178 strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
1179 m->_name[MONITOR_NAME_LEN - 1] = '\0';
1180 }
1181 m->_LockWord.FullWord = 0;
1182 m->_EntryList = NULL;
1183 m->_OnDeck = NULL;
1184 m->_WaitSet = NULL;
1185 m->_WaitLock[0] = 0;
1186 }
1187
1188 Monitor::Monitor() { ClearMonitor(this); }
1189
1190 Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
1191 SafepointCheckRequired safepoint_check_required) {
1192 ClearMonitor(this, name);
1193 #ifdef ASSERT
1194 _allow_vm_block = allow_vm_block;
1195 _rank = Rank;
1196 NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1197 #endif
1198 }
1199
1200 Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
1201 SafepointCheckRequired safepoint_check_required) {
1202 ClearMonitor((Monitor *) this, name);
1203 #ifdef ASSERT
1204 _allow_vm_block = allow_vm_block;
1205 _rank = Rank;
1206 NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1207 #endif
1208 }
1209
1210 bool Monitor::owned_by_self() const {
1211 bool ret = _owner == Thread::current();
1212 assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
1213 return ret;
1214 }
1215
1216 void Monitor::print_on_error(outputStream* st) const {
1217 st->print("[" PTR_FORMAT, p2i(this));
1218 st->print("] %s", _name);
1219 st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
1220 }
1221
1222
1223
1224
1225 // ----------------------------------------------------------------------------------
1226 // Non-product code
1227
1228 #ifndef PRODUCT
1229 void Monitor::print_on(outputStream* st) const {
1230 st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
1231 p2i(this), _LockWord.FullWord, _name, p2i(_owner));
1232 }
1233 #endif
1234
1235 #ifndef PRODUCT
1236 #ifdef ASSERT
1237 Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
1238 Monitor *res, *tmp;
1239 for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
1240 if (tmp->rank() < res->rank()) {
1241 res = tmp;
1242 }
1243 }
1244 if (!SafepointSynchronize::is_at_safepoint()) {
1245 // In this case, we expect the held locks to be
1246 // in increasing rank order (modulo any native ranks)
1247 for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1248 if (tmp->next() != NULL) {
1249 assert(tmp->rank() == Mutex::native ||
1250 tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1251 }
1252 }
1253 }
1254 return res;
1255 }
1256
1257 Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
1258 Monitor *res, *tmp;
1259 for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
1260 if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
1261 res = tmp;
1262 }
1263 }
1264 if (!SafepointSynchronize::is_at_safepoint()) {
1265 // In this case, we expect the held locks to be
1266 // in increasing rank order (modulo any native ranks)
1267 for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1268 if (tmp->next() != NULL) {
1269 assert(tmp->rank() == Mutex::native ||
1270 tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1271 }
1272 }
1273 }
1274 return res;
1275 }
1276
1277
1278 bool Monitor::contains(Monitor* locks, Monitor * lock) {
1279 for (; locks != NULL; locks = locks->next()) {
1280 if (locks == lock) {
1281 return true;
1282 }
1283 }
1284 return false;
1285 }
1286 #endif
1287
1288 // Called immediately after lock acquisition or release as a diagnostic
1289 // to track the lock-set of the thread and test for rank violations that
1290 // might indicate exposure to deadlock.
1291 // Rather like an EventListener for _owner (:>).
1292
1293 void Monitor::set_owner_implementation(Thread *new_owner) {
1294 // This function is solely responsible for maintaining
1295 // and checking the invariant that threads and locks
1296 // are in a 1/N relation, with some locks unowned.
1297 // It uses the Mutex::_owner, Mutex::_next, and
1298 // Thread::_owned_locks fields, and no other function
1299 // changes those fields.
1300 // It is illegal to set the mutex from one non-NULL
1301 // owner to another--it must be owned by NULL as an
1302 // intermediate state.
1303
1304 if (new_owner != NULL) {
1305 // the thread is acquiring this lock
1306
1307 assert(new_owner == Thread::current(), "Should I be doing this?");
1308 assert(_owner == NULL, "setting the owner thread of an already owned mutex");
1309 _owner = new_owner; // set the owner
1310
1311 // link "this" into the owned locks list
1312
1313 #ifdef ASSERT // Thread::_owned_locks is under the same ifdef
1314 Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
1315 // Mutex::set_owner_implementation is a friend of Thread
1316
1317 assert(this->rank() >= 0, "bad lock rank");
1318
1319 // Deadlock avoidance rules require us to acquire Mutexes only in
1320 // a global total order. For example, if m1 is the lowest ranked mutex
1321 // that the thread holds and m2 is the mutex the thread is trying
1322 // to acquire, then deadlock avoidance rules require that the rank
1323 // of m2 be less than the rank of m1.
1324 // The rank Mutex::native is an exception in that it is not subject
1325 // to the verification rules.
1326 // Here are some further notes relating to mutex acquisition anomalies:
1327 // . it is also ok to acquire Safepoint_lock at the very end while we
1328 // already hold Terminator_lock - may happen because of periodic safepoints
1329 if (this->rank() != Mutex::native &&
1330 this->rank() != Mutex::suspend_resume &&
1331 locks != NULL && locks->rank() <= this->rank() &&
1332 !SafepointSynchronize::is_at_safepoint() &&
1333 !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
1334 SafepointSynchronize::is_synchronizing())) {
1335 new_owner->print_owned_locks();
1336 fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
1337 "possible deadlock", this->name(), this->rank(),
1338 locks->name(), locks->rank());
1339 }
1340
1341 this->_next = new_owner->_owned_locks;
1342 new_owner->_owned_locks = this;
1343 #endif
1344
1345 } else {
1346 // the thread is releasing this lock
1347
1348 Thread* old_owner = _owner;
1349 debug_only(_last_owner = old_owner);
1350
1351 assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
1352 assert(old_owner == Thread::current(), "removing a mutex owned by another thread");
1353
1354 _owner = NULL; // set the owner
1355
1356 #ifdef ASSERT
1357 Monitor *locks = old_owner->owned_locks();
1358
1359 // remove "this" from the owned locks list
1360
1361 Monitor *prev = NULL;
1362 bool found = false;
1363 for (; locks != NULL; prev = locks, locks = locks->next()) {
1364 if (locks == this) {
1365 found = true;
1366 break;
1367 }
1368 }
1369 assert(found, "Removing a lock not owned");
1370 if (prev == NULL) {
1371 old_owner->_owned_locks = _next;
1372 } else {
1373 prev->_next = _next;
1374 }
1375 _next = NULL;
1376 #endif
1377 }
1378 }
1379
1380
1381 // Factored out common sanity checks for locking mutexes. Used by lock() and try_lock().
1382 void Monitor::check_prelock_state(Thread *thread) {
1383 assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
1384 || rank() == Mutex::special, "wrong thread state for using locks");
1385 if (StrictSafepointChecks) {
1386 if (thread->is_VM_thread() && !allow_vm_block()) {
1387 fatal("VM thread using lock %s (not allowed to block on)", name());
1388 }
1389 debug_only(if (rank() != Mutex::special) \
1390 thread->check_for_valid_safepoint_state(false);)
1391 }
1392 assert(!os::ThreadCrashProtection::is_crash_protected(thread),
1393 "locking not allowed when crash protection is set");
1394 }
1395
1396 void Monitor::check_block_state(Thread *thread) {
1397 if (!_allow_vm_block && thread->is_VM_thread()) {
1398 warning("VM thread blocked on lock");
1399 print();
1400 BREAKPOINT;
1401 }
1402 assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
1403 }
1404
1405 #endif // PRODUCT
--- EOF ---