rev 47413 : Introduce SafepointMechanism
1 /*
2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "runtime/atomic.hpp"
27 #include "runtime/interfaceSupport.hpp"
28 #include "runtime/mutex.hpp"
29 #include "runtime/orderAccess.inline.hpp"
30 #include "runtime/osThread.hpp"
31 #include "runtime/thread.inline.hpp"
32 #include "utilities/events.hpp"
33 #include "utilities/macros.hpp"
34
35 // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
36 //
37 // Native Monitor-Mutex locking - theory of operations
38 //
39 // * Native Monitors are completely unrelated to Java-level monitors,
40 // although the "back-end" slow-path implementations share a common lineage.
41 // See objectMonitor:: in synchronizer.cpp.
42 // Native Monitors do *not* support nesting or recursion but otherwise
43 // they're basically Hoare-flavor monitors.
44 //
45 // * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
46 // in the _LockWord from zero to non-zero. Note that the _Owner field
47 // is advisory and is used only to verify that the thread calling unlock()
48 // is indeed the last thread to have acquired the lock.
49 //
50 // * Contending threads "push" themselves onto the front of the contention
51 // queue -- called the cxq -- with CAS and then spin/park.
52 // The _LockWord contains the LockByte as well as the pointer to the head
53 // of the cxq. Colocating the LockByte with the cxq precludes certain races.
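//   A rough sketch of that encoding and of the push (not authoritative -- the
//   definitive logic is in TryLock() and AcquireOrPush() below):
//
//     LockWord = (cxq head pointer) | LockByte            // low-order bit is _LBIT
//     acquire:  CAS(&_LockWord, v, v|_LBIT)               // succeeds only if _LBIT was clear
//     push:     ESelf->ListNext = (ParkEvent*)(v & ~_LBIT);
//               CAS(&_LockWord, v, UNS(ESelf)|_LBIT)      // prepend self, keep lock bit set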
54 //
55 // * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
56 // idioms. We currently use MEMBAR in the uncontended unlock() path, as
57 // MEMBAR often has less latency than CAS. If warranted, we could switch to
58 // a CAS:0 mode, using timers to close the resultant race, as is done
59 // with Java Monitors in synchronizer.cpp.
60 //
61 // See the following for a discussion of the relative cost of atomics (CAS),
62 // MEMBAR, and ways to eliminate such instructions from the common-case paths:
63 // -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
64 // -- http://blogs.sun.com/dave/resource/MustangSync.pdf
65 // -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
66 // -- synchronizer.cpp
67 //
68 // * Overall goals - desiderata
69 // 1. Minimize context switching
70 // 2. Minimize lock migration
71 // 3. Minimize CPI -- affinity and locality
72 // 4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
73 // 5. Minimize outer lock hold times
74 // 6. Behave gracefully on a loaded system
75 //
76 // * Thread flow and list residency:
77 //
78 // Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
79 // [..resident on monitor list..]
80 // [...........contending..................]
81 //
82 // -- The contention queue (cxq) contains recently-arrived threads (RATs).
83 // Threads on the cxq eventually drain into the EntryList.
84 // -- Invariant: a thread appears on at most one list -- cxq, EntryList
85 // or WaitSet -- at any one time.
86 // -- For a given monitor there can be at most one "OnDeck" thread at any
87 // given time, but if need be this particular invariant could be relaxed.
88 //
89 // * The WaitSet and EntryList linked lists are composed of ParkEvents.
90 // I use ParkEvent instead of threads as ParkEvents are immortal and
91 // type-stable, meaning we can safely unpark() a possibly stale
92 // list element in the unlock()-path. (That's benign).
93 //
94 // * Succession policy - providing for progress:
95 //
96 // As necessary, the unlock()ing thread identifies, unlinks, and unparks
97 // an "heir presumptive" tentative successor thread from the EntryList.
98 // This becomes the so-called "OnDeck" thread, of which there can be only
99 // one at any given time for a given monitor. The wakee will recontend
100 // for ownership of the monitor.
101 //
102 // Succession is provided for by a policy of competitive handoff.
103 // The exiting thread does _not_ grant or pass ownership to the
104 // successor thread. (This is also referred to as "handoff succession").
105 // Instead the exiting thread releases ownership and possibly wakes
106 // a successor, so the successor can (re)compete for ownership of the lock.
107 //
108 // Competitive handoff provides excellent overall throughput at the expense
109 // of short-term fairness. If fairness is a concern then one remedy might
110 // be to add an AcquireCounter field to the monitor. After a thread acquires
111 // the lock it will decrement the AcquireCounter field. When the count
112 // reaches 0 the thread would reset the AcquireCounter variable, abdicate
113 // the lock directly to some thread on the EntryList, and then move itself to the
114 // tail of the EntryList.
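//   A purely illustrative sketch of that remedy -- no AcquireCounter field exists
//   in the current implementation, so all names below are hypothetical:
//
//     lock();
//     if (--_AcquireCounter <= 0) {          // hypothetical per-monitor field
//       _AcquireCounter = K;                 // K = some fairness interval
//       PassOwnershipTo(_EntryList);         // abdicate directly to a waiter
//       AppendSelfToTailOf(_EntryList);      // and requeue at the tail
//     }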
115 //
116 // But in practice most threads engage or otherwise participate in resource
117 // bounded producer-consumer relationships, so lock domination is not usually
118 // a practical concern. Recall too, that in general it's easier to construct
119 // a fair lock from a fast lock, but not vice-versa.
120 //
121 // * The cxq can have multiple concurrent "pushers" but only one concurrent
122 // detaching thread. This mechanism is immune to ABA corruption.
123 // More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
124 // We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
125 // thread constraint.
126 //
127 // * Taken together, the cxq and the EntryList constitute or form a
128 // single logical queue of threads stalled trying to acquire the lock.
129 // We use two distinct lists to reduce heat on the list ends.
130 // Threads in lock() enqueue onto cxq while threads in unlock() will
131 // dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
132 // A key desideratum is to minimize queue & monitor metadata manipulation
133 // that occurs while holding the "outer" monitor lock -- that is, we want to
134 // minimize monitor lock hold times.
135 //
136 // The EntryList is ordered by the prevailing queue discipline and
137 // can be organized in any convenient fashion, such as a doubly-linked list or
138 // a circular doubly-linked list. If we need a priority queue then something akin
139 // to Solaris' sleepq would work nicely. Viz.,
140 // -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
141 // -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
142 // Queue discipline is enforced at ::unlock() time, when the unlocking thread
143 // drains the cxq into the EntryList, and orders or reorders the threads on the
144 // EntryList accordingly.
145 //
146 // Barring "lock barging", this mechanism provides fair cyclic ordering,
147 // somewhat similar to an elevator-scan.
148 //
149 // * OnDeck
150 // -- For a given monitor there can be at most one OnDeck thread at any given
151 // instant. The OnDeck thread is contending for the lock, but has been
152 // unlinked from the EntryList and cxq by some previous unlock() operations.
153 // Once a thread has been designated the OnDeck thread it will remain so
154 // until it manages to acquire the lock -- being OnDeck is a stable property.
155 // -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
156 // -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
157 // having cleared the LockByte and dropped the outer lock, attempt to "trylock"
158 // OnDeck by CASing the field from null to non-null. If successful, that thread
159 // is then responsible for progress and succession and can use CAS to detach and
160 // drain the cxq into the EntryList. By convention, only this thread, the holder of
161 // the OnDeck inner lock, can manipulate the EntryList or detach and drain the
162 // RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
163 // we allow multiple concurrent "push" operations but restrict detach concurrency
164 // to at most one thread. Having selected and detached a successor, the thread then
165 // changes the OnDeck to refer to that successor, and then unparks the successor.
166 // That successor will eventually acquire the lock and clear OnDeck. Beware
167 // that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
168 // "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
169 // and then the successor eventually "drops" OnDeck. Note that there's never
170 // any sense of contention on the inner lock, however. Threads never contend
171 // or wait for the inner lock.
172 // -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
173 // http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
174 // In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
175 // TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
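//      As a rough outline (the definitive code is in IUnlock() below), the exiting
//      thread's succession path looks approximately like:
//
//        clear LockByte                                 // drop the outer lock
//        storeload fence
//        if (_OnDeck != NULL) return                    // someone else owns succession
//        if (CAS(&_OnDeck, NULL, _LBIT) fails) return   // lost the inner-lock race
//        w = head of EntryList, or drain cxq into EntryList and take its head
//        _OnDeck = w                                    // pass the OnDeck role to w
//        if (lock not re-acquired meanwhile) w->unpark()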
176 //
177 // * Waiting threads reside on the WaitSet list -- wait() puts
178 // the caller onto the WaitSet. Notify() or notifyAll() simply
179 // transfers threads from the WaitSet to either the EntryList or cxq.
180 // Subsequent unlock() operations will eventually unpark the notifyee.
181 // Unparking a notifyee in notify() proper is inefficient - if we were to do so
182 // it's likely the notifyee would simply impale itself on the lock held
183 // by the notifier.
184 //
185 // * The mechanism is obstruction-free in that if the holder of the transient
186 // OnDeck lock in unlock() is preempted or otherwise stalls, other threads
187 // can still acquire and release the outer lock and continue to make progress.
188 // At worst, waking of already blocked contending threads may be delayed,
189 // but nothing worse. (We only use "trylock" operations on the inner OnDeck
190 // lock).
191 //
192 // * Note that thread-local storage must be initialized before a thread
193 // uses Native monitors or mutexes. The native monitor-mutex subsystem
194 // depends on Thread::current().
195 //
196 // * The monitor synchronization subsystem avoids the use of native
197 // synchronization primitives except for the narrow platform-specific
198 // park-unpark abstraction. See the comments in os_solaris.cpp regarding
199 // the semantics of park-unpark. Put another way, this monitor implementation
200 // depends only on atomic operations and park-unpark. The monitor subsystem
201 // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
202 // underlying OS manages the READY<->RUN transitions.
203 //
204 // * The memory consistency model provided by lock()-unlock() is at least as
205 // strong as the Java Memory Model defined by JSR-133.
206 // That is, we guarantee at least entry consistency, if not stronger.
207 // See http://g.oswego.edu/dl/jmm/cookbook.html.
208 //
209 // * Thread:: currently contains a set of purpose-specific ParkEvents:
210 // _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
211 // the purpose-specific ParkEvents and instead implement a general per-thread
212 // stack of available ParkEvents which we could provision on-demand. The
213 // stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
214 // and ::Release(). A thread would simply pop an element from the local stack before it
215 // enqueued or park()ed. When the contention was over the thread would
216 // push the no-longer-needed ParkEvent back onto its stack.
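//   For illustration only, such a per-thread cache might look like the following
//   (the _ParkEventFreeList and FreeNext names are hypothetical):
//
//     ParkEvent * ev = Self->_ParkEventFreeList;        // pop from the local stack
//     if (ev == NULL) ev = ParkEvent::Allocate(Self);   // miss - fall back to Allocate()
//     else Self->_ParkEventFreeList = ev->FreeNext;
//     ... enqueue or park() using ev ...
//     ev->FreeNext = Self->_ParkEventFreeList;          // push back when contention is over
//     Self->_ParkEventFreeList = ev;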
217 //
218 // * A slightly reduced form of ILock() and IUnlock() has been partially
219 // model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
220 // It'd be interesting to see if TLA/TLC could be useful as well.
221 //
222 // * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
223 // code should never call other code in the JVM that might itself need to
224 // acquire monitors or mutexes. That's true *except* in the case of the
225 // ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
226 // mutator reentry (ingress) by checking for a pending safepoint, in which case it will
227 // call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
228 // In that particular case a call to lock() for a given Monitor can end up recursively
229 // calling lock() on another monitor. While distasteful, this is largely benign
230 // as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
231 //
232 // It's unfortunate that native mutexes and thread state transitions were convolved.
233 // They're really separate concerns and should have remained that way. Melding
234 // them together was facile -- a bit too facile. The current implementation badly
235 // conflates the two concerns.
236 //
237 // * TODO-FIXME:
238 //
239 // -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
240 // We should also add DTRACE probes in the ParkEvent subsystem for
241 // Park-entry, Park-exit, and Unpark.
242 //
243 // -- We have an excess of mutex-like constructs in the JVM, namely:
244 // 1. objectMonitors for Java-level synchronization (synchronizer.cpp)
245 // 2. low-level muxAcquire and muxRelease
246 // 3. low-level spinAcquire and spinRelease
247 // 4. native Mutex:: and Monitor::
248 // 5. jvm_raw_lock() and _unlock()
249 // 6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
250 // similar name.
251 //
252 // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
253
254
255 // CASPTR() uses the canonical argument order that dominates in the literature.
256 // Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
257
258 #define CASPTR(a, c, s) \
259 intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
260 #define UNS(x) (uintptr_t(x))
261 #define TRACE(m) \
262 { \
263 static volatile int ctr = 0; \
264 int x = ++ctr; \
265 if ((x & (x - 1)) == 0) { \
266 ::printf("%d:%s\n", x, #m); \
267 ::fflush(stdout); \
268 } \
269 }
270
271 // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
272 // Bijective except for the trailing mask operation.
273 // Useful for spin loops as the compiler can't optimize it away.
274
275 static inline jint MarsagliaXORV(jint x) {
276 if (x == 0) x = 1|os::random();
277 x ^= x << 6;
278 x ^= ((unsigned)x) >> 21;
279 x ^= x << 7;
280 return x & 0x7FFFFFFF;
281 }
282
283 static int Stall(int its) {
284 static volatile jint rv = 1;
285 volatile int OnFrame = 0;
286 jint v = rv ^ UNS(OnFrame);
287 while (--its >= 0) {
288 v = MarsagliaXORV(v);
289 }
290 // Make this impossible for the compiler to optimize away,
291 // but (mostly) avoid W coherency sharing on MP systems.
292 if (v == 0x12345) rv = v;
293 return v;
294 }
295
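// TryLock() attempts to set the LockByte in _LockWord via CAS, retrying only on
// CAS interference. Returns 1 if the lock was acquired and 0 if the LockByte was
// observed set (the lock is held by some other thread).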
296 int Monitor::TryLock() {
297 intptr_t v = _LockWord.FullWord;
298 for (;;) {
299 if ((v & _LBIT) != 0) return 0;
300 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
301 if (v == u) return 1;
302 v = u;
303 }
304 }
305
306 int Monitor::TryFast() {
307 // Optimistic fast-path form ...
308 // Fast-path attempt for the common uncontended case.
309 // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
310 intptr_t v = CASPTR(&_LockWord, 0, _LBIT); // agro ...
311 if (v == 0) return 1;
312
313 for (;;) {
314 if ((v & _LBIT) != 0) return 0;
315 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
316 if (v == u) return 1;
317 v = u;
318 }
319 }
320
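// ILocked() reports whether the LockByte in _LockWord is currently set,
// i.e., whether some thread holds the outer lock.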
321 int Monitor::ILocked() {
322 const intptr_t w = _LockWord.FullWord & 0xFF;
323 assert(w == 0 || w == _LBIT, "invariant");
324 return w == _LBIT;
325 }
326
327 // Polite TATAS spinlock with exponential backoff - bounded spin.
328 // Ideally we'd use processor cycles, time or vtime to control
329 // the loop, but we currently use iterations.
330 // All the constants within were derived empirically but work over
331 // the spectrum of J2SE reference platforms.
332 // On Niagara-class systems the back-off is unnecessary but
333 // is relatively harmless. (At worst it'll slightly retard
334 // acquisition times). The back-off is critical for older SMP systems
335 // where constant fetching of the LockWord would otherwise impair
336 // scalability.
337 //
338 // Clamp spinning at approximately 1/2 of a context-switch round-trip.
339 // See synchronizer.cpp for details and rationale.
340
341 int Monitor::TrySpin(Thread * const Self) {
342 if (TryLock()) return 1;
343 if (!os::is_MP()) return 0;
344
345 int Probes = 0;
346 int Delay = 0;
347 int Steps = 0;
348 int SpinMax = NativeMonitorSpinLimit;
349 int flgs = NativeMonitorFlags;
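  // NativeMonitorFlags bits consulted below, as interpreted by this loop:
  //   0x2 - skip the delay/stall phase entirely
  //   0x4 - do not abort the spin for a pending safepoint
  //   0x8 - omit the SpinPause() between probes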
350 for (;;) {
351 intptr_t v = _LockWord.FullWord;
352 if ((v & _LBIT) == 0) {
353 if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
354 return 1;
355 }
356 continue;
357 }
358
359 if ((flgs & 8) == 0) {
360 SpinPause();
361 }
362
363 // Periodically increase Delay -- variable Delay form
364 // conceptually: delay *= 1 + 1/Exponent
365 ++Probes;
366 if (Probes > SpinMax) return 0;
367
368 if ((Probes & 0x7) == 0) {
369 Delay = ((Delay << 1)|1) & 0x7FF;
370 // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
371 }
372
373 if (flgs & 2) continue;
374
375 // Consider checking _owner's schedctl state, if OFFPROC abort spin.
376 // If the owner is OFFPROC then it's unlikely that the lock will be dropped
377 // in a timely fashion, which suggests that spinning would not be fruitful
378 // or profitable.
379
380 // Stall for "Delay" time units - iterations in the current implementation.
381 // Avoid generating coherency traffic while stalled.
382 // Possible ways to delay:
383 // PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
384 // wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
385 // Note that on Niagara-class systems we want to minimize STs in the
386 // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
387 // Furthermore, they don't have a W$ like traditional SPARC processors.
388 // We currently use a Marsaglia Shift-Xor RNG loop.
389 Steps += Delay;
390 if (Self != NULL) {
391 jint rv = Self->rng[0];
392 for (int k = Delay; --k >= 0;) {
393 rv = MarsagliaXORV(rv);
394 if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
395 }
396 Self->rng[0] = rv;
397 } else {
398 Stall(Delay);
399 }
400 }
401 }
402
403 static int ParkCommon(ParkEvent * ev, jlong timo) {
404 // Diagnostic support - periodically unwedge blocked threads
405 intx nmt = NativeMonitorTimeout;
406 if (nmt > 0 && (nmt < timo || timo <= 0)) {
407 timo = nmt;
408 }
409 int err = OS_OK;
410 if (0 == timo) {
411 ev->park();
412 } else {
413 err = ev->park(timo);
414 }
415 return err;
416 }
417
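// AcquireOrPush() either acquires the outer lock (returns 1) or, if the LockByte
// is set, prepends ESelf onto the cxq and returns 0. Interference on _LockWord
// simply causes a retry.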
418 inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
419 intptr_t v = _LockWord.FullWord;
420 for (;;) {
421 if ((v & _LBIT) == 0) {
422 const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
423 if (u == v) return 1; // indicate acquired
424 v = u;
425 } else {
426 // Anticipate success ...
427 ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
428 const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
429 if (u == v) return 0; // indicate pushed onto cxq
430 v = u;
431 }
432 // Interference - LockWord change - just retry
433 }
434 }
435
436 // ILock and IWait are the lowest level primitive internal blocking
437 // synchronization functions. The callers of IWait and ILock must have
438 // performed any needed state transitions beforehand.
439 // IWait and ILock may directly call park() without any concern for thread state.
440 // Note that ILock and IWait do *not* access _owner.
441 // _owner is a higher-level logical concept.
442
443 void Monitor::ILock(Thread * Self) {
444 assert(_OnDeck != Self->_MutexEvent, "invariant");
445
446 if (TryFast()) {
447 Exeunt:
448 assert(ILocked(), "invariant");
449 return;
450 }
451
452 ParkEvent * const ESelf = Self->_MutexEvent;
453 assert(_OnDeck != ESelf, "invariant");
454
455 // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT
456 // Synchronizer.cpp uses a similar optimization.
457 if (TrySpin(Self)) goto Exeunt;
458
459 // Slow-path - the lock is contended.
460 // Either Enqueue Self on cxq or acquire the outer lock.
461 // LockWord encoding = (cxq,LOCKBYTE)
462 ESelf->reset();
463 OrderAccess::fence();
464
465 // Optional optimization ... try barging on the inner lock
466 if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
467 goto OnDeck_LOOP;
468 }
469
470 if (AcquireOrPush(ESelf)) goto Exeunt;
471
472 // At any given time there is at most one ondeck thread.
473 // ondeck implies not resident on cxq and not resident on EntryList
474 // Only the OnDeck thread can try to acquire -- contend for -- the lock.
475 // CONSIDER: use Self->OnDeck instead of m->OnDeck.
476 // Deschedule Self so that others may run.
477 while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) {
478 ParkCommon(ESelf, 0);
479 }
480
481 // Self is now in the OnDeck position and will remain so until it
482 // manages to acquire the lock.
483 OnDeck_LOOP:
484 for (;;) {
485 assert(_OnDeck == ESelf, "invariant");
486 if (TrySpin(Self)) break;
487 // It's probably wise to spin only if we *actually* blocked
488 // CONSIDER: check the lockbyte, if it remains set then
489 // preemptively drain the cxq into the EntryList.
490 // The best place and time to perform queue operations -- lock metadata --
491 // is _before having acquired the outer lock, while waiting for the lock to drop.
492 ParkCommon(ESelf, 0);
493 }
494
495 assert(_OnDeck == ESelf, "invariant");
496 _OnDeck = NULL;
497
498 // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
499 // epilogue immediately after having acquired the outer lock.
500 // But instead we could consider the following optimizations:
501 // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
502 // This might avoid potential reacquisition of the inner lock in IUnlock().
503 // B. While still holding the inner lock, attempt to opportunistically select
504 // and unlink the next OnDeck thread from the EntryList.
505 // If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
506 // It's critical that the select-and-unlink operation run in constant-time as
507 // it executes when holding the outer lock and may artificially increase the
508 // effective length of the critical section.
509 // Note that (A) and (B) are tantamount to succession by direct handoff for
510 // the inner lock.
511 goto Exeunt;
512 }
513
514 void Monitor::IUnlock(bool RelaxAssert) {
515 assert(ILocked(), "invariant");
516 // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
517 // before the store that releases the lock. Crucially, all the stores and loads in the
518 // critical section must be globally visible before the store of 0 into the lock-word
519 // that releases the lock becomes globally visible. That is, memory accesses in the
520 // critical section should not be allowed to bypass or overtake the following ST that
521 // releases the lock. As such, to prevent accesses within the critical section
522 // from "leaking" out, we need a release fence between the critical section and the
523 // store that releases the lock. In practice that release barrier is elided on
524 // platforms with strong memory models such as TSO.
525 //
526 // Note that the OrderAccess::storeload() fence that appears after the unlock store
527 // provides for progress conditions and succession and is _not related to exclusion
528 // safety or lock release consistency.
529 OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock
530
531 OrderAccess::storeload();
532 ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
533 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
534 if (w != NULL) {
535 // Either we have a valid ondeck thread or ondeck is transiently "locked"
536 // by some exiting thread as it arranges for succession. The LSBit of
537 // OnDeck allows us to discriminate two cases. If the latter, the
538 // responsibility for progress and succession lies with that other thread.
539 // For good performance, we also depend on the fact that redundant unpark()
540 // operations are cheap. That is, repeated Unpark()ing of the OnDeck thread
541 // is inexpensive. This approach provides implicit futile wakeup throttling.
542 // Note that the referent "w" might be stale with respect to the lock.
543 // In that case the following unpark() is harmless and the worst that'll happen
544 // is a spurious return from a park() operation. Critically, if "w" _is stale,
545 // then progress is known to have occurred as that means the thread associated
546 // with "w" acquired the lock. In that case this thread need take no further
547 // action to guarantee progress.
548 if ((UNS(w) & _LBIT) == 0) w->unpark();
549 return;
550 }
551
552 intptr_t cxq = _LockWord.FullWord;
553 if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
554 return; // normal fast-path exit - cxq and EntryList both empty
555 }
556 if (cxq & _LBIT) {
557 // Optional optimization ...
558 // Some other thread acquired the lock in the window since this
559 // thread released it. Succession is now that thread's responsibility.
560 return;
561 }
562
563 Succession:
564 // Slow-path exit - this thread must ensure succession and progress.
565 // OnDeck serves as lock to protect cxq and EntryList.
566 // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
567 // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
568 // but only one concurrent consumer (detacher of RATs).
569 // Consider protecting this critical section with schedctl on Solaris.
570 // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
571 // picks a successor and marks that thread as OnDeck. That successor
572 // thread will then clear OnDeck once it eventually acquires the outer lock.
573 if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
574 return;
575 }
576
577 ParkEvent * List = _EntryList;
578 if (List != NULL) {
579 // Transfer the head of the EntryList to the OnDeck position.
580 // Once OnDeck, a thread stays OnDeck until it acquires the lock.
581 // For a given lock there is at most one OnDeck thread at any one instant.
582 WakeOne:
583 assert(List == _EntryList, "invariant");
584 ParkEvent * const w = List;
585 assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
586 _EntryList = w->ListNext;
587 // as a diagnostic measure consider setting w->_ListNext = BAD
588 assert(UNS(_OnDeck) == _LBIT, "invariant");
589
590 // Pass OnDeck role to w, ensuring that _EntryList has been set first.
591 // w will clear _OnDeck once it acquires the outer lock.
592 // Note that once we set _OnDeck that thread can acquire the mutex, proceed
593 // with its critical section and then enter this code to unlock the mutex. So
594 // you can have multiple threads active in IUnlock at the same time.
595 OrderAccess::release_store_ptr(&_OnDeck, w);
596
597 // Another optional optimization ...
598 // For heavily contended locks it's not uncommon that some other
599 // thread acquired the lock while this thread was arranging succession.
600 // Try to defer the unpark() operation - Delegate the responsibility
601 // for unpark()ing the OnDeck thread to the current or subsequent owners
602 // That is, the new owner is responsible for unparking the OnDeck thread.
603 OrderAccess::storeload();
604 cxq = _LockWord.FullWord;
605 if (cxq & _LBIT) return;
606
607 w->unpark();
608 return;
609 }
610
611 cxq = _LockWord.FullWord;
612 if ((cxq & ~_LBIT) != 0) {
613 // The EntryList is empty but the cxq is populated.
614 // drain RATs from cxq into EntryList
615 // Detach RATs segment with CAS and then merge into EntryList
616 for (;;) {
617 // optional optimization - if locked, the owner is responsible for succession
618 if (cxq & _LBIT) goto Punt;
619 const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
620 if (vfy == cxq) break;
621 cxq = vfy;
622 // Interference - LockWord changed - Just retry
623 // We can see concurrent interference from contending threads
624 // pushing themselves onto the cxq or from lock-unlock operations.
625 // From the perspective of this thread, EntryList is stable and
626 // the cxq is prepend-only -- the head is volatile but the interior
627 // of the cxq is stable. In theory if we encounter interference from threads
628 // pushing onto cxq we could simply break off the original cxq suffix and
629 // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
630 // on the high-traffic LockWord variable. For instance, let's say the cxq is "ABCD"
631 // when we first fetch cxq above. Between the fetch -- where we observed "A"
632 // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
633 // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
634 // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
635 // Note too, that it's safe for this thread to traverse the cxq
636 // without taking any special concurrency precautions.
637 }
638
639 // We don't currently reorder the cxq segment as we move it onto
640 // the EntryList, but it might make sense to reverse the order
641 // or perhaps sort by thread priority. See the comments in
642 // synchronizer.cpp objectMonitor::exit().
643 assert(_EntryList == NULL, "invariant");
644 _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
645 assert(List != NULL, "invariant");
646 goto WakeOne;
647 }
648
649 // cxq|EntryList is empty.
650 // w == NULL implies that cxq|EntryList == NULL in the past.
651 // Possible race - rare inopportune interleaving.
652 // A thread could have added itself to cxq since this thread previously checked.
653 // Detect and recover by refetching cxq.
654 Punt:
655 assert(UNS(_OnDeck) == _LBIT, "invariant");
656 _OnDeck = NULL; // Release inner lock.
657 OrderAccess::storeload(); // Dekker duality - pivot point
658
659 // Resample LockWord/cxq to recover from possible race.
660 // For instance, while this thread T1 held OnDeck, some other thread T2 might
661 // acquire the outer lock. Another thread T3 might try to acquire the outer
662 // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
663 // outer lock, but skips succession as this thread T1 still holds OnDeck.
664 // T1 is and remains responsible for ensuring succession of T3.
665 //
666 // Note that we don't need to recheck EntryList, just cxq.
667 // If threads moved onto EntryList since we dropped OnDeck
668 // that implies some other thread forced succession.
669 cxq = _LockWord.FullWord;
670 if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
671 goto Succession; // potential race -- re-run succession
672 }
673 return;
674 }
675
676 bool Monitor::notify() {
677 assert(_owner == Thread::current(), "invariant");
678 assert(ILocked(), "invariant");
679 if (_WaitSet == NULL) return true;
680 NotifyCount++;
681
682 // Transfer one thread from the WaitSet to the EntryList or cxq.
683 // Currently we just unlink the head of the WaitSet and prepend to the cxq.
684 // And of course we could just unlink it and unpark it, too, but
685 // in that case it'd likely impale itself on the reentry.
686 Thread::muxAcquire(_WaitLock, "notify:WaitLock");
687 ParkEvent * nfy = _WaitSet;
688 if (nfy != NULL) { // DCL idiom
689 _WaitSet = nfy->ListNext;
690 assert(nfy->Notified == 0, "invariant");
691 // push nfy onto the cxq
692 for (;;) {
693 const intptr_t v = _LockWord.FullWord;
694 assert((v & 0xFF) == _LBIT, "invariant");
695 nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
696 if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
697 // interference - _LockWord changed -- just retry
698 }
699 // Note that setting Notified before pushing nfy onto the cxq is
700 // also legal and safe, but the safety properties are much more
701 // subtle, so for the sake of code stewardship ...
702 OrderAccess::fence();
703 nfy->Notified = 1;
704 }
705 Thread::muxRelease(_WaitLock);
706 if (nfy != NULL && (NativeMonitorFlags & 16)) {
707 // Experimental code ... light up the wakee in the hope that this thread (the owner)
708 // will drop the lock just about the time the wakee comes ONPROC.
709 nfy->unpark();
710 }
711 assert(ILocked(), "invariant");
712 return true;
713 }
714
715 // Currently notifyAll() transfers the waiters one-at-a-time from the waitset
716 // to the cxq. This could be done more efficiently with a single bulk en-masse transfer,
717 // but in practice notifyAll() for large #s of threads is rare and not time-critical.
718 // Beware too, that we invert the order of the waiters. Let's say that the
719 // waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
720 // will be empty and the cxq will be "DCBAXYZ". This is benign, of course.
721
722 bool Monitor::notify_all() {
723 assert(_owner == Thread::current(), "invariant");
724 assert(ILocked(), "invariant");
725 while (_WaitSet != NULL) notify();
726 return true;
727 }
728
729 int Monitor::IWait(Thread * Self, jlong timo) {
730 assert(ILocked(), "invariant");
731
732 // Phases:
733 // 1. Enqueue Self on WaitSet - currently prepend
734 // 2. unlock - drop the outer lock
735 // 3. wait for either notification or timeout
736 // 4. lock - reentry - reacquire the outer lock
737
738 ParkEvent * const ESelf = Self->_MutexEvent;
739 ESelf->Notified = 0;
740 ESelf->reset();
741 OrderAccess::fence();
742
743 // Add Self to WaitSet
744 // Ideally only the holder of the outer lock would manipulate the WaitSet -
745 // That is, the outer lock would implicitly protect the WaitSet.
746 // But if a thread in wait() encounters a timeout it will need to dequeue itself
747 // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
748 // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
749 // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
750 // on the WaitSet can't be allowed to compete for the lock until it has managed to
751 // unlink its ParkEvent from WaitSet. Thus the need for WaitLock.
752 // Contention on the WaitLock is minimal.
753 //
754 // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
755 // thread class. The WaitSet would be composed of WaitEvents. Only the
756 // owner of the outer lock would manipulate the WaitSet. A thread in wait()
757 // could then compete for the outer lock, and then, if necessary, unlink itself
758 // from the WaitSet only after having acquired the outer lock. More precisely,
759 // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
760 // on the WaitSet; release the outer lock; wait for either notification or timeout;
761 // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
762 //
763 // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
764 // One set would be for the WaitSet and one for the EntryList.
765 // We could also deconstruct the ParkEvent into a "pure" event and add a
766 // new immortal/TSM "ListElement" class that referred to ParkEvents.
767 // In that case we could have one ListElement on the WaitSet and another
768 // on the EntryList, with both referring to the same pure Event.
769
770 Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
771 ESelf->ListNext = _WaitSet;
772 _WaitSet = ESelf;
773 Thread::muxRelease(_WaitLock);
774
775 // Release the outer lock
776 // We call IUnlock (RelaxAssert=true) as a thread T1 might
777 // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
778 // and then stall before it can attempt to wake a successor.
779 // Some other thread T2 acquires the lock, and calls notify(), moving
780 // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
781 // and then finds *itself* on the cxq. During the course of a normal
782 // IUnlock() call a thread should _never find itself on the EntryList
783 // or cxq, but in the case of wait() it's possible.
784 // See synchronizer.cpp objectMonitor::wait().
785 IUnlock(true);
786
787 // Wait for either notification or timeout
788 // Beware that in some circumstances we might propagate
789 // spurious wakeups back to the caller.
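// (Setting NativeMonitorFlags bit 0x1 is one such circumstance: the loop below
// then exits after any park() return, whether or not we were notified.)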
790
791 for (;;) {
792 if (ESelf->Notified) break;
793 int err = ParkCommon(ESelf, timo);
794 if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
795 }
796
797 // Prepare for reentry - if necessary, remove ESelf from WaitSet
798 // ESelf can be:
799 // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
800 // 2. On the cxq or EntryList
801 // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
802
803 OrderAccess::fence();
804 int WasOnWaitSet = 0;
805 if (ESelf->Notified == 0) {
806 Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
807 if (ESelf->Notified == 0) { // DCL idiom
808 assert(_OnDeck != ESelf, "invariant"); // can't be both OnDeck and on WaitSet
809 // ESelf is resident on the WaitSet -- unlink it.
810 // A doubly-linked list would be better here so we can unlink in constant-time.
811 // We have to unlink before we potentially recontend as ESelf might otherwise
812 // end up on the cxq|EntryList -- it can't be on two lists at once.
813 ParkEvent * p = _WaitSet;
814 ParkEvent * q = NULL; // classic q chases p
815 while (p != NULL && p != ESelf) {
816 q = p;
817 p = p->ListNext;
818 }
819 assert(p == ESelf, "invariant");
820 if (p == _WaitSet) { // found at head
821 assert(q == NULL, "invariant");
822 _WaitSet = p->ListNext;
823 } else { // found in interior
824 assert(q->ListNext == p, "invariant");
825 q->ListNext = p->ListNext;
826 }
827 WasOnWaitSet = 1; // We were *not* notified but instead encountered timeout
828 }
829 Thread::muxRelease(_WaitLock);
830 }
831
832 // Reentry phase - reacquire the lock
833 if (WasOnWaitSet) {
834 // ESelf was previously on the WaitSet but we just unlinked it above
835 // because of a timeout. ESelf is not resident on any list and is not OnDeck
836 assert(_OnDeck != ESelf, "invariant");
837 ILock(Self);
838 } else {
839 // A prior notify() operation moved ESelf from the WaitSet to the cxq.
840 // ESelf is now on the cxq, EntryList or at the OnDeck position.
841 // The following fragment is extracted from Monitor::ILock()
842 for (;;) {
843 if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
844 ParkCommon(ESelf, 0);
845 }
846 assert(_OnDeck == ESelf, "invariant");
847 _OnDeck = NULL;
848 }
849
850 assert(ILocked(), "invariant");
851 return WasOnWaitSet != 0; // return true IFF timeout
852 }
853
854
855 // ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
856 // In particular, there are certain types of global lock that may be held
857 // by a Java thread while it is blocked at a safepoint but before it has
858 // written the _owner field. These locks may be sneakily acquired by the
859 // VM thread during a safepoint to avoid deadlocks. Alternatively, one should
860 // identify all such locks, and ensure that Java threads never block at
861 // safepoints while holding them (_no_safepoint_check_flag). While it
862 // seems as though this could increase the time to reach a safepoint
863 // (or at least increase the mean, if not the variance), the latter
864 // approach might make for a cleaner, more maintainable JVM design.
865 //
866 // Sneaking is vile and reprehensible and should be excised at the 1st
867 // opportunity. It's possible that the need for sneaking could be obviated
868 // as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
869 // or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and
870 // then (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
871 // it'll stall at the TBIVM reentry state transition after having acquired the
872 // underlying lock, but before having set _owner and having entered the actual
873 // critical section. The lock-sneaking facility leverages that fact and allows the
874 // VM thread to logically acquire locks that had already been physically locked by mutators
875 // but where mutators were known to be blocked by the reentry thread state transition.
876 //
877 // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
878 // wrapped calls to park(), then we could likely do away with sneaking. We'd
879 // decouple lock acquisition and parking. The critical invariant to eliminating
880 // sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
881 // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
882 // One difficulty with this approach is that the TBIVM wrapper could recurse and
883 // call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
884 // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
885 //
886 // But of course the proper ultimate approach is to avoid schemes that require explicit
887 // sneaking or dependence on any clever invariants or subtle implementation properties
888 // of Mutex-Monitor and instead directly address the underlying design flaw.
889
890 void Monitor::lock(Thread * Self) {
891 // Ensure that the Monitor requires/allows safepoint checks.
892 assert(_safepoint_check_required != Monitor::_safepoint_check_never,
893 "This lock should never have a safepoint check: %s", name());
894
895 #ifdef CHECK_UNHANDLED_OOPS
896 // Clear unhandled oops so we get a crash right away. Only clear for non-vm
897 // or GC threads.
898 if (Self->is_Java_thread()) {
899 Self->clear_unhandled_oops();
900 }
901 #endif // CHECK_UNHANDLED_OOPS
902
903 debug_only(check_prelock_state(Self));
904 assert(_owner != Self, "invariant");
905 assert(_OnDeck != Self->_MutexEvent, "invariant");
906
907 if (TryFast()) {
908 Exeunt:
909 assert(ILocked(), "invariant");
910 assert(owner() == NULL, "invariant");
911 set_owner(Self);
912 return;
913 }
914
915 // The lock is contended ...
916
917 bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
918 if (can_sneak && _owner == NULL) {
919 // A Java thread has locked the lock but has not entered the
920 // critical region -- let's just pretend we've locked the lock
921 // and go on. We note this with _snuck so we can also
922 // pretend to unlock when the time comes.
923 _snuck = true;
924 goto Exeunt;
925 }
926
927 // Try a brief spin to avoid passing thru thread state transition ...
928 if (TrySpin(Self)) goto Exeunt;
929
930 check_block_state(Self);
931 if (Self->is_Java_thread()) {
932 // Horribile dictu - we suffer through a state transition
933 assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
934 ThreadBlockInVM tbivm((JavaThread *) Self);
935 ILock(Self);
936 } else {
937 // Mirabile dictu
938 ILock(Self);
939 }
940 goto Exeunt;
941 }
942
943 void Monitor::lock() {
944 this->lock(Thread::current());
945 }
946
947 // Lock without safepoint check - a degenerate variant of lock().
948 // Should ONLY be used by safepoint code and other code
949 // that is guaranteed not to block while running inside the VM. If this is called with
950 // thread state set to be in VM, the safepoint synchronization code will deadlock!
951
952 void Monitor::lock_without_safepoint_check(Thread * Self) {
953 // Ensure that the Monitor does not require or allow safepoint checks.
954 assert(_safepoint_check_required != Monitor::_safepoint_check_always,
955 "This lock should always have a safepoint check: %s", name());
956 assert(_owner != Self, "invariant");
957 ILock(Self);
958 assert(_owner == NULL, "invariant");
959 set_owner(Self);
960 }
961
962 void Monitor::lock_without_safepoint_check() {
963 lock_without_safepoint_check(Thread::current());
964 }
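
// For reference, callers rarely invoke lock()/unlock() or
// lock_without_safepoint_check() directly; they normally use the RAII wrappers
// declared in mutexLocker.hpp. A minimal usage sketch, assuming those helpers
// (SomeMonitor_lock is a placeholder name):
//
//   { MutexLocker ml(SomeMonitor_lock);       // lock() on entry, unlock() on exit
//     ... critical section, safepoint check performed on entry ...
//   }
//   { MutexLockerEx ml(SomeMonitor_lock, Mutex::_no_safepoint_check_flag);
//     ... critical section, no safepoint check ...
//   }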
965
966
967 // Returns true if thread succeeds in grabbing the lock, otherwise false.
968
969 bool Monitor::try_lock() {
970 Thread * const Self = Thread::current();
971 debug_only(check_prelock_state(Self));
972 // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
973
974 // Special case, where all Java threads are stopped.
975 // The lock may have been acquired but _owner is not yet set.
976 // In that case the VM thread can safely grab the lock.
977 // It strikes me this should appear _after the TryLock() fails, below.
978 bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
979 if (can_sneak && _owner == NULL) {
980 set_owner(Self); // Do not need to be atomic, since we are at a safepoint
981 _snuck = true;
982 return true;
983 }
984
985 if (TryLock()) {
986 // We got the lock
987 assert(_owner == NULL, "invariant");
988 set_owner(Self);
989 return true;
990 }
991 return false;
992 }
993
994 void Monitor::unlock() {
995 assert(_owner == Thread::current(), "invariant");
996 assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
997 set_owner(NULL);
998 if (_snuck) {
999 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1000 _snuck = false;
1001 return;
1002 }
1003 IUnlock(false);
1004 }
1005
1006 // Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
1007 // jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
1008 //
1009 // There's no expectation that JVM_RawMonitors will interoperate properly with the native
1010 // Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
1011 // native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
1012 // over a pthread_mutex_t would work equally well, but would require more platform-specific
1013 // code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
1014 // would work too.
1015 //
1016 // Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
1017 // instance available. Instead, we transiently allocate a ParkEvent on-demand if
1018 // we encounter contention. That ParkEvent remains associated with the thread
1019 // until it manages to acquire the lock, at which time we return the ParkEvent
1020 // to the global ParkEvent free list. This is correct and suffices for our purposes.
1021 //
1022 // Beware that the original jvm_raw_unlock() had a "_snuck" test but that
1023 // jvm_raw_lock() didn't have the corresponding test. I suspect that's an
1024 // oversight, but I've replicated the original suspect logic in the new code ...
1025
1026 void Monitor::jvm_raw_lock() {
1027 assert(rank() == native, "invariant");
1028
1029 if (TryLock()) {
1030 Exeunt:
1031 assert(ILocked(), "invariant");
1032 assert(_owner == NULL, "invariant");
1033 // This can potentially be called by non-Java threads. Thus, the Thread::current_or_null()
1034 // might return NULL. Don't call set_owner since it will break on a NULL owner.
1035 // Consider installing a non-null "ANON" distinguished value instead of just NULL.
1036 _owner = Thread::current_or_null();
1037 return;
1038 }
1039
1040 if (TrySpin(NULL)) goto Exeunt;
1041
1042 // slow-path - apparent contention
1043 // Allocate a ParkEvent for transient use.
1044 // The ParkEvent remains associated with this thread until
1045 // the time the thread manages to acquire the lock.
1046 ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
1047 ESelf->reset();
1048 OrderAccess::storeload();
1049
1050 // Either Enqueue Self on cxq or acquire the outer lock.
1051 if (AcquireOrPush (ESelf)) {
1052 ParkEvent::Release(ESelf); // surrender the ParkEvent
1053 goto Exeunt;
1054 }
1055
1056 // At any given time there is at most one ondeck thread.
1057 // ondeck implies not resident on cxq and not resident on EntryList
1058 // Only the OnDeck thread can try to acquire -- contend for -- the lock.
1059 // CONSIDER: use Self->OnDeck instead of m->OnDeck.
1060 for (;;) {
1061 if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
1062 ParkCommon(ESelf, 0);
1063 }
1064
1065 assert(_OnDeck == ESelf, "invariant");
1066 _OnDeck = NULL;
1067 ParkEvent::Release(ESelf); // surrender the ParkEvent
1068 goto Exeunt;
1069 }
1070
1071 void Monitor::jvm_raw_unlock() {
1072 // Nearly the same as Monitor::unlock() ...
1073 // directly set _owner instead of using set_owner(null)
1074 _owner = NULL;
1075 if (_snuck) { // ???
1076 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1077 _snuck = false;
1078 return;
1079 }
1080 IUnlock(false);
1081 }
1082
1083 bool Monitor::wait(bool no_safepoint_check, long timeout,
1084 bool as_suspend_equivalent) {
1085 // Make sure safepoint checking is used properly.
1086 assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
1087 "This lock should never have a safepoint check: %s", name());
1088 assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
1089 "This lock should always have a safepoint check: %s", name());
1090
1091 Thread * const Self = Thread::current();
1092 assert(_owner == Self, "invariant");
1093 assert(ILocked(), "invariant");
1094
1095 // as_suspend_equivalent logically implies !no_safepoint_check
1096 guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
1097 // !no_safepoint_check logically implies java_thread
1098 guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
1099
1100 #ifdef ASSERT
1101 Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
1102 assert(least != this, "Specification of get_least_... call above");
1103 if (least != NULL && least->rank() <= special) {
1104 tty->print("Attempting to wait on monitor %s/%d while holding"
1105 " lock %s/%d -- possible deadlock",
1106 name(), rank(), least->name(), least->rank());
1107 assert(false, "Shouldn't block(wait) while holding a lock of rank special");
1108 }
1109 #endif // ASSERT
1110
1111 int wait_status;
1112 // conceptually set the owner to NULL in anticipation of
1113 // abdicating the lock in wait
1114 set_owner(NULL);
1115 if (no_safepoint_check) {
1116 wait_status = IWait(Self, timeout);
1117 } else {
1118 assert(Self->is_Java_thread(), "invariant");
1119 JavaThread *jt = (JavaThread *)Self;
1120
1121 // Enter safepoint region - ornate and Rococo ...
1122 ThreadBlockInVM tbivm(jt);
1123 OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
1124
1125 if (as_suspend_equivalent) {
1126 jt->set_suspend_equivalent();
1127 // cleared by handle_special_suspend_equivalent_condition() or
1128 // java_suspend_self()
1129 }
1130
1131 wait_status = IWait(Self, timeout);
1132
1133 // were we externally suspended while we were waiting?
1134 if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
1135 // Our event wait has finished and we own the lock, but
1136 // while we were waiting another thread suspended us. We don't
1137 // want to hold the lock while suspended because that
1138 // would surprise the thread that suspended us.
1139 assert(ILocked(), "invariant");
1140 IUnlock(true);
1141 jt->java_suspend_self();
1142 ILock(Self);
1143 assert(ILocked(), "invariant");
1144 }
1145 }
1146
1147 // Conceptually reestablish ownership of the lock.
1148 // The "real" lock -- the LockByte -- was reacquired by IWait().
1149 assert(ILocked(), "invariant");
1150 assert(_owner == NULL, "invariant");
1151 set_owner(Self);
1152 return wait_status != 0; // return true IFF timeout
1153 }
1154
1155 Monitor::~Monitor() {
1156 #ifdef ASSERT
1157 uintptr_t owner = UNS(_owner);
1158 uintptr_t lockword = UNS(_LockWord.FullWord);
1159 uintptr_t entrylist = UNS(_EntryList);
1160 uintptr_t waitset = UNS(_WaitSet);
1161 uintptr_t ondeck = UNS(_OnDeck);
1162 // Print _name with precision limit, in case failure is due to memory
1163 // corruption that also trashed _name.
1164 assert((owner|lockword|entrylist|waitset|ondeck) == 0,
1165 "%.*s: _owner(" INTPTR_FORMAT ")|_LockWord(" INTPTR_FORMAT ")|_EntryList(" INTPTR_FORMAT ")|_WaitSet("
1166 INTPTR_FORMAT ")|_OnDeck(" INTPTR_FORMAT ") != 0",
1167 MONITOR_NAME_LEN, _name, owner, lockword, entrylist, waitset, ondeck);
1168 #endif
1169 }
1170
1171 void Monitor::ClearMonitor(Monitor * m, const char *name) {
1172 m->_owner = NULL;
1173 m->_snuck = false;
1174 if (name == NULL) {
1175 strcpy(m->_name, "UNKNOWN");
1176 } else {
1177 strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
1178 m->_name[MONITOR_NAME_LEN - 1] = '\0';
1179 }
1180 m->_LockWord.FullWord = 0;
1181 m->_EntryList = NULL;
1182 m->_OnDeck = NULL;
1183 m->_WaitSet = NULL;
1184 m->_WaitLock[0] = 0;
1185 }
1186
1187 Monitor::Monitor() { ClearMonitor(this); }
1188
1189 Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
1190 SafepointCheckRequired safepoint_check_required) {
1191 ClearMonitor(this, name);
1192 #ifdef ASSERT
1193 _allow_vm_block = allow_vm_block;
1194 _rank = Rank;
1195 NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1196 #endif
1197 }
1198
1199 Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
1200 SafepointCheckRequired safepoint_check_required) {
1201 ClearMonitor((Monitor *) this, name);
1202 #ifdef ASSERT
1203 _allow_vm_block = allow_vm_block;
1204 _rank = Rank;
1205 NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
1206 #endif
1207 }
1208
1209 bool Monitor::owned_by_self() const {
1210 bool ret = _owner == Thread::current();
1211 assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
1212 return ret;
1213 }
1214
1215 void Monitor::print_on_error(outputStream* st) const {
1216 st->print("[" PTR_FORMAT, p2i(this));
1217 st->print("] %s", _name);
1218 st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
1219 }
1220
1221
1222
1223
1224 // ----------------------------------------------------------------------------------
1225 // Non-product code
1226
1227 #ifndef PRODUCT
1228 void Monitor::print_on(outputStream* st) const {
1229 st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
1230 p2i(this), _LockWord.FullWord, _name, p2i(_owner));
1231 }
1232 #endif
1233
1234 #ifndef PRODUCT
1235 #ifdef ASSERT
1236 Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
1237 Monitor *res, *tmp;
1238 for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
1239 if (tmp->rank() < res->rank()) {
1240 res = tmp;
1241 }
1242 }
1243 if (!SafepointSynchronize::is_at_safepoint()) {
1244 // In this case, we expect the held locks to be
1245 // in increasing rank order (modulo any native ranks)
1246 for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1247 if (tmp->next() != NULL) {
1248 assert(tmp->rank() == Mutex::native ||
1249 tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1250 }
1251 }
1252 }
1253 return res;
1254 }
1255
1256 Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
1257 Monitor *res, *tmp;
1258 for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
1259 if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
1260 res = tmp;
1261 }
1262 }
1263 if (!SafepointSynchronize::is_at_safepoint()) {
1264 // In this case, we expect the held locks to be
1265 // in increasing rank order (modulo any native ranks)
1266 for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
1267 if (tmp->next() != NULL) {
1268 assert(tmp->rank() == Mutex::native ||
1269 tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
1270 }
1271 }
1272 }
1273 return res;
1274 }
1275
1276
1277 bool Monitor::contains(Monitor* locks, Monitor * lock) {
1278 for (; locks != NULL; locks = locks->next()) {
1279 if (locks == lock) {
1280 return true;
1281 }
1282 }
1283 return false;
1284 }
1285 #endif
1286
1287 // Called immediately after lock acquisition or release as a diagnostic
1288 // to track the lock-set of the thread and test for rank violations that
1289 // might indicate exposure to deadlock.
1290 // Rather like an EventListener for _owner (:>).
1291
1292 void Monitor::set_owner_implementation(Thread *new_owner) {
1293 // This function is solely responsible for maintaining
1294 // and checking the invariant that threads and locks
1295 // are in a 1/N relation, with some locks unowned.
1296 // It uses the Mutex::_owner, Mutex::_next, and
1297 // Thread::_owned_locks fields, and no other function
1298 // changes those fields.
1299 // It is illegal to set the mutex from one non-NULL
1300 // owner to another--it must be owned by NULL as an
1301 // intermediate state.
1302
1303 if (new_owner != NULL) {
1304 // the thread is acquiring this lock
1305
1306 assert(new_owner == Thread::current(), "Should I be doing this?");
1307 assert(_owner == NULL, "setting the owner thread of an already owned mutex");
1308 _owner = new_owner; // set the owner
1309
1310 // link "this" into the owned locks list
1311
1312 #ifdef ASSERT // Thread::_owned_locks is under the same ifdef
1313 Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
1314 // Mutex::set_owner_implementation is a friend of Thread
1315
1316 assert(this->rank() >= 0, "bad lock rank");
1317
1318 // Deadlock avoidance rules require us to acquire Mutexes only in
1319 // a global total order. For example, if m1 is the lowest ranked mutex
1320 // that the thread holds and m2 is the mutex the thread is trying
1321 // to acquire, then deadlock avoidance rules require that the rank
1322 // of m2 be less than the rank of m1.
1323 // The rank Mutex::native is an exception in that it is not subject
1324 // to the verification rules.
1325 // Here are some further notes relating to mutex acquisition anomalies:
1326 // . it is also ok to acquire Safepoint_lock at the very end while we
1327 // already hold Terminator_lock - may happen because of periodic safepoints
1328 if (this->rank() != Mutex::native &&
1329 this->rank() != Mutex::suspend_resume &&
1330 locks != NULL && locks->rank() <= this->rank() &&
1331 !SafepointSynchronize::is_at_safepoint() &&
1332 !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
1333 SafepointSynchronize::is_synchronizing())) {
1334 new_owner->print_owned_locks();
1335 fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
1336 "possible deadlock", this->name(), this->rank(),
1337 locks->name(), locks->rank());
1338 }
1339
1340 this->_next = new_owner->_owned_locks;
1341 new_owner->_owned_locks = this;
1342 #endif
1343
1344 } else {
1345 // the thread is releasing this lock
1346
1347 Thread* old_owner = _owner;
1348 debug_only(_last_owner = old_owner);
1349
1350 assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
1351 assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");
1352
1353 _owner = NULL; // set the owner
1354
1355 #ifdef ASSERT
1356 Monitor *locks = old_owner->owned_locks();
1357
1358 // remove "this" from the owned locks list
1359
1360 Monitor *prev = NULL;
1361 bool found = false;
1362 for (; locks != NULL; prev = locks, locks = locks->next()) {
1363 if (locks == this) {
1364 found = true;
1365 break;
1366 }
1367 }
1368 assert(found, "Removing a lock not owned");
1369 if (prev == NULL) {
1370 old_owner->_owned_locks = _next;
1371 } else {
1372 prev->_next = _next;
1373 }
1374 _next = NULL;
1375 #endif
1376 }
1377 }
1378
1379
1380 // Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock()
1381 void Monitor::check_prelock_state(Thread *thread) {
1382 assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
1383 || rank() == Mutex::special, "wrong thread state for using locks");
1384 if (StrictSafepointChecks) {
1385 if (thread->is_VM_thread() && !allow_vm_block()) {
1386 fatal("VM thread using lock %s (not allowed to block on)", name());
1387 }
1388 debug_only(if (rank() != Mutex::special) \
1389 thread->check_for_valid_safepoint_state(false);)
1390 }
1391 assert(!os::ThreadCrashProtection::is_crash_protected(thread),
1392 "locking not allowed when crash protection is set");
1393 }
1394
1395 void Monitor::check_block_state(Thread *thread) {
1396 if (!_allow_vm_block && thread->is_VM_thread()) {
1397 warning("VM thread blocked on lock");
1398 print();
1399 BREAKPOINT;
1400 }
1401 assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
1402 }
1403
1404 #endif // PRODUCT
--- EOF ---