223 //
224 // * See also http://blogs.sun.com/dave
225
226
227 void* ObjectMonitor::operator new (size_t size) throw() {
228 return AllocateHeap(size, mtInternal);
229 }
230 void* ObjectMonitor::operator new[] (size_t size) throw() {
231 return operator new (size);
232 }
// Release storage obtained from AllocateHeap() in operator new above.
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form: delegate to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
// Slow-path monitor enter: acquire this monitor for the calling thread.
// Handles, in order: uncontended CAS acquire, recursive re-entry,
// conversion of a thin/stack lock owned by this thread, one optional
// spin, and finally the blocking path with JVMTI/DTrace/JFR reporting.
void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  // Fast path: CAS the owner field from NULL to Self.
  void* cur = try_set_owner_from(NULL, Self);
  if (cur == NULL) {
    assert(_recursions == 0, "invariant");
    return;
  }

  if (cur == Self) {
    // Recursive re-entry by the current owner.
    // TODO-FIXME: check for integer overflow! BUGID 6557169.
    _recursions++;
    return;
  }

  if (Self->is_lock_owned((address)cur)) {
    // _owner still holds a BasicLock* from a stack-lock this thread owns.
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
    return;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions. The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
    assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
    assert(((oop)object())->mark() == markWord::encode(this),
           "object mark must match encoded this: mark=" INTPTR_FORMAT
           ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
           markWord::encode(this).value());
    Self->_Stalled = 0;
    return;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_contentions >= 0, "invariant");

  // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc(&_contentions);

  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // NOTE(review): intervening lines are missing from this excerpt
      // (the enter/park retry loop and suspend-check live there).
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_contentions);
  assert(_contentions >= 0, "invariant");
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial. All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section. Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock. While that thread is spinning we could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }
  if (event.should_commit()) {
    event.set_previousOwner((uintptr_t)_previous_owner_tid);
    event.commit();
  }
  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}
397
398 // Caveat: TryLock() is not necessarily serializing if it returns failure.
399 // Callers must compensate as needed.
400
401 int ObjectMonitor::TryLock(Thread * Self) {
402 void * own = _owner;
403 if (own != NULL) return 0;
404 if (try_set_owner_from(NULL, Self) == NULL) {
405 assert(_recursions == 0, "invariant");
406 return 1;
407 }
408 // The lock had been free momentarily, but we lost the race to the lock.
409 // Interference -- the CAS failed.
410 // We can either return -1 or retry.
411 // Retry doesn't make as much sense because the lock was just acquired.
412 return -1;
413 }
414
415 // Convert the fields used by is_busy() to a string that can be
416 // used for diagnostic output.
417 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
418 ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
419 ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
420 _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
421 return ss->base();
422 }
423
424 #define MAX_RECHECK_INTERVAL 1000
425
// Contended-enter worker: called with the thread already in the
// _thread_blocked state. Retries TryLock/TrySpin and parks between
// attempts until the monitor is acquired.
void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS (test-and-test-and-set)
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner. This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  // NOTE(review): intervening lines are missing from this excerpt; the
  // cxq enqueue and _Responsible election code lives there, and it
  // declares recheckInterval and nWakeups used below.

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // The Responsible thread parks with a timeout so a stranded
      // monitor is eventually recovered (see the 1-0 exit notes below).
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning. That's harmless. If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // NOTE(review): the remainder of the retry loop and the dequeue /
    // cleanup code is missing from this excerpt; only the debug-build
    // poisoning of the dequeued ObjectWaiter node remains below.
  SelfNode->_prev = (ObjectWaiter *) 0xBAD;
  SelfNode->_next = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}
804
805 // -----------------------------------------------------------------------------
806 // Exit support
807 //
808 // exit()
809 // ~~~~~~
810 // Note that the collector can't reclaim the objectMonitor or deflate
811 // the object out from underneath the thread calling ::exit() as the
812 // thread calling ::exit() never transitions to a stable state.
813 // This inhibits GC, which in turn inhibits asynchronous (and
814 // inopportune) reclamation of "this".
815 //
816 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
817 // There's one exception to the claim above, however. EnterI() can call
818 // exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state == _thread_blocked,
820 // but the monitor's _contentions field is > 0, which inhibits reclamation.
821 //
822 // 1-0 exit
823 // ~~~~~~~~
824 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
825 // the fast-path operators have been optimized so the common ::exit()
826 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
827 // The code emitted by fast_unlock() elides the usual MEMBAR. This
828 // greatly improves latency -- MEMBAR and CAS having considerable local
829 // latency on modern processors -- but at the cost of "stranding". Absent the
830 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
832 // and a progress-liveness failure. Stranding is extremely rare.
833 // We use timers (timed park operations) & periodic polling to detect
834 // and recover from stranding. Potentially stranded threads periodically
835 // wake up and poll the lock. See the usage of the _Responsible variable.
836 //
837 // The CAS() in enter provides for safety and exclusion, while the CAS or
838 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
839 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
1074 }
1075
1076
// Release the monitor and wake the chosen successor. Called by the
// exiting owner; ordering of the steps below is correctness-critical.
void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(Self);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1105
1106
1107 // -----------------------------------------------------------------------------
1108 // Class Loader deadlock handling.
1109 //
1110 // complete_exit exits a lock returning recursion count
1111 // complete_exit/reenter operate as a wait without waiting
1112 // complete_exit requires an inflated monitor
1113 // The _owner field is not always the Thread addr even with an
1114 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1122
1123 void* cur = Atomic::load(&_owner);
1124 if (THREAD != cur) {
1125 if (THREAD->is_lock_owned((address)cur)) {
1126 assert(_recursions == 0, "internal state error");
1127 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
1128 _recursions = 0;
1129 }
1130 }
1131
1132 guarantee(Self == _owner, "complete_exit not owner");
1133 intx save = _recursions; // record the old recursion count
1134 _recursions = 0; // set the recursion level to be 0
1135 exit(true, Self); // exit the monitor
1136 guarantee(_owner != Self, "invariant");
1137 return save;
1138 }
1139
1140 // reenter() enters a lock and sets recursion count
1141 // complete_exit/reenter operate as a wait without waiting
1142 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1143 Thread * const Self = THREAD;
1144 assert(Self->is_Java_thread(), "Must be Java thread!");
1145 JavaThread *jt = (JavaThread *)THREAD;
1146
1147 guarantee(_owner != Self, "reenter already owner");
1148 enter(THREAD); // enter the monitor
1149 guarantee(_recursions == 0, "reenter recursion");
1150 _recursions = recursions;
1151 return;
1152 }
1153
1154 // Checks that the current THREAD owns this monitor and causes an
1155 // immediate return if it doesn't. We don't use the CHECK macro
1156 // because we want the IMSE to be the only exception that is thrown
1157 // from the call site when false is returned. Any other pending
1158 // exception is ignored.
1159 #define CHECK_OWNER() \
1160 do { \
1161 if (!check_owner(THREAD)) { \
1162 assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1163 return; \
1164 } \
1165 } while (false)
1166
1167 // Returns true if the specified thread owns the ObjectMonitor.
1168 // Otherwise returns false and throws IllegalMonitorStateException
1169 // (IMSE). If there is a pending exception and the specified thread
1170 // is not the owner, that exception will be replaced by the IMSE.
1171 bool ObjectMonitor::check_owner(Thread* THREAD) {
1945 }
1946
1947 DEBUG_ONLY(InitDone = true;)
1948 }
1949
1950 void ObjectMonitor::print_on(outputStream* st) const {
1951 // The minimal things to print for markWord printing, more can be added for debugging and logging.
1952 st->print("{contentions=0x%08x,waiters=0x%08x"
1953 ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
1954 contentions(), waiters(), recursions(),
1955 p2i(owner()));
1956 }
1957 void ObjectMonitor::print() const { print_on(tty); }
1958
1959 #ifdef ASSERT
1960 // Print the ObjectMonitor like a debugger would:
1961 //
1962 // (ObjectMonitor) 0x00007fdfb6012e40 = {
1963 // _header = 0x0000000000000001
1964 // _object = 0x000000070ff45fd0
1965 // _next_om = 0x0000000000000000
1966 // _pad_buf0 = {
1967 // [0] = '\0'
1968 // ...
1969 // [103] = '\0'
1970 // }
1971 // _owner = 0x0000000000000000
1972 // _previous_owner_tid = 0
1973 // _recursions = 0
1974 // _EntryList = 0x0000000000000000
1975 // _cxq = 0x0000000000000000
1976 // _succ = 0x0000000000000000
1977 // _Responsible = 0x0000000000000000
1978 // _Spinner = 0
1979 // _SpinDuration = 5000
1980 // _contentions = 0
1981 // _WaitSet = 0x0000700009756248
1982 // _waiters = 1
1983 // _WaitSetLock = 0
1984 // }
1985 //
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  // Fields are printed in declaration order, mimicking a debugger's
  // struct dump; the exact layout is shown in the comment above.
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr("  _header = " INTPTR_FORMAT, header().value());
  st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
  st->print_cr("  _pad_buf0 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  // Print only the last index; the padding bytes are all NUL.
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr("  }");
  st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
  st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr("  _Spinner = %d", _Spinner);
  st->print_cr("  _SpinDuration = %d", _SpinDuration);
  st->print_cr("  _contentions = %d", _contentions);
  st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr("  _waiters = %d", _waiters);
  st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
2011 #endif
|
223 //
224 // * See also http://blogs.sun.com/dave
225
226
// Allocate ObjectMonitor storage from the C heap (mtInternal accounting).
void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
// Array form: delegate to the scalar operator new above.
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
// Release storage obtained from AllocateHeap() in operator new above.
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form: delegate to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
// Slow-path monitor enter. Returns true when the monitor is acquired.
// Returns false only when AsyncDeflateIdleMonitors is enabled and this
// monitor lost the race with async deflation; the caller must then
// retry the enter (typically after re-inflating the lock).
bool ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  // Fast path: CAS the owner field from NULL to Self.
  void* cur = try_set_owner_from(NULL, Self);
  if (cur == NULL) {
    assert(_recursions == 0, "invariant");
    return true;
  }

  if (cur == Self) {
    // Recursive re-entry by the current owner.
    // TODO-FIXME: check for integer overflow! BUGID 6557169.
    _recursions++;
    return true;
  }

  if (Self->is_lock_owned((address)cur)) {
    // _owner still holds a BasicLock* from a stack-lock this thread owns.
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
    return true;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions. The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
    assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
    assert(((oop)object())->mark() == markWord::encode(this),
           "object mark must match encoded this: mark=" INTPTR_FORMAT
           ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
           markWord::encode(this).value());
    Self->_Stalled = 0;
    return true;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  // Under async deflation the deflater can clear the object ref and
  // drive contentions negative, so these invariants only hold when
  // AsyncDeflateIdleMonitors is off.
  assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
  assert(AsyncDeflateIdleMonitors || contentions() >= 0, "must not be negative: contentions=%d", contentions());

  // Keep track of contention for JVM/TI and M&M queries.
  Atomic::inc(&_contentions);
  if (AsyncDeflateIdleMonitors && is_being_async_deflated()) {
    // Async deflation is in progress and our contentions increment
    // above lost the race to async deflation. Undo the work and
    // force the caller to retry.
    const oop l_object = (oop)object();
    if (l_object != NULL) {
      // Attempt to restore the header/dmw to the object's header so that
      // we only retry once if the deflater thread happens to be slow.
      install_displaced_markword_in_object(l_object);
    }
    Self->_Stalled = 0;
    Atomic::dec(&_contentions);
    return false;
  }

  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // NOTE(review): intervening lines are missing from this excerpt
      // (the enter/park retry loop and suspend-check live there).
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_contentions);
  assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial. All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section. Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock. While that thread is spinning we could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }
  if (event.should_commit()) {
    event.set_previousOwner((uintptr_t)_previous_owner_tid);
    event.commit();
  }
  OM_PERFDATA_OP(ContendedLockAttempts, inc());
  return true;
}
411
412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
413 // Callers must compensate as needed.
414
// One-shot, non-blocking acquisition attempt.
// Returns 1 on success, 0 if the lock was visibly held, -1 when the
// lock looked free but the CAS lost the race.
int ObjectMonitor::TryLock(Thread * Self) {
  // Cheap read first: fail fast if somebody already owns the lock.
  void * own = _owner;
  if (own != NULL) return 0;
  // The lock appears free -- race for it with a single CAS.
  if (try_set_owner_from(NULL, Self) == NULL) {
    assert(_recursions == 0, "invariant");
    return 1;
  }
  // The lock had been free momentarily, but we lost the race to the lock.
  // Interference -- the CAS failed.
  // We can either return -1 or retry.
  // Retry doesn't make as much sense because the lock was just acquired.
  return -1;
}
428
// Install the displaced mark word (dmw) of a deflating ObjectMonitor
// into the header of the object associated with the monitor. This
// idempotent method is called by a thread that is deflating a
// monitor and by other threads that have detected a race with the
// deflation process.
void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
  // This function must only be called when (owner == DEFLATER_MARKER
  // && contentions <= 0), but we can't guarantee that here because
  // those values could change when the ObjectMonitor gets moved from
  // the global free list to a per-thread free list.

  guarantee(obj != NULL, "must be non-NULL");

  const oop l_object = (oop)object();
  if (l_object == NULL) {
    // ObjectMonitor's object ref has already been cleared by async
    // deflation so we're done here.
    return;
  }
  ADIM_guarantee(l_object == obj, "object=" INTPTR_FORMAT " must equal obj="
                 INTPTR_FORMAT, p2i(l_object), p2i(obj));

  markWord dmw = header();
  // The dmw has to be neutral (not NULL, not locked and not marked).
  ADIM_guarantee(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());

  // Install displaced mark word if the object's header still points
  // to this ObjectMonitor. All racing callers to this function will
  // reach this point, but only one can win.
  markWord res = obj->cas_set_mark(dmw, markWord::encode(this));
  if (res != markWord::encode(this)) {
    // Losing the CAS is benign (another racer already restored the
    // header). This should be rare so log at the Info level when it happens.
    log_info(monitorinflation)("install_displaced_markword_in_object: "
                               "failed cas_set_mark: new_mark=" INTPTR_FORMAT
                               ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
                               dmw.value(), markWord::encode(this).value(),
                               res.value());
  }

  // Note: It does not matter which thread restored the header/dmw
  // into the object's header. The thread deflating the monitor just
  // wanted the object's header restored and it is. The threads that
  // detected a race with the deflation process also wanted the
  // object's header restored before they retry their operation and
  // because it is restored they will only retry once.
}
475
476 // Convert the fields used by is_busy() to a string that can be
477 // used for diagnostic output.
478 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
479 ss->print("is_busy: waiters=%d, ", _waiters);
480 if (!AsyncDeflateIdleMonitors) {
481 ss->print("contentions=%d, ", contentions());
482 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
483 } else {
484 if (contentions() > 0) {
485 ss->print("contentions=%d, ", contentions());
486 } else {
487 ss->print("contentions=0");
488 }
489 if (_owner != DEFLATER_MARKER) {
490 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
491 } else {
492 // We report NULL instead of DEFLATER_MARKER here because is_busy()
493 // ignores DEFLATER_MARKER values.
494 ss->print("owner=" INTPTR_FORMAT, NULL);
495 }
496 }
497 ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
498 p2i(_EntryList));
499 return ss->base();
500 }
501
502 #define MAX_RECHECK_INTERVAL 1000
503
// Contended-enter worker: called with the thread already in the
// _thread_blocked state. Retries TryLock/TrySpin and parks between
// attempts until the monitor is acquired, cooperating with the async
// deflation protocol via DEFLATER_MARKER.
void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS (test-and-test-and-set)
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  if (AsyncDeflateIdleMonitors &&
      try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
    // The deflation protocol finished the first part (setting owner),
    // but it failed the second part (making contentions negative) and
    // bailed. Acquired the monitor.
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner. This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  // NOTE(review): intervening lines are missing from this excerpt; the
  // cxq enqueue and _Responsible election code lives there, and it
  // declares recheckInterval and nWakeups used below.

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // The Responsible thread parks with a timeout so a stranded
      // monitor is eventually recovered (see the 1-0 exit notes below).
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    if (AsyncDeflateIdleMonitors &&
        try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
      // The deflation protocol finished the first part (setting owner),
      // but it failed the second part (making contentions negative) and
      // bailed. Acquired the monitor.
      break;
    }

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning. That's harmless. If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // NOTE(review): the remainder of the retry loop and the dequeue /
    // cleanup code is missing from this excerpt; only the debug-build
    // poisoning of the dequeued ObjectWaiter node remains below.
  SelfNode->_prev = (ObjectWaiter *) 0xBAD;
  SelfNode->_next = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}
900
901 // -----------------------------------------------------------------------------
902 // Exit support
903 //
904 // exit()
905 // ~~~~~~
906 // Note that the collector can't reclaim the objectMonitor or deflate
907 // the object out from underneath the thread calling ::exit() as the
908 // thread calling ::exit() never transitions to a stable state.
909 // This inhibits GC, which in turn inhibits asynchronous (and
910 // inopportune) reclamation of "this".
911 //
912 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
913 // There's one exception to the claim above, however. EnterI() can call
914 // exit() to drop a lock if the acquirer has been externally suspended.
915 // In that case exit() is called with _thread_state == _thread_blocked,
916 // but the monitor's _contentions field is > 0, which inhibits reclamation.
917 //
918 // 1-0 exit
919 // ~~~~~~~~
920 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
921 // the fast-path operators have been optimized so the common ::exit()
922 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
923 // The code emitted by fast_unlock() elides the usual MEMBAR. This
924 // greatly improves latency -- MEMBAR and CAS having considerable local
925 // latency on modern processors -- but at the cost of "stranding". Absent the
926 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
928 // and a progress-liveness failure. Stranding is extremely rare.
929 // We use timers (timed park operations) & periodic polling to detect
930 // and recover from stranding. Potentially stranded threads periodically
931 // wake up and poll the lock. See the usage of the _Responsible variable.
932 //
933 // The CAS() in enter provides for safety and exclusion, while the CAS or
934 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
935 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
1170 }
1171
1172
// Designate Wakee as the heir presumptive (_succ), drop the monitor, and
// unpark the wakee's ParkEvent so it can retry the acquisition.
void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  // Capture the ParkEvent now -- it must outlive our use of Wakee (see below).
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock.
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(Self);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1201
1202
1203 // -----------------------------------------------------------------------------
1204 // Class Loader deadlock handling.
1205 //
1206 // complete_exit exits a lock returning recursion count
1207 // complete_exit/reenter operate as a wait without waiting
1208 // complete_exit requires an inflated monitor
1209 // The _owner field is not always the Thread addr even with an
1210 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1218
1219 void* cur = Atomic::load(&_owner);
1220 if (THREAD != cur) {
1221 if (THREAD->is_lock_owned((address)cur)) {
1222 assert(_recursions == 0, "internal state error");
1223 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
1224 _recursions = 0;
1225 }
1226 }
1227
1228 guarantee(Self == _owner, "complete_exit not owner");
1229 intx save = _recursions; // record the old recursion count
1230 _recursions = 0; // set the recursion level to be 0
1231 exit(true, Self); // exit the monitor
1232 guarantee(_owner != Self, "invariant");
1233 return save;
1234 }
1235
1236 // reenter() enters a lock and sets recursion count
1237 // complete_exit/reenter operate as a wait without waiting
1238 bool ObjectMonitor::reenter(intx recursions, TRAPS) {
1239 Thread * const Self = THREAD;
1240 assert(Self->is_Java_thread(), "Must be Java thread!");
1241 JavaThread *jt = (JavaThread *)THREAD;
1242
1243 guarantee(_owner != Self, "reenter already owner");
1244 if (!enter(THREAD)) {
1245 return false;
1246 }
1247 // Entered the monitor.
1248 guarantee(_recursions == 0, "reenter recursion");
1249 _recursions = recursions;
1250 return true;
1251 }
1252
// Checks that the current THREAD owns this monitor and causes an
// immediate return if it doesn't. We don't use the CHECK macro
// because we want the IMSE to be the only exception that is thrown
// from the call site when false is returned. Any other pending
// exception is ignored.
// NOTE: the macro expands to a bare 'return;', so it is only usable
// inside functions with a void return type.
#define CHECK_OWNER()                                                  \
  do {                                                                 \
    if (!check_owner(THREAD)) {                                        \
      assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here.");  \
      return;                                                          \
    }                                                                  \
  } while (false)
1265
1266 // Returns true if the specified thread owns the ObjectMonitor.
1267 // Otherwise returns false and throws IllegalMonitorStateException
1268 // (IMSE). If there is a pending exception and the specified thread
1269 // is not the owner, that exception will be replaced by the IMSE.
1270 bool ObjectMonitor::check_owner(Thread* THREAD) {
2044 }
2045
2046 DEBUG_ONLY(InitDone = true;)
2047 }
2048
// Print a one-line summary of this monitor's state to the given stream.
void ObjectMonitor::print_on(outputStream* st) const {
  // The minimal things to print for markWord printing, more can be added for debugging and logging.
  st->print("{contentions=0x%08x,waiters=0x%08x"
            ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
            contentions(), waiters(), recursions(),
            p2i(owner()));
}
2056 void ObjectMonitor::print() const { print_on(tty); }
2057
2058 #ifdef ASSERT
2059 // Print the ObjectMonitor like a debugger would:
2060 //
// (ObjectMonitor*) 0x00007fdfb6012e40 = {
2062 // _header = 0x0000000000000001
2063 // _object = 0x000000070ff45fd0
2064 // _allocation_state = Old
2065 // _pad_buf0 = {
2066 // [0] = '\0'
2067 // ...
2068 // [43] = '\0'
2069 // }
2070 // _owner = 0x0000000000000000
2071 // _previous_owner_tid = 0
2072 // _pad_buf1 = {
2073 // [0] = '\0'
2074 // ...
2075 // [47] = '\0'
2076 // }
2077 // _next_om = 0x0000000000000000
2078 // _recursions = 0
2079 // _EntryList = 0x0000000000000000
2080 // _cxq = 0x0000000000000000
2081 // _succ = 0x0000000000000000
2082 // _Responsible = 0x0000000000000000
2083 // _Spinner = 0
2084 // _SpinDuration = 5000
2085 // _contentions = 0
2086 // _WaitSet = 0x0000700009756248
2087 // _waiters = 1
2088 // _WaitSetLock = 0
2089 // }
2090 //
// Print this ObjectMonitor's fields, one per line, in the debugger-like
// format shown in the example comment above.
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr("  _header = " INTPTR_FORMAT, header().value());
  st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
  // Translate _allocation_state into a symbolic name; fall back to the raw
  // numeric value for unexpected states.
  st->print("  _allocation_state = ");
  if (is_free()) {
    st->print("Free");
  } else if (is_old()) {
    st->print("Old");
  } else if (is_new()) {
    st->print("New");
  } else {
    st->print("unknown=%d", _allocation_state);
  }
  st->cr();
  // The padding buffers are elided: placeholder first/last entries are
  // printed rather than the actual buffer contents.
  st->print_cr("  _pad_buf0 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr("  }");
  st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr("  _pad_buf1 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
  st->print_cr("  }");
  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
  st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
  st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr("  _Spinner = %d", _Spinner);
  st->print_cr("  _SpinDuration = %d", _SpinDuration);
  st->print_cr("  _contentions = %d", contentions());
  st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr("  _waiters = %d", _waiters);
  st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
2132 #endif
|