224 // * See also http://blogs.sun.com/dave
225
226
// Allocate ObjectMonitor storage from the C heap, tagged mtInternal for NMT.
void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
// Array form delegates to the scalar operator new above.
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
// Release ObjectMonitor storage obtained from operator new.
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form delegates to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
// Acquire this monitor on behalf of the calling thread, blocking if it is
// owned by another thread. Handles recursive re-entry, conversion of a
// stack-lock (BasicLock) owner to a Thread* owner, one optional spin, and
// the contended slow path with JVMTI/DTrace/JFR event reporting.
void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  // Fast path: CAS the owner field from NULL to Self.
  void* cur = try_set_owner_from(NULL, Self);
  if (cur == NULL) {
    assert(_recursions == 0, "invariant");
    return;
  }

  if (cur == Self) {
    // Recursive (reentrant) enter by the current owner.
    // TODO-FIXME: check for integer overflow! BUGID 6557169.
    _recursions++;
    return;
  }

  if (Self->is_lock_owned((address)cur)) {
    // The owner field holds a BasicLock address on this thread's own stack,
    // i.e. this thread already owns the lock via a stack-lock record.
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
    return;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions. The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
    assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
    assert(((oop)object())->mark() == markWord::encode(this),
           "object mark must match encoded this: mark=" INTPTR_FORMAT
           ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
           markWord::encode(this).value());
    Self->_Stalled = 0;
    return;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_contentions >= 0, "invariant");

  // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc(&_contentions);

  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_contentions);
  assert(_contentions >= 0, "invariant");
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial. All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section. Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock. While that thread is spinning it could
  // increment JVMStat counters, etc.

  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}
397
398 // Caveat: TryLock() is not necessarily serializing if it returns failure.
399 // Callers must compensate as needed.
400
401 int ObjectMonitor::TryLock(Thread * Self) {
402 void * own = _owner;
403 if (own != NULL) return 0;
404 if (try_set_owner_from(NULL, Self) == NULL) {
405 assert(_recursions == 0, "invariant");
406 return 1;
407 }
408 // The lock had been free momentarily, but we lost the race to the lock.
409 // Interference -- the CAS failed.
410 // We can either return -1 or retry.
411 // Retry doesn't make as much sense because the lock was just acquired.
412 return -1;
413 }
414
415 // Convert the fields used by is_busy() to a string that can be
416 // used for diagnostic output.
417 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
418 ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
419 ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
420 _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
421 return ss->base();
422 }
423
424 #define MAX_RECHECK_INTERVAL 1000
425
// Contended-enter slow path: called with the thread already in
// _thread_blocked state. Tries TATAS and a spin, then enqueues Self and
// parks in a loop (with a timed park for the _Responsible thread to guard
// against stranding) until the lock is acquired.
void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner. This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // The Responsible thread uses a timed park so a stranded monitor
      // (see the 1-0 exit discussion) is eventually re-polled.
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning. That's harmless. If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
  // the lock. The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  return;
}
651
652 // ReenterI() is a specialized inline form of the latter half of the
653 // contended slow-path from EnterI(). We use ReenterI() only for
654 // monitor reentry in wait().
655 //
656 // In the future we should reconcile EnterI() and ReenterI().
657
// Re-acquire the monitor after a wait(): a specialized inline form of the
// latter half of the contended slow path in EnterI(). SelfNode is the
// already-enqueued ObjectWaiter for this thread.
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;
  for (;;) {
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      Self->_ParkEvent->park();

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent(jt)) break;
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }
  // Poison the dequeued node's links so any stale use fails fast.
  SelfNode->_prev = (ObjectWaiter *) 0xBAD;
  SelfNode->_next = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}
804
805 // -----------------------------------------------------------------------------
806 // Exit support
807 //
808 // exit()
809 // ~~~~~~
810 // Note that the collector can't reclaim the objectMonitor or deflate
811 // the object out from underneath the thread calling ::exit() as the
812 // thread calling ::exit() never transitions to a stable state.
813 // This inhibits GC, which in turn inhibits asynchronous (and
814 // inopportune) reclamation of "this".
815 //
816 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
817 // There's one exception to the claim above, however. EnterI() can call
818 // exit() to drop a lock if the acquirer has been externally suspended.
819 // In that case exit() is called with _thread_state as _thread_blocked,
820 // but the monitor's _contentions field is > 0, which inhibits reclamation.
821 //
822 // 1-0 exit
823 // ~~~~~~~~
824 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
825 // the fast-path operators have been optimized so the common ::exit()
826 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
827 // The code emitted by fast_unlock() elides the usual MEMBAR. This
828 // greatly improves latency -- MEMBAR and CAS having considerable local
829 // latency on modern processors -- but at the cost of "stranding". Absent the
830 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
831 // ::enter() path, resulting in the entering thread being stranding
832 // and a progress-liveness failure. Stranding is extremely rare.
833 // We use timers (timed park operations) & periodic polling to detect
834 // and recover from stranding. Potentially stranded threads periodically
835 // wake up and poll the lock. See the usage of the _Responsible variable.
836 //
837 // The CAS() in enter provides for safety and exclusion, while the CAS or
838 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
839 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
840 // We detect and recover from stranding with timers.
1074 }
1075
1076
1077 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1078 assert(_owner == Self, "invariant");
1079
1080 // Exit protocol:
1081 // 1. ST _succ = wakee
1082 // 2. membar #loadstore|#storestore;
1083 // 2. ST _owner = NULL
1084 // 3. unpark(wakee)
1085
1086 _succ = Wakee->_thread;
1087 ParkEvent * Trigger = Wakee->_event;
1088
1089 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1090 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1091 // out-of-scope (non-extant).
1092 Wakee = NULL;
1093
1094 // Drop the lock
1095 // Uses a fence to separate release_store(owner) from the LD in unpark().
1096 release_clear_owner(Self);
1097 OrderAccess::fence();
1098
1099 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1100 Trigger->unpark();
1101
1102 // Maintain stats and report events to JVMTI
1103 OM_PERFDATA_OP(Parks, inc());
1104 }
1105
1106
1107 // -----------------------------------------------------------------------------
1108 // Class Loader deadlock handling.
1109 //
1110 // complete_exit exits a lock returning recursion count
1111 // complete_exit/reenter operate as a wait without waiting
1112 // complete_exit requires an inflated monitor
1113 // The _owner field is not always the Thread addr even with an
1114 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1128 _recursions = 0;
1129 }
1130 }
1131
1132 guarantee(Self == _owner, "complete_exit not owner");
1133 intx save = _recursions; // record the old recursion count
1134 _recursions = 0; // set the recursion level to be 0
1135 exit(true, Self); // exit the monitor
1136 guarantee(_owner != Self, "invariant");
1137 return save;
1138 }
1139
1140 // reenter() enters a lock and sets recursion count
1141 // complete_exit/reenter operate as a wait without waiting
1142 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1143 Thread * const Self = THREAD;
1144 assert(Self->is_Java_thread(), "Must be Java thread!");
1145 JavaThread *jt = (JavaThread *)THREAD;
1146
1147 guarantee(_owner != Self, "reenter already owner");
1148 enter(THREAD); // enter the monitor
1149 guarantee(_recursions == 0, "reenter recursion");
1150 _recursions = recursions;
1151 return;
1152 }
1153
1154 // Checks that the current THREAD owns this monitor and causes an
1155 // immediate return if it doesn't. We don't use the CHECK macro
1156 // because we want the IMSE to be the only exception that is thrown
1157 // from the call site when false is returned. Any other pending
1158 // exception is ignored.
1159 #define CHECK_OWNER() \
1160 do { \
1161 if (!check_owner(THREAD)) { \
1162 assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1163 return; \
1164 } \
1165 } while (false)
1166
1167 // Returns true if the specified thread owns the ObjectMonitor.
1168 // Otherwise returns false and throws IllegalMonitorStateException
1169 // (IMSE). If there is a pending exception and the specified thread
1170 // is not the owner, that exception will be replaced by the IMSE.
1171 bool ObjectMonitor::check_owner(Thread* THREAD) {
1930 }
1931 #define NEWPERFVARIABLE(n) \
1932 { \
1933 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
1934 CHECK); \
1935 }
1936 NEWPERFCOUNTER(_sync_Inflations);
1937 NEWPERFCOUNTER(_sync_Deflations);
1938 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1939 NEWPERFCOUNTER(_sync_FutileWakeups);
1940 NEWPERFCOUNTER(_sync_Parks);
1941 NEWPERFCOUNTER(_sync_Notifications);
1942 NEWPERFVARIABLE(_sync_MonExtant);
1943 #undef NEWPERFCOUNTER
1944 #undef NEWPERFVARIABLE
1945 }
1946
1947 DEBUG_ONLY(InitDone = true;)
1948 }
1949
1950 void ObjectMonitor::print_on(outputStream* st) const {
1951 // The minimal things to print for markWord printing, more can be added for debugging and logging.
1952 st->print("{contentions=0x%08x,waiters=0x%08x"
1953 ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
1954 contentions(), waiters(), recursions(),
1955 p2i(owner()));
1956 }
// Convenience wrapper: print the one-line summary to the tty stream.
void ObjectMonitor::print() const { print_on(tty); }
1958
1959 #ifdef ASSERT
1960 // Print the ObjectMonitor like a debugger would:
1961 //
1962 // (ObjectMonitor) 0x00007fdfb6012e40 = {
1963 // _header = 0x0000000000000001
1964 // _object = 0x000000070ff45fd0
1965 // _next_om = 0x0000000000000000
1966 // _pad_buf0 = {
1967 // [0] = '\0'
1968 // ...
1969 // [103] = '\0'
1970 // }
1971 // _owner = 0x0000000000000000
1972 // _previous_owner_tid = 0
1973 // _recursions = 0
1974 // _EntryList = 0x0000000000000000
1975 // _cxq = 0x0000000000000000
1976 // _succ = 0x0000000000000000
1977 // _Responsible = 0x0000000000000000
1978 // _Spinner = 0
1979 // _SpinDuration = 5000
1980 // _contentions = 0
1981 // _WaitSet = 0x0000700009756248
1982 // _waiters = 1
1983 // _WaitSetLock = 0
1984 // }
1985 //
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  // Dump every field, one per line, matching the example layout above.
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr("  _header = " INTPTR_FORMAT, header().value());
  st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
  // The pad buffer is elided: just show first/last slots like a debugger would.
  st->print_cr("  _pad_buf0 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr("  }");
  st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
  st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr("  _Spinner = %d", _Spinner);
  st->print_cr("  _SpinDuration = %d", _SpinDuration);
  st->print_cr("  _contentions = %d", _contentions);
  st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr("  _waiters = %d", _waiters);
  st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
2011 #endif
|
224 // * See also http://blogs.sun.com/dave
225
226
// Allocate ObjectMonitor storage from the C heap, tagged mtInternal for NMT.
void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
// Array form delegates to the scalar operator new above.
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
// Release ObjectMonitor storage obtained from operator new.
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form delegates to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
// Acquire this monitor on behalf of the calling thread, blocking if it is
// owned by another thread. Handles recursive re-entry, conversion of a
// stack-lock (BasicLock) owner to a Thread* owner, racing with async
// deflation (DEFLATER_MARKER), one optional spin, and the contended slow
// path with JVMTI/DTrace/JFR event reporting.
// Precondition: the caller holds a positive ref_count on this monitor.
void ObjectMonitor::enter(TRAPS) {
  jint l_ref_count = ref_count();
  ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());

  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  // Fast path: CAS the owner field from NULL to Self.
  void* cur = try_set_owner_from(NULL, Self);
  if (cur == NULL) {
    assert(_recursions == 0, "invariant");
    return;
  }

  if (cur == Self) {
    // Recursive (reentrant) enter by the current owner.
    // TODO-FIXME: check for integer overflow! BUGID 6557169.
    _recursions++;
    return;
  }

  if (Self->is_lock_owned((address)cur)) {
    // The owner field holds a BasicLock address on this thread's own stack,
    // i.e. this thread already owns the lock via a stack-lock record.
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
    return;
  }

  if (AsyncDeflateIdleMonitors &&
      try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
    // The deflation protocol finished the first part (setting owner),
    // but it failed the second part (making ref_count negative) and
    // bailed. Acquired the monitor.
    assert(_recursions == 0, "invariant");
    return;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions. The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
    assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
    assert(((oop)object())->mark() == markWord::encode(this),
           "object mark must match encoded this: mark=" INTPTR_FORMAT
           ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
           markWord::encode(this).value());
    Self->_Stalled = 0;
    return;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);

  // Keep track of contention for JVM/TI and M&M queries.
  Atomic::inc(&_contentions);

  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_contentions);
  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial. All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section. Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock. While that thread is spinning it could
  // increment JVMStat counters, etc.

  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}
408
409 // Caveat: TryLock() is not necessarily serializing if it returns failure.
410 // Callers must compensate as needed.
411
412 int ObjectMonitor::TryLock(Thread * Self) {
413 void * own = _owner;
414 if (own != NULL) return 0;
415 if (try_set_owner_from(NULL, Self) == NULL) {
416 assert(_recursions == 0, "invariant");
417 return 1;
418 }
419 // The lock had been free momentarily, but we lost the race to the lock.
420 // Interference -- the CAS failed.
421 // We can either return -1 or retry.
422 // Retry doesn't make as much sense because the lock was just acquired.
423 return -1;
424 }
425
426 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
427 // into the header of the object associated with the monitor. This
428 // idempotent method is called by a thread that is deflating a
429 // monitor and by other threads that have detected a race with the
430 // deflation process.
431 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
432 // This function must only be called when (owner == DEFLATER_MARKER
433 // && ref_count <= 0), but we can't guarantee that here because
434 // those values could change when the ObjectMonitor gets moved from
435 // the global free list to a per-thread free list.
436
437 guarantee(obj != NULL, "must be non-NULL");
438 if (object() != obj) {
439 // ObjectMonitor's object ref no longer refers to the target object
440 // so the object's header has already been restored.
441 return;
442 }
443
444 markWord dmw = header();
445 if (dmw.value() == 0) {
446 // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor
447 // has been deflated and taken off the global free list.
448 return;
449 }
450
451 // A non-NULL dmw has to be neutral (not locked and not marked).
452 ADIM_guarantee(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
453
454 // Install displaced mark word if the object's header still points
455 // to this ObjectMonitor. All racing callers to this function will
456 // reach this point, but only one can win.
457 markWord res = obj->cas_set_mark(dmw, markWord::encode(this));
458 if (res != markWord::encode(this)) {
459 // This should be rare so log at the Info level when it happens.
460 log_info(monitorinflation)("install_displaced_markword_in_object: "
461 "failed cas_set_mark: new_mark=" INTPTR_FORMAT
462 ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
463 dmw.value(), markWord::encode(this).value(),
464 res.value());
465 }
466
467 // Note: It does not matter which thread restored the header/dmw
468 // into the object's header. The thread deflating the monitor just
469 // wanted the object's header restored and it is. The threads that
470 // detected a race with the deflation process also wanted the
471 // object's header restored before they retry their operation and
472 // because it is restored they will only retry once.
473 }
474
475 // Convert the fields used by is_busy() to a string that can be
476 // used for diagnostic output.
477 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
478 ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
479 if (!AsyncDeflateIdleMonitors) {
480 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
481 } else if (_owner != DEFLATER_MARKER) {
482 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
483 } else {
484 // We report NULL instead of DEFLATER_MARKER here because is_busy()
485 // ignores DEFLATER_MARKER values.
486 ss->print("owner=" INTPTR_FORMAT, NULL);
487 }
488 ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
489 p2i(_EntryList));
490 return ss->base();
491 }
492
493 #define MAX_RECHECK_INTERVAL 1000
494
// Contended-enter slow path: called with the thread already in
// _thread_blocked state. Tries TATAS, races with async deflation
// (DEFLATER_MARKER), spins, then enqueues Self and parks in a loop (with
// a timed park for the _Responsible thread to guard against stranding)
// until the lock is acquired.
// Precondition: the caller holds a positive ref_count on this monitor.
void ObjectMonitor::EnterI(TRAPS) {
  jint l_ref_count = ref_count();
  ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());

  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  if (AsyncDeflateIdleMonitors &&
      try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
    // The deflation protocol finished the first part (setting owner),
    // but it failed the second part (making ref_count negative) and
    // bailed. Acquired the monitor.
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner. This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // The Responsible thread uses a timed park so a stranded monitor
      // (see the 1-0 exit discussion) is eventually re-polled.
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    if (AsyncDeflateIdleMonitors &&
        try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
      // The deflation protocol finished the first part (setting owner),
      // but it failed the second part (making ref_count negative) and
      // bailed. Acquired the monitor.
      break;
    }

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning. That's harmless. If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
  // the lock. The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  return;
}
741
742 // ReenterI() is a specialized inline form of the latter half of the
743 // contended slow-path from EnterI(). We use ReenterI() only for
744 // monitor reentry in wait().
745 //
746 // In the future we should reconcile EnterI() and ReenterI().
747
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  // The caller must hold a positive ref_count on this monitor; that is
  // what keeps async deflation from deflating it out from under us.
  jint l_ref_count = ref_count();
  ADIM_guarantee(l_ref_count > 0, "must be positive: l_ref_count=%d, ref_count=%d", l_ref_count, ref_count());

  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;
  for (;;) {
    // Until we reacquire the monitor, SelfNode must remain queued in
    // one of the two legal states (TS_ENTER or TS_CXQ).
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    // Try the lock directly, then one round of spinning, before we
    // pay for the thread-state transitions and a park() below.
    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    if (AsyncDeflateIdleMonitors &&
        try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
      // The deflation protocol finished the first part (setting owner),
      // but it failed the second part (making ref_count negative) and
      // bailed. Acquired the monitor.
      break;
    }

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      Self->_ParkEvent->park();

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent(jt)) break;
        // Give up the successor designation before self-suspending so
        // an exiting thread can pick (and wake) someone else.
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }
900 SelfNode->_prev = (ObjectWaiter *) 0xBAD;
901 SelfNode->_next = (ObjectWaiter *) 0xBAD;
902 SelfNode->TState = ObjectWaiter::TS_RUN;
903 #endif
904 }
905
906 // -----------------------------------------------------------------------------
907 // Exit support
908 //
909 // exit()
910 // ~~~~~~
911 // Note that the collector can't reclaim the objectMonitor or deflate
912 // the object out from underneath the thread calling ::exit() as the
913 // thread calling ::exit() never transitions to a stable state.
914 // This inhibits GC, which in turn inhibits asynchronous (and
915 // inopportune) reclamation of "this".
916 //
917 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
918 // There's one exception to the claim above, however. EnterI() can call
919 // exit() to drop a lock if the acquirer has been externally suspended.
920 // In that case exit() is called with _thread_state == _thread_blocked,
921 // but the monitor's ref_count is > 0, which inhibits reclamation.
922 //
923 // 1-0 exit
924 // ~~~~~~~~
925 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
926 // the fast-path operators have been optimized so the common ::exit()
927 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
928 // The code emitted by fast_unlock() elides the usual MEMBAR. This
929 // greatly improves latency -- MEMBAR and CAS having considerable local
930 // latency on modern processors -- but at the cost of "stranding". Absent the
931 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
933 // and a progress-liveness failure. Stranding is extremely rare.
934 // We use timers (timed park operations) & periodic polling to detect
935 // and recover from stranding. Potentially stranded threads periodically
936 // wake up and poll the lock. See the usage of the _Responsible variable.
937 //
938 // The CAS() in enter provides for safety and exclusion, while the CAS or
939 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
940 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
941 // We detect and recover from stranding with timers.
1175 }
1176
1177
// Hand the monitor over to the chosen wakee: designate it as the
// successor (_succ), drop the lock, then unpark the wakee's ParkEvent.
void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock.
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(Self);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1206
1207
1208 // -----------------------------------------------------------------------------
1209 // Class Loader deadlock handling.
1210 //
1211 // complete_exit exits a lock returning recursion count
1212 // complete_exit/reenter operate as a wait without waiting
1213 // complete_exit requires an inflated monitor
1214 // The _owner field is not always the Thread addr even with an
1215 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1229 _recursions = 0;
1230 }
1231 }
1232
1233 guarantee(Self == _owner, "complete_exit not owner");
1234 intx save = _recursions; // record the old recursion count
1235 _recursions = 0; // set the recursion level to be 0
1236 exit(true, Self); // exit the monitor
1237 guarantee(_owner != Self, "invariant");
1238 return save;
1239 }
1240
1241 // reenter() enters a lock and sets recursion count
1242 // complete_exit/reenter operate as a wait without waiting
1243 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1244 Thread * const Self = THREAD;
1245 assert(Self->is_Java_thread(), "Must be Java thread!");
1246 JavaThread *jt = (JavaThread *)THREAD;
1247
1248 guarantee(_owner != Self, "reenter already owner");
1249 enter(THREAD);
1250 // Entered the monitor.
1251 guarantee(_recursions == 0, "reenter recursion");
1252 _recursions = recursions;
1253 }
1254
// Checks that the current THREAD owns this monitor and causes an
// immediate return from the enclosing method if it doesn't (so it can
// only be used in methods with a void return type). We don't use the
// CHECK macro because we want the IMSE raised by check_owner() to be
// the only exception that is thrown from the call site when false is
// returned. Any other pending exception is ignored.
#define CHECK_OWNER()                                                  \
  do {                                                                 \
    if (!check_owner(THREAD)) {                                        \
      assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
      return;                                                         \
    }                                                                 \
  } while (false)
1267
1268 // Returns true if the specified thread owns the ObjectMonitor.
1269 // Otherwise returns false and throws IllegalMonitorStateException
1270 // (IMSE). If there is a pending exception and the specified thread
1271 // is not the owner, that exception will be replaced by the IMSE.
1272 bool ObjectMonitor::check_owner(Thread* THREAD) {
2031 }
2032 #define NEWPERFVARIABLE(n) \
2033 { \
2034 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
2035 CHECK); \
2036 }
2037 NEWPERFCOUNTER(_sync_Inflations);
2038 NEWPERFCOUNTER(_sync_Deflations);
2039 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2040 NEWPERFCOUNTER(_sync_FutileWakeups);
2041 NEWPERFCOUNTER(_sync_Parks);
2042 NEWPERFCOUNTER(_sync_Notifications);
2043 NEWPERFVARIABLE(_sync_MonExtant);
2044 #undef NEWPERFCOUNTER
2045 #undef NEWPERFVARIABLE
2046 }
2047
2048 DEBUG_ONLY(InitDone = true;)
2049 }
2050
2051 ObjectMonitorHandle::~ObjectMonitorHandle() {
2052 if (_om_ptr != NULL) {
2053 _om_ptr->dec_ref_count();
2054 _om_ptr = NULL;
2055 }
2056 }
2057
// Save the ObjectMonitor* associated with the specified markWord and
// increment the ref_count. This function should only be called if
// the caller has verified mark.has_monitor() == true. The object
// parameter is needed to verify that ObjectMonitor* has not been
// deflated and reused for another object.
//
// This function returns true if the ObjectMonitor* has been safely
// saved. This function returns false if we have lost a race with
// async deflation; the caller should retry as appropriate.
//
bool ObjectMonitorHandle::save_om_ptr(oop object, markWord mark) {
  // is_marked() is a superset of has_monitor() so make sure we
  // are called with the proper markWord value.
  guarantee(mark.has_monitor() && !mark.is_marked(), "sanity check: mark="
            INTPTR_FORMAT, mark.value());

  ObjectMonitor* om_ptr = mark.monitor();
  // Bump the ref_count before any of the checks below: a positive
  // ref_count is what makes subsequent async deflation skip this monitor.
  om_ptr->inc_ref_count();

  if (AsyncDeflateIdleMonitors) {
    // Race here if monitor is not owned! The above ref_count bump
    // will cause subsequent async deflation to skip it. However,
    // previous or concurrent async deflation is a race.
    if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
      // Async deflation is in progress and our ref_count increment
      // above lost the race to async deflation. Attempt to restore
      // the header/dmw to the object's header so that we only retry
      // once if the deflater thread happens to be slow.
      om_ptr->install_displaced_markword_in_object(object);
      om_ptr->dec_ref_count();
      return false;
    }
    if (om_ptr->ref_count() <= 0) {
      // Async deflation is in the process of bailing out, but has not
      // yet restored the ref_count field so we return false to force
      // a retry. We want a positive ref_count value for a true return.
      om_ptr->dec_ref_count();
      return false;
    }
    // The ObjectMonitor could have been deflated and reused for
    // another object before we bumped the ref_count so make sure
    // our object still refers to this ObjectMonitor.
    // Note: With handshakes after deflation is this race even
    // possible anymore?
    const markWord tmp = object->mark();
    if (!tmp.has_monitor() || tmp.monitor() != om_ptr) {
      // Async deflation and reuse won the race so we have to retry.
      // Skip object header restoration since that's already done.
      om_ptr->dec_ref_count();
      return false;
    }
  }

  // Success: publish the monitor in this handle while still holding the
  // ref_count taken above; the destructor will drop it.
  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
                 p2i(_om_ptr));
  _om_ptr = om_ptr;
  return true;
}
2116
2117 // For internal use by ObjectSynchronizer::inflate().
2118 // This function is only used when we don't have to worry about async
2119 // deflation of the specified ObjectMonitor*.
2120 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor* om_ptr) {
2121 if (_om_ptr == NULL) {
2122 ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2123 om_ptr->inc_ref_count();
2124 _om_ptr = om_ptr;
2125 } else {
2126 ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2127 _om_ptr->dec_ref_count();
2128 _om_ptr = NULL;
2129 }
2130 }
2131
// Save the specified ObjectMonitor* if it is safe, i.e., not being
// async deflated.
//
// This function returns true if the ObjectMonitor* has been safely
// saved. This function returns false if the specified ObjectMonitor*
// is NULL or if we have lost a race with async deflation; the caller
// can retry as appropriate.
bool ObjectMonitorHandle::save_om_ptr_if_safe(ObjectMonitor* om_ptr) {
  if (om_ptr == NULL) {
    return false; // Nothing to save if input is NULL
  }

  // Bump the ref_count before the checks below; a positive ref_count
  // makes subsequent async deflation skip this monitor.
  om_ptr->inc_ref_count();

  if (AsyncDeflateIdleMonitors) {
    if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
      // Async deflation is in progress and our ref_count increment
      // above lost the race to async deflation.
      om_ptr->dec_ref_count();
      return false;
    }
    if (om_ptr->ref_count() <= 0) {
      // Async deflation is in the process of bailing out, but has not
      // yet restored the ref_count field so we return false to force
      // a retry. We want a positive ref_count value for a true return.
      om_ptr->dec_ref_count();
      return false;
    }
    // Unlike save_om_ptr(), we don't have context to determine if
    // the ObjectMonitor has been deflated and reused for another
    // object.
  }

  // Success: publish the monitor in this handle while still holding the
  // ref_count taken above; the destructor will drop it.
  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
                 p2i(_om_ptr));
  _om_ptr = om_ptr;
  return true;
}
2170
2171 // Unset the _om_ptr field and decrement the ref_count field.
2172 void ObjectMonitorHandle::unset_om_ptr() {
2173 ADIM_guarantee(_om_ptr != NULL, "_om_ptr must not be NULL");
2174 _om_ptr->dec_ref_count();
2175 _om_ptr = NULL;
2176 }
2177
2178 void ObjectMonitor::print_on(outputStream* st) const {
2179 // The minimal things to print for markWord printing, more can be added for debugging and logging.
2180 st->print("{contentions=0x%08x,waiters=0x%08x"
2181 ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2182 contentions(), waiters(), recursions(),
2183 p2i(owner()));
2184 }
2185 void ObjectMonitor::print() const { print_on(tty); }
2186
2187 #ifdef ASSERT
2188 // Print the ObjectMonitor like a debugger would:
2189 //
2190 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2191 // _header = 0x0000000000000001
2192 // _object = 0x000000070ff45fd0
2193 // _allocation_state = Old
2194 // _pad_buf0 = {
2195 // [0] = '\0'
2196 // ...
2197 // [43] = '\0'
2198 // }
2199 // _owner = 0x0000000000000000
2200 // _previous_owner_tid = 0
2201 // _pad_buf1 = {
2202 // [0] = '\0'
2203 // ...
2204 // [47] = '\0'
2205 // }
2206 // _ref_count = 1
2207 // _pad_buf2 = {
2208 // [0] = '\0'
2209 // ...
2210 // [47] = '\0'
2211 // }
2212 // _next_om = 0x0000000000000000
2213 // _recursions = 0
2214 // _EntryList = 0x0000000000000000
2215 // _cxq = 0x0000000000000000
2216 // _succ = 0x0000000000000000
2217 // _Responsible = 0x0000000000000000
2218 // _Spinner = 0
2219 // _SpinDuration = 5000
2220 // _contentions = 0
2221 // _WaitSet = 0x0000700009756248
2222 // _waiters = 1
2223 // _WaitSetLock = 0
2224 // }
2225 //
// Dump every field of this ObjectMonitor in the debugger-style layout
// shown in the example above. The output text must stay in sync with
// the field declarations (including the pad buffers between the
// independently-updated fields).
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr("  _header = " INTPTR_FORMAT, header().value());
  st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
  st->print("  _allocation_state = ");
  // The three recognized allocation states; anything else is printed
  // numerically as a diagnostic aid.
  if (is_free()) {
    st->print("Free");
  } else if (is_old()) {
    st->print("Old");
  } else if (is_new()) {
    st->print("New");
  } else {
    st->print("unknown=%d", _allocation_state);
  }
  st->cr();
  // Pad buffers are always zero-filled, so only the first and last
  // elements are shown.
  st->print_cr("  _pad_buf0 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr("  }");
  st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr("  _pad_buf1 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
  st->print_cr("  }");
  st->print_cr("  _ref_count = %d", ref_count());
  st->print_cr("  _pad_buf2 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1);
  st->print_cr("  }");
  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
  st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
  st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr("  _Spinner = %d", _Spinner);
  st->print_cr("  _SpinDuration = %d", _SpinDuration);
  st->print_cr("  _contentions = %d", _contentions);
  st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr("  _waiters = %d", _waiters);
  st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
2273 #endif
|