223 //
224 // * See also http://blogs.sun.com/dave
225
226
// Allocate ObjectMonitor storage from the C heap, tagged mtInternal
// for Native Memory Tracking.
void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
// Array form delegates to the scalar operator new above so both forms
// share the same heap/NMT policy.
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
// Release storage obtained from operator new (AllocateHeap).
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form delegates to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
243 void ObjectMonitor::enter(TRAPS) {
244 // The following code is ordered to check the most common cases first
245 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
246 Thread * const Self = THREAD;
247
248 void* cur = try_set_owner_from(NULL, Self);
249 if (cur == NULL) {
250 assert(_recursions == 0, "invariant");
251 return;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return;
258 }
259
260 if (Self->is_lock_owned((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
264 return;
265 }
266
267 // We've encountered genuine contention.
268 assert(Self->_Stalled == 0, "invariant");
269 Self->_Stalled = intptr_t(this);
270
271 // Try one round of spinning *before* enqueueing Self
272 // and before going through the awkward and expensive state
273 // transitions. The following spin is strictly optional ...
274 // Note that if we acquire the monitor from an initial spin
275 // we forgo posting JVMTI events and firing DTRACE probes.
276 if (TrySpin(Self) > 0) {
277 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
278 assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
279 assert(((oop)object())->mark() == markWord::encode(this),
280 "object mark must match encoded this: mark=" INTPTR_FORMAT
281 ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
282 markWord::encode(this).value());
283 Self->_Stalled = 0;
284 return;
285 }
286
287 assert(_owner != Self, "invariant");
288 assert(_succ != Self, "invariant");
289 assert(Self->is_Java_thread(), "invariant");
290 JavaThread * jt = (JavaThread *) Self;
291 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
292 assert(jt->thread_state() != _thread_blocked, "invariant");
293 assert(this->object() != NULL, "invariant");
294 assert(_contentions >= 0, "invariant");
295
296 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
297 // Ensure the object-monitor relationship remains stable while there's contention.
298 Atomic::inc(&_contentions);
299
300 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
301 EventJavaMonitorEnter event;
302 if (event.should_commit()) {
303 event.set_monitorClass(((oop)this->object())->klass());
304 event.set_address((uintptr_t)(this->object_addr()));
305 }
306
307 { // Change java thread status to indicate blocked on monitor enter.
308 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
309
310 Self->set_current_pending_monitor(this);
311
312 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
313 if (JvmtiExport::should_post_monitor_contended_enter()) {
314 JvmtiExport::post_monitor_contended_enter(jt, this);
315
316 // The current thread does not yet own the monitor and does not
317 // yet appear on any queues that would get it made the successor.
318 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
339 // thread that suspended us.
340 //
341 _recursions = 0;
342 _succ = NULL;
343 exit(false, Self);
344
345 jt->java_suspend_self();
346 }
347 Self->set_current_pending_monitor(NULL);
348
349 // We cleared the pending monitor info since we've just gotten past
350 // the enter-check-for-suspend dance and we now own the monitor free
351 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
352 // destructor can go to a safepoint at the end of this block. If we
353 // do a thread dump during that safepoint, then this thread will show
354 // as having "-locked" the monitor, but the OS and java.lang.Thread
355 // states will still report that the thread is blocked trying to
356 // acquire it.
357 }
358
359 Atomic::dec(&_contentions);
360 assert(_contentions >= 0, "invariant");
361 Self->_Stalled = 0;
362
363 // Must either set _recursions = 0 or ASSERT _recursions == 0.
364 assert(_recursions == 0, "invariant");
365 assert(_owner == Self, "invariant");
366 assert(_succ != Self, "invariant");
367 assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
368
369 // The thread -- now the owner -- is back in vm mode.
370 // Report the glorious news via TI,DTrace and jvmstat.
371 // The probe effect is non-trivial. All the reportage occurs
372 // while we hold the monitor, increasing the length of the critical
373 // section. Amdahl's parallel speedup law comes vividly into play.
374 //
375 // Another option might be to aggregate the events (thread local or
376 // per-monitor aggregation) and defer reporting until a more opportune
377 // time -- such as next time some thread encounters contention but has
// yet to acquire the lock. While that thread is spinning we could
// increment JVMStat counters, etc.
380
381 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
382 if (JvmtiExport::should_post_monitor_contended_entered()) {
383 JvmtiExport::post_monitor_contended_entered(jt, this);
384
385 // The current thread already owns the monitor and is not going to
386 // call park() for the remainder of the monitor enter protocol. So
387 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
388 // event handler consumed an unpark() issued by the thread that
389 // just exited the monitor.
390 }
391 if (event.should_commit()) {
392 event.set_previousOwner((uintptr_t)_previous_owner_tid);
393 event.commit();
394 }
395 OM_PERFDATA_OP(ContendedLockAttempts, inc());
396 }
397
398 // Caveat: TryLock() is not necessarily serializing if it returns failure.
399 // Callers must compensate as needed.
400
401 int ObjectMonitor::TryLock(Thread * Self) {
402 void * own = _owner;
403 if (own != NULL) return 0;
404 if (try_set_owner_from(NULL, Self) == NULL) {
405 assert(_recursions == 0, "invariant");
406 return 1;
407 }
408 // The lock had been free momentarily, but we lost the race to the lock.
409 // Interference -- the CAS failed.
410 // We can either return -1 or retry.
411 // Retry doesn't make as much sense because the lock was just acquired.
412 return -1;
413 }
414
// Convert the fields used by is_busy() to a string that can be
// used for diagnostic output. The fields are read without any
// synchronization, so the values are only a point-in-time snapshot.
const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
  ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
            ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
            _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
  return ss->base();
}
423
424 #define MAX_RECHECK_INTERVAL 1000
425
426 void ObjectMonitor::EnterI(TRAPS) {
427 Thread * const Self = THREAD;
428 assert(Self->is_Java_thread(), "invariant");
429 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
430
431 // Try the lock - TATAS
432 if (TryLock (Self) > 0) {
433 assert(_succ != Self, "invariant");
434 assert(_owner == Self, "invariant");
435 assert(_Responsible != Self, "invariant");
436 return;
437 }
438
439 assert(InitDone, "Unexpectedly not initialized");
440
441 // We try one round of spinning *before* enqueueing Self.
442 //
443 // If the _owner is ready but OFFPROC we could use a YieldTo()
444 // operation to donate the remainder of this thread's quantum
445 // to the owner. This has subtle but beneficial affinity
446 // effects.
447
448 if (TrySpin(Self) > 0) {
449 assert(_owner == Self, "invariant");
450 assert(_succ != Self, "invariant");
451 assert(_Responsible != Self, "invariant");
452 return;
453 }
454
455 // The Spin failed -- Enqueue and park the thread ...
456 assert(_succ != Self, "invariant");
457 assert(_owner != Self, "invariant");
458 assert(_Responsible != Self, "invariant");
535
536 for (;;) {
537
538 if (TryLock(Self) > 0) break;
539 assert(_owner != Self, "invariant");
540
541 // park self
542 if (_Responsible == Self) {
543 Self->_ParkEvent->park((jlong) recheckInterval);
544 // Increase the recheckInterval, but clamp the value.
545 recheckInterval *= 8;
546 if (recheckInterval > MAX_RECHECK_INTERVAL) {
547 recheckInterval = MAX_RECHECK_INTERVAL;
548 }
549 } else {
550 Self->_ParkEvent->park();
551 }
552
553 if (TryLock(Self) > 0) break;
554
555 // The lock is still contested.
556 // Keep a tally of the # of futile wakeups.
557 // Note that the counter is not protected by a lock or updated by atomics.
558 // That is by design - we trade "lossy" counters which are exposed to
559 // races during updates for a lower probe effect.
560
561 // This PerfData object can be used in parallel with a safepoint.
562 // See the work around in PerfDataManager::destroy().
563 OM_PERFDATA_OP(FutileWakeups, inc());
564 ++nWakeups;
565
566 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
567 // We can defer clearing _succ until after the spin completes
568 // TrySpin() must tolerate being called with _succ == Self.
569 // Try yet another round of adaptive spinning.
570 if (TrySpin(Self) > 0) break;
571
572 // We can find that we were unpark()ed and redesignated _succ while
573 // we were spinning. That's harmless. If we iterate and call park(),
574 // park() will consume the event and return immediately and we'll
799 SelfNode->_prev = (ObjectWaiter *) 0xBAD;
800 SelfNode->_next = (ObjectWaiter *) 0xBAD;
801 SelfNode->TState = ObjectWaiter::TS_RUN;
802 #endif
803 }
804
805 // -----------------------------------------------------------------------------
806 // Exit support
807 //
808 // exit()
809 // ~~~~~~
810 // Note that the collector can't reclaim the objectMonitor or deflate
811 // the object out from underneath the thread calling ::exit() as the
812 // thread calling ::exit() never transitions to a stable state.
813 // This inhibits GC, which in turn inhibits asynchronous (and
814 // inopportune) reclamation of "this".
815 //
816 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
817 // There's one exception to the claim above, however. EnterI() can call
818 // exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state == _thread_blocked,
820 // but the monitor's _contentions field is > 0, which inhibits reclamation.
821 //
822 // 1-0 exit
823 // ~~~~~~~~
824 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
825 // the fast-path operators have been optimized so the common ::exit()
826 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
827 // The code emitted by fast_unlock() elides the usual MEMBAR. This
828 // greatly improves latency -- MEMBAR and CAS having considerable local
829 // latency on modern processors -- but at the cost of "stranding". Absent the
830 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
832 // and a progress-liveness failure. Stranding is extremely rare.
833 // We use timers (timed park operations) & periodic polling to detect
834 // and recover from stranding. Potentially stranded threads periodically
835 // wake up and poll the lock. See the usage of the _Responsible variable.
836 //
837 // The CAS() in enter provides for safety and exclusion, while the CAS or
838 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
839 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
1074 }
1075
1076
// Wake one successor: designate Wakee as the heir presumptive (_succ),
// release ownership of the monitor, then unpark the wakee. The store
// to _succ must precede the owner release, and the fence must precede
// the unpark (see the ordering notes inline).
void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock.
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(Self);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1105
1106
1107 // -----------------------------------------------------------------------------
1108 // Class Loader deadlock handling.
1109 //
1110 // complete_exit exits a lock returning recursion count
1111 // complete_exit/reenter operate as a wait without waiting
1112 // complete_exit requires an inflated monitor
1113 // The _owner field is not always the Thread addr even with an
1114 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1122
1123 void* cur = Atomic::load(&_owner);
1124 if (THREAD != cur) {
1125 if (THREAD->is_lock_owned((address)cur)) {
1126 assert(_recursions == 0, "internal state error");
1127 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
1128 _recursions = 0;
1129 }
1130 }
1131
1132 guarantee(Self == _owner, "complete_exit not owner");
1133 intx save = _recursions; // record the old recursion count
1134 _recursions = 0; // set the recursion level to be 0
1135 exit(true, Self); // exit the monitor
1136 guarantee(_owner != Self, "invariant");
1137 return save;
1138 }
1139
1140 // reenter() enters a lock and sets recursion count
1141 // complete_exit/reenter operate as a wait without waiting
1142 void ObjectMonitor::reenter(intx recursions, TRAPS) {
1143 Thread * const Self = THREAD;
1144 assert(Self->is_Java_thread(), "Must be Java thread!");
1145 JavaThread *jt = (JavaThread *)THREAD;
1146
1147 guarantee(_owner != Self, "reenter already owner");
1148 enter(THREAD); // enter the monitor
1149 guarantee(_recursions == 0, "reenter recursion");
1150 _recursions = recursions;
1151 return;
1152 }
1153
1154 // Checks that the current THREAD owns this monitor and causes an
1155 // immediate return if it doesn't. We don't use the CHECK macro
1156 // because we want the IMSE to be the only exception that is thrown
1157 // from the call site when false is returned. Any other pending
1158 // exception is ignored.
1159 #define CHECK_OWNER() \
1160 do { \
1161 if (!check_owner(THREAD)) { \
1162 assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1163 return; \
1164 } \
1165 } while (false)
1166
1167 // Returns true if the specified thread owns the ObjectMonitor.
1168 // Otherwise returns false and throws IllegalMonitorStateException
1169 // (IMSE). If there is a pending exception and the specified thread
1170 // is not the owner, that exception will be replaced by the IMSE.
1171 bool ObjectMonitor::check_owner(Thread* THREAD) {
1945 }
1946
1947 DEBUG_ONLY(InitDone = true;)
1948 }
1949
// Print a compact one-line summary of the monitor's state to st.
void ObjectMonitor::print_on(outputStream* st) const {
  // The minimal things to print for markWord printing, more can be added for debugging and logging.
  st->print("{contentions=0x%08x,waiters=0x%08x"
            ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
            contentions(), waiters(), recursions(),
            p2i(owner()));
}
1957 void ObjectMonitor::print() const { print_on(tty); }
1958
1959 #ifdef ASSERT
1960 // Print the ObjectMonitor like a debugger would:
1961 //
1962 // (ObjectMonitor) 0x00007fdfb6012e40 = {
1963 // _header = 0x0000000000000001
1964 // _object = 0x000000070ff45fd0
1965 // _next_om = 0x0000000000000000
1966 // _pad_buf0 = {
1967 // [0] = '\0'
1968 // ...
1969 // [103] = '\0'
1970 // }
1971 // _owner = 0x0000000000000000
1972 // _previous_owner_tid = 0
1973 // _recursions = 0
1974 // _EntryList = 0x0000000000000000
1975 // _cxq = 0x0000000000000000
1976 // _succ = 0x0000000000000000
1977 // _Responsible = 0x0000000000000000
1978 // _Spinner = 0
1979 // _SpinDuration = 5000
1980 // _contentions = 0
1981 // _WaitSet = 0x0000700009756248
1982 // _waiters = 1
1983 // _WaitSetLock = 0
1984 // }
1985 //
// Dump every field, one per line, in the style a debugger would use
// (see the example layout in the comment above this function).
// Debug-only (guarded by #ifdef ASSERT); fields are read without
// synchronization, so values are a point-in-time snapshot.
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr(" _header = " INTPTR_FORMAT, header().value());
  st->print_cr(" _object = " INTPTR_FORMAT, p2i(_object));
  st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(next_om()));
  st->print_cr(" _pad_buf0 = {");
  st->print_cr(" [0] = '\\0'");
  st->print_cr(" ...");
  // Last valid index of the padding buffer, derived from its size.
  st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr(" }");
  st->print_cr(" _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr(" _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr(" _recursions = " INTX_FORMAT, _recursions);
  st->print_cr(" _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr(" _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr(" _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr(" _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr(" _Spinner = %d", _Spinner);
  st->print_cr(" _SpinDuration = %d", _SpinDuration);
  st->print_cr(" _contentions = %d", _contentions);
  st->print_cr(" _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr(" _waiters = %d", _waiters);
  st->print_cr(" _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
2011 #endif
|
223 //
224 // * See also http://blogs.sun.com/dave
225
226
// Allocate ObjectMonitor storage from the C heap, tagged mtInternal
// for Native Memory Tracking.
void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
// Array form delegates to the scalar operator new above so both forms
// share the same heap/NMT policy.
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
// Release storage obtained from operator new (AllocateHeap).
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
// Array form delegates to the scalar operator delete above.
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
239
240 // -----------------------------------------------------------------------------
241 // Enter support
242
243 bool ObjectMonitor::enter(TRAPS) {
244 // The following code is ordered to check the most common cases first
245 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
246 Thread * const Self = THREAD;
247
248 void* cur = try_set_owner_from(NULL, Self);
249 if (cur == NULL) {
250 assert(_recursions == 0, "invariant");
251 return true;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return true;
258 }
259
260 if (Self->is_lock_owned((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
264 return true;
265 }
266
267 // We've encountered genuine contention.
268 assert(Self->_Stalled == 0, "invariant");
269 Self->_Stalled = intptr_t(this);
270
271 // Try one round of spinning *before* enqueueing Self
272 // and before going through the awkward and expensive state
273 // transitions. The following spin is strictly optional ...
274 // Note that if we acquire the monitor from an initial spin
275 // we forgo posting JVMTI events and firing DTRACE probes.
276 if (TrySpin(Self) > 0) {
277 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
278 assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
279 assert(((oop)object())->mark() == markWord::encode(this),
280 "object mark must match encoded this: mark=" INTPTR_FORMAT
281 ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
282 markWord::encode(this).value());
283 Self->_Stalled = 0;
284 return true;
285 }
286
287 assert(_owner != Self, "invariant");
288 assert(_succ != Self, "invariant");
289 assert(Self->is_Java_thread(), "invariant");
290 JavaThread * jt = (JavaThread *) Self;
291 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
292 assert(jt->thread_state() != _thread_blocked, "invariant");
293 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
294 assert(AsyncDeflateIdleMonitors || contentions() >= 0, "must not be negative: contentions=%d", contentions());
295
296 // Keep track of contention for JVM/TI and M&M queries.
297 add_to_contentions(1);
298 if (is_being_async_deflated()) {
299 // Async deflation is in progress and our contentions increment
300 // above lost the race to async deflation. Undo the work and
301 // force the caller to retry.
302 const oop l_object = (oop)object();
303 if (l_object != NULL) {
304 // Attempt to restore the header/dmw to the object's header so that
305 // we only retry once if the deflater thread happens to be slow.
306 install_displaced_markword_in_object(l_object);
307 }
308 Self->_Stalled = 0;
309 add_to_contentions(-1);
310 return false;
311 }
312
313 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
314 EventJavaMonitorEnter event;
315 if (event.should_commit()) {
316 event.set_monitorClass(((oop)this->object())->klass());
317 event.set_address((uintptr_t)(this->object_addr()));
318 }
319
320 { // Change java thread status to indicate blocked on monitor enter.
321 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
322
323 Self->set_current_pending_monitor(this);
324
325 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
326 if (JvmtiExport::should_post_monitor_contended_enter()) {
327 JvmtiExport::post_monitor_contended_enter(jt, this);
328
329 // The current thread does not yet own the monitor and does not
330 // yet appear on any queues that would get it made the successor.
331 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
352 // thread that suspended us.
353 //
354 _recursions = 0;
355 _succ = NULL;
356 exit(false, Self);
357
358 jt->java_suspend_self();
359 }
360 Self->set_current_pending_monitor(NULL);
361
362 // We cleared the pending monitor info since we've just gotten past
363 // the enter-check-for-suspend dance and we now own the monitor free
364 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
365 // destructor can go to a safepoint at the end of this block. If we
366 // do a thread dump during that safepoint, then this thread will show
367 // as having "-locked" the monitor, but the OS and java.lang.Thread
368 // states will still report that the thread is blocked trying to
369 // acquire it.
370 }
371
372 add_to_contentions(-1);
373 assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
374 Self->_Stalled = 0;
375
376 // Must either set _recursions = 0 or ASSERT _recursions == 0.
377 assert(_recursions == 0, "invariant");
378 assert(_owner == Self, "invariant");
379 assert(_succ != Self, "invariant");
380 assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
381
382 // The thread -- now the owner -- is back in vm mode.
383 // Report the glorious news via TI,DTrace and jvmstat.
384 // The probe effect is non-trivial. All the reportage occurs
385 // while we hold the monitor, increasing the length of the critical
386 // section. Amdahl's parallel speedup law comes vividly into play.
387 //
388 // Another option might be to aggregate the events (thread local or
389 // per-monitor aggregation) and defer reporting until a more opportune
390 // time -- such as next time some thread encounters contention but has
// yet to acquire the lock. While that thread is spinning we could
// increment JVMStat counters, etc.
393
394 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
395 if (JvmtiExport::should_post_monitor_contended_entered()) {
396 JvmtiExport::post_monitor_contended_entered(jt, this);
397
398 // The current thread already owns the monitor and is not going to
399 // call park() for the remainder of the monitor enter protocol. So
400 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
401 // event handler consumed an unpark() issued by the thread that
402 // just exited the monitor.
403 }
404 if (event.should_commit()) {
405 event.set_previousOwner((uintptr_t)_previous_owner_tid);
406 event.commit();
407 }
408 OM_PERFDATA_OP(ContendedLockAttempts, inc());
409 return true;
410 }
411
412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
413 // Callers must compensate as needed.
414
415 int ObjectMonitor::TryLock(Thread * Self) {
416 void * own = _owner;
417 if (own != NULL) return 0;
418 if (try_set_owner_from(NULL, Self) == NULL) {
419 assert(_recursions == 0, "invariant");
420 return 1;
421 }
422 // The lock had been free momentarily, but we lost the race to the lock.
423 // Interference -- the CAS failed.
424 // We can either return -1 or retry.
425 // Retry doesn't make as much sense because the lock was just acquired.
426 return -1;
427 }
428
// Install the displaced mark word (dmw) of a deflating ObjectMonitor
// into the header of the object associated with the monitor. This
// idempotent method is called by a thread that is deflating a
// monitor and by other threads that have detected a race with the
// deflation process.
void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
  // This function must only be called when (owner == DEFLATER_MARKER
  // && contentions <= 0), but we can't guarantee that here because
  // those values could change when the ObjectMonitor gets moved from
  // the global free list to a per-thread free list.

  guarantee(obj != NULL, "must be non-NULL");

  // Separate loads in is_being_async_deflated(), which is almost always
  // called before this function, from the load of dmw/header below.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    // A non-multiple copy atomic (nMCA) machine needs a bigger
    // hammer to separate the loads before and the load below.
    OrderAccess::fence();
  } else {
    OrderAccess::loadload();
  }

  // Re-check the monitor's object ref: async deflation may have
  // cleared it since the caller's check.
  const oop l_object = (oop)object();
  if (l_object == NULL) {
    // ObjectMonitor's object ref has already been cleared by async
    // deflation so we're done here.
    return;
  }
  ADIM_guarantee(l_object == obj, "object=" INTPTR_FORMAT " must equal obj="
                 INTPTR_FORMAT, p2i(l_object), p2i(obj));

  markWord dmw = header();
  // The dmw has to be neutral (not NULL, not locked and not marked).
  ADIM_guarantee(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());

  // Install displaced mark word if the object's header still points
  // to this ObjectMonitor. More than one racing caller to this function
  // can rarely reach this point, but only one can win.
  markWord res = obj->cas_set_mark(dmw, markWord::encode(this));
  if (res != markWord::encode(this)) {
    // A racing caller got here first and already restored the header.
    // This should be rare so log at the Info level when it happens.
    log_info(monitorinflation)("install_displaced_markword_in_object: "
                               "failed cas_set_mark: new_mark=" INTPTR_FORMAT
                               ", old_mark=" INTPTR_FORMAT ", res=" INTPTR_FORMAT,
                               dmw.value(), markWord::encode(this).value(),
                               res.value());
  }

  // Note: It does not matter which thread restored the header/dmw
  // into the object's header. The thread deflating the monitor just
  // wanted the object's header restored and it is. The threads that
  // detected a race with the deflation process also wanted the
  // object's header restored before they retry their operation and
  // because it is restored they will only retry once.
}
485
486 // Convert the fields used by is_busy() to a string that can be
487 // used for diagnostic output.
488 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
489 ss->print("is_busy: waiters=%d, ", _waiters);
490 if (!AsyncDeflateIdleMonitors) {
491 ss->print("contentions=%d, ", contentions());
492 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
493 } else {
494 if (contentions() > 0) {
495 ss->print("contentions=%d, ", contentions());
496 } else {
497 ss->print("contentions=0");
498 }
499 if (_owner != DEFLATER_MARKER) {
500 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
501 } else {
502 // We report NULL instead of DEFLATER_MARKER here because is_busy()
503 // ignores DEFLATER_MARKER values.
504 ss->print("owner=" INTPTR_FORMAT, NULL);
505 }
506 }
507 ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
508 p2i(_EntryList));
509 return ss->base();
510 }
511
512 #define MAX_RECHECK_INTERVAL 1000
513
514 void ObjectMonitor::EnterI(TRAPS) {
515 Thread * const Self = THREAD;
516 assert(Self->is_Java_thread(), "invariant");
517 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
518
519 // Try the lock - TATAS
520 if (TryLock (Self) > 0) {
521 assert(_succ != Self, "invariant");
522 assert(_owner == Self, "invariant");
523 assert(_Responsible != Self, "invariant");
524 return;
525 }
526
527 if (AsyncDeflateIdleMonitors &&
528 try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
529 // Cancelled the in-progress async deflation. We bump contentions an
530 // extra time to prevent the async deflater thread from temporarily
531 // changing it to -max_jint and back to zero (no flicker to confuse
532 // is_being_async_deflated()). The async deflater thread will
533 // decrement contentions after it recognizes that the async
534 // deflation was cancelled.
535 add_to_contentions(1);
536 assert(_succ != Self, "invariant");
537 assert(_Responsible != Self, "invariant");
538 return;
539 }
540
541 assert(InitDone, "Unexpectedly not initialized");
542
543 // We try one round of spinning *before* enqueueing Self.
544 //
545 // If the _owner is ready but OFFPROC we could use a YieldTo()
546 // operation to donate the remainder of this thread's quantum
547 // to the owner. This has subtle but beneficial affinity
548 // effects.
549
550 if (TrySpin(Self) > 0) {
551 assert(_owner == Self, "invariant");
552 assert(_succ != Self, "invariant");
553 assert(_Responsible != Self, "invariant");
554 return;
555 }
556
557 // The Spin failed -- Enqueue and park the thread ...
558 assert(_succ != Self, "invariant");
559 assert(_owner != Self, "invariant");
560 assert(_Responsible != Self, "invariant");
637
638 for (;;) {
639
640 if (TryLock(Self) > 0) break;
641 assert(_owner != Self, "invariant");
642
643 // park self
644 if (_Responsible == Self) {
645 Self->_ParkEvent->park((jlong) recheckInterval);
646 // Increase the recheckInterval, but clamp the value.
647 recheckInterval *= 8;
648 if (recheckInterval > MAX_RECHECK_INTERVAL) {
649 recheckInterval = MAX_RECHECK_INTERVAL;
650 }
651 } else {
652 Self->_ParkEvent->park();
653 }
654
655 if (TryLock(Self) > 0) break;
656
657 if (AsyncDeflateIdleMonitors &&
658 try_set_owner_from(DEFLATER_MARKER, Self) == DEFLATER_MARKER) {
659 // Cancelled the in-progress async deflation. We bump contentions an
660 // extra time to prevent the async deflater thread from temporarily
661 // changing it to -max_jint and back to zero (no flicker to confuse
662 // is_being_async_deflated()). The async deflater thread will
663 // decrement contentions after it recognizes that the async
664 // deflation was cancelled.
665 add_to_contentions(1);
666 break;
667 }
668
669 // The lock is still contested.
670 // Keep a tally of the # of futile wakeups.
671 // Note that the counter is not protected by a lock or updated by atomics.
672 // That is by design - we trade "lossy" counters which are exposed to
673 // races during updates for a lower probe effect.
674
675 // This PerfData object can be used in parallel with a safepoint.
676 // See the work around in PerfDataManager::destroy().
677 OM_PERFDATA_OP(FutileWakeups, inc());
678 ++nWakeups;
679
680 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
681 // We can defer clearing _succ until after the spin completes
682 // TrySpin() must tolerate being called with _succ == Self.
683 // Try yet another round of adaptive spinning.
684 if (TrySpin(Self) > 0) break;
685
686 // We can find that we were unpark()ed and redesignated _succ while
687 // we were spinning. That's harmless. If we iterate and call park(),
688 // park() will consume the event and return immediately and we'll
913 SelfNode->_prev = (ObjectWaiter *) 0xBAD;
914 SelfNode->_next = (ObjectWaiter *) 0xBAD;
915 SelfNode->TState = ObjectWaiter::TS_RUN;
916 #endif
917 }
918
919 // -----------------------------------------------------------------------------
920 // Exit support
921 //
922 // exit()
923 // ~~~~~~
924 // Note that the collector can't reclaim the objectMonitor or deflate
925 // the object out from underneath the thread calling ::exit() as the
926 // thread calling ::exit() never transitions to a stable state.
927 // This inhibits GC, which in turn inhibits asynchronous (and
928 // inopportune) reclamation of "this".
929 //
930 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
931 // There's one exception to the claim above, however. EnterI() can call
932 // exit() to drop a lock if the acquirer has been externally suspended.
933 // In that case exit() is called with _thread_state == _thread_blocked,
934 // but the monitor's _contentions field is > 0, which inhibits reclamation.
935 //
936 // 1-0 exit
937 // ~~~~~~~~
938 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
939 // the fast-path operators have been optimized so the common ::exit()
940 // operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
941 // The code emitted by fast_unlock() elides the usual MEMBAR. This
942 // greatly improves latency -- MEMBAR and CAS having considerable local
943 // latency on modern processors -- but at the cost of "stranding". Absent the
944 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
946 // and a progress-liveness failure. Stranding is extremely rare.
947 // We use timers (timed park operations) & periodic polling to detect
948 // and recover from stranding. Potentially stranded threads periodically
949 // wake up and poll the lock. See the usage of the _Responsible variable.
950 //
951 // The CAS() in enter provides for safety and exclusion, while the CAS or
952 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
953 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
1188 }
1189
1190
// Hand the monitor off to Wakee: designate it as the presumptive
// successor (_succ), drop the lock, and unpark the wakee's event.
// Self must own the monitor on entry and no longer owns it on return.
void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant). We cache Wakee->_event in Trigger above for that
  // reason, and use only the cached copy below.
  Wakee = NULL;

  // Drop the lock.
  // Uses a fence to separate release_store(owner) from the LD in unpark().
  release_clear_owner(Self);
  OrderAccess::fence();

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}
1219
1220
1221 // -----------------------------------------------------------------------------
1222 // Class Loader deadlock handling.
1223 //
1224 // complete_exit exits a lock returning recursion count
1225 // complete_exit/reenter operate as a wait without waiting
1226 // complete_exit requires an inflated monitor
1227 // The _owner field is not always the Thread addr even with an
1228 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1236
1237 void* cur = Atomic::load(&_owner);
1238 if (THREAD != cur) {
1239 if (THREAD->is_lock_owned((address)cur)) {
1240 assert(_recursions == 0, "internal state error");
1241 set_owner_from_BasicLock(cur, Self); // Convert from BasicLock* to Thread*.
1242 _recursions = 0;
1243 }
1244 }
1245
1246 guarantee(Self == _owner, "complete_exit not owner");
1247 intx save = _recursions; // record the old recursion count
1248 _recursions = 0; // set the recursion level to be 0
1249 exit(true, Self); // exit the monitor
1250 guarantee(_owner != Self, "invariant");
1251 return save;
1252 }
1253
1254 // reenter() enters a lock and sets recursion count
1255 // complete_exit/reenter operate as a wait without waiting
1256 bool ObjectMonitor::reenter(intx recursions, TRAPS) {
1257 Thread * const Self = THREAD;
1258 assert(Self->is_Java_thread(), "Must be Java thread!");
1259 JavaThread *jt = (JavaThread *)THREAD;
1260
1261 guarantee(_owner != Self, "reenter already owner");
1262 if (!enter(THREAD)) {
1263 return false;
1264 }
1265 // Entered the monitor.
1266 guarantee(_recursions == 0, "reenter recursion");
1267 _recursions = recursions;
1268 return true;
1269 }
1270
// Checks that the current THREAD owns this monitor and causes an
// immediate return if it doesn't. We don't use the CHECK macro
// because we want the IMSE to be the only exception that is thrown
// from the call site when false is returned. Any other pending
// exception is ignored.
// NOTE: expands to a bare "return;", so it is only usable from
// void member functions of ObjectMonitor (check_owner() throws the
// IMSE on the caller's behalf).
#define CHECK_OWNER()                                                  \
  do {                                                                 \
    if (!check_owner(THREAD)) {                                        \
      assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here.");  \
      return;                                                          \
    }                                                                  \
  } while (false)
1283
1284 // Returns true if the specified thread owns the ObjectMonitor.
1285 // Otherwise returns false and throws IllegalMonitorStateException
1286 // (IMSE). If there is a pending exception and the specified thread
1287 // is not the owner, that exception will be replaced by the IMSE.
1288 bool ObjectMonitor::check_owner(Thread* THREAD) {
2062 }
2063
2064 DEBUG_ONLY(InitDone = true;)
2065 }
2066
// Print a one-line summary of the monitor's contention-related fields.
void ObjectMonitor::print_on(outputStream* st) const {
  // The minimal things to print for markWord printing, more can be added for debugging and logging.
  st->print("{contentions=0x%08x,waiters=0x%08x"
            ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
            contentions(), waiters(), recursions(),
            p2i(owner()));
}
// Convenience wrapper: print the summary to the tty stream.
void ObjectMonitor::print() const { print_on(tty); }
2075
#ifdef ASSERT
// Print the ObjectMonitor like a debugger would:
//
// (ObjectMonitor*) 0x00007fdfb6012e40 = {
//   _header = 0x0000000000000001
//   _object = 0x000000070ff45fd0
//   _allocation_state = Old
//   _pad_buf0 = {
//     [0] = '\0'
//     ...
//     [43] = '\0'
//   }
//   _owner = 0x0000000000000000
//   _previous_owner_tid = 0
//   _pad_buf1 = {
//     [0] = '\0'
//     ...
//     [47] = '\0'
//   }
//   _next_om = 0x0000000000000000
//   _recursions = 0
//   _EntryList = 0x0000000000000000
//   _cxq = 0x0000000000000000
//   _succ = 0x0000000000000000
//   _Responsible = 0x0000000000000000
//   _Spinner = 0
//   _SpinDuration = 5000
//   _contentions = 0
//   _WaitSet = 0x0000700009756248
//   _waiters = 1
//   _WaitSetLock = 0
// }
//
// Debug-only field-by-field dump in the format shown above.
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
  st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
  st->print_cr("  _header = " INTPTR_FORMAT, header().value());
  st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
  st->print("  _allocation_state = ");
  // Decode the allocation-state enum to a readable name.
  if (is_free()) {
    st->print("Free");
  } else if (is_old()) {
    st->print("Old");
  } else if (is_new()) {
    st->print("New");
  } else {
    st->print("unknown=%d", _allocation_state);  // unexpected enum value
  }
  st->cr();
  // The padding buffers are all-zero by construction; elide the middle
  // and print only the first and last indices.
  st->print_cr("  _pad_buf0 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
  st->print_cr("  }");
  st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
  st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
  st->print_cr("  _pad_buf1 = {");
  st->print_cr("    [0] = '\\0'");
  st->print_cr("    ...");
  st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
  st->print_cr("  }");
  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
  st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
  st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
  st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
  st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
  st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
  st->print_cr("  _Spinner = %d", _Spinner);
  st->print_cr("  _SpinDuration = %d", _SpinDuration);
  st->print_cr("  _contentions = %d", contentions());
  st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
  st->print_cr("  _waiters = %d", _waiters);
  st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
  st->print_cr("}");
}
#endif
|