src/share/vm/runtime/objectMonitor.cpp

*** 37,46 ****
--- 37,47 ----
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
  #include "services/threadService.hpp"
  #include "trace/tracing.hpp"
  #include "trace/traceMacros.hpp"
+ #include "evtrace/traceEvents.hpp"
  #include "utilities/dtrace.hpp"
  #include "utilities/macros.hpp"
  #include "utilities/preserveException.hpp"
  #ifdef TARGET_OS_FAMILY_linux
  # include "os_linux.inline.hpp"
*** 313,323 ****
      _recursions++;
      return true;
    }
  }

! void ATTR ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD ;
    void * cur ;
--- 314,324 ----
      _recursions++;
      return true;
    }
  }

! void ATTR ObjectMonitor::enter(int after_wait, TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD ;
    void * cur ;
*** 376,388 ****
--- 377,394 ----
    // Ensure the object-monitor relationship remains stable while there's contention.
    Atomic::inc_ptr(&_count);

    EventJavaMonitorEnter event;
+   int trace_flags = 0;

    { // Change java thread status to indicate blocked on monitor enter.
      JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

+     if (EnableEventTracing) {
+       TraceEvents::write_monitor_contended_enter(this, (TraceTypes::monitor_enter_wait) after_wait);
+     }
+ 
      DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
      if (JvmtiExport::should_post_monitor_contended_enter()) {
        JvmtiExport::post_monitor_contended_enter(jt, this);

        // The current thread does not yet own the monitor and does not
*** 401,411 ****
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

!     EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
--- 407,417 ----
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

!     trace_flags |= EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
*** 451,460 ****
--- 457,470 ----
    // per-monitor aggregation) and defer reporting until a more opportune
    // time -- such as next time some thread encounters contention but has
    // yet to acquire the lock.  While spinning that thread could
    // spinning we could increment JVMStat counters, etc.

+   if (EnableEventTracing) {
+     TraceEvents::write_monitor_contended_entered(this, (TraceTypes::monitor_entered_flags) trace_flags);
+   }
+ 
    DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_entered()) {
      JvmtiExport::post_monitor_contended_entered(jt, this);

      // The current thread already owns the monitor and is not going to
*** 497,517 ****
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
    }
  }

! void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
      assert (_succ != Self , "invariant") ;
      assert (_owner == Self , "invariant") ;
      assert (_Responsible != Self , "invariant") ;
!     return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
--- 507,529 ----
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
    }
  }

! int ATTR ObjectMonitor::EnterI (TRAPS) {
!   int trace_flags = 0;
! 
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
      assert (_succ != Self , "invariant") ;
      assert (_owner == Self , "invariant") ;
      assert (_Responsible != Self , "invariant") ;
!     return trace_flags;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
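EnterI() now reports how the slow path went instead of returning void: it accumulates bits such as TraceTypes::entered_queued and TraceTypes::entered_parked, and enter() ORs the result of every EnterI() attempt into its local trace_flags before writing the contended-entered event. The short standalone sketch below illustrates only this accumulation pattern; the enum values and the slow_path_attempt() helper are simplified stand-ins invented for the example, not code from this patch.

    #include <cstdio>

    // Simplified stand-ins for the patch's flag values (illustrative only).
    enum monitor_entered_flags {
      entered_flags_none = 0,
      entered_queued     = 1 << 0,  // thread had to enqueue itself on cxq/EntryList
      entered_parked     = 1 << 1   // thread actually parked at least once
    };

    // Models one slow-path attempt: returns flags describing what it had to do.
    static int slow_path_attempt(bool had_to_queue, bool had_to_park) {
      int flags = entered_flags_none;
      if (had_to_queue) flags |= entered_queued;
      if (had_to_park)  flags |= entered_parked;
      return flags;
    }

    int main() {
      // The caller retries the slow path (e.g. after a suspend-equivalent wakeup)
      // and accumulates the flags across all attempts, as enter() does with EnterI().
      int trace_flags = entered_flags_none;
      trace_flags |= slow_path_attempt(true, false);  // first attempt: queued, no park
      trace_flags |= slow_path_attempt(true, true);   // retry: queued and parked
      std::printf("queued=%d parked=%d\n",
                  (trace_flags & entered_queued) != 0,
                  (trace_flags & entered_parked) != 0);
      return 0;
    }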
*** 523,533 ****
    if (TrySpin (Self) > 0) {
      assert (_owner == Self , "invariant") ;
      assert (_succ != Self , "invariant") ;
      assert (_Responsible != Self , "invariant") ;
!     return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ != Self , "invariant") ;
    assert (_owner != Self , "invariant") ;
--- 535,545 ----
    if (TrySpin (Self) > 0) {
      assert (_owner == Self , "invariant") ;
      assert (_succ != Self , "invariant") ;
      assert (_Responsible != Self , "invariant") ;
!     return trace_flags;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ != Self , "invariant") ;
    assert (_owner != Self , "invariant") ;
*** 561,574 ****
      // As an optional optimization we retry the lock.
      if (TryLock (Self) > 0) {
        assert (_succ != Self , "invariant") ;
        assert (_owner == Self , "invariant") ;
        assert (_Responsible != Self , "invariant") ;
!       return ;
      }
    }

    // Check for cxq|EntryList edge transition to non-null. This indicates
    // the onset of contention. While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
    // operations revert to the faster 1-0 mode. This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
--- 573,588 ----
      // As an optional optimization we retry the lock.
      if (TryLock (Self) > 0) {
        assert (_succ != Self , "invariant") ;
        assert (_owner == Self , "invariant") ;
        assert (_Responsible != Self , "invariant") ;
!       return trace_flags;
      }
    }

+   trace_flags |= TraceTypes::entered_queued;
+ 
    // Check for cxq|EntryList edge transition to non-null. This indicates
    // the onset of contention. While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
    // operations revert to the faster 1-0 mode. This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
*** 630,639 ****
--- 644,655 ----
      } else {
        TEVENT (Inflated enter - park UNTIMED) ;
        Self->_ParkEvent->park() ;
      }

+     trace_flags |= TraceTypes::entered_parked;
+ 
      if (TryLock(Self) > 0) break ;

      // The lock is still contested.
      // Keep a tally of the # of futile wakeups.
      // Note that the counter is not protected by a lock or updated by atomics.
*** 735,745 ****
    // execute a serializing instruction.

    if (SyncFlags & 8) {
      OrderAccess::fence() ;
    }
!   return ;
  }

  // ReenterI() is a specialized inline form of the latter half of the
  // contended slow-path from EnterI().  We use ReenterI() only for
  // monitor reentry in wait().
--- 751,761 ----
    // execute a serializing instruction.

    if (SyncFlags & 8) {
      OrderAccess::fence() ;
    }
!   return trace_flags;
  }

  // ReenterI() is a specialized inline form of the latter half of the
  // contended slow-path from EnterI().  We use ReenterI() only for
  // monitor reentry in wait().
*** 951,961 ****
  // any one time. (more precisely, we want to minimize timer-seconds, which is
  // the integral of the # of active timers at any instant over time).
  // Both impinge on OS scalability. Given that, at most one thread parked on
  // a monitor will use a timer.

! void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
    Thread * Self = THREAD ;
    if (THREAD != _owner) {
      if (THREAD->is_lock_owned((address) _owner)) {
        // Transmute _owner from a BasicLock pointer to a Thread address.
        // We don't need to hold _mutex for this transition.
--- 967,977 ----
  // any one time. (more precisely, we want to minimize timer-seconds, which is
  // the integral of the # of active timers at any instant over time).
  // Both impinge on OS scalability. Given that, at most one thread parked on
  // a monitor will use a timer.

! void ATTR ObjectMonitor::exit(intptr_t *exit_stack_id_for_wait, bool not_suspended, TRAPS) {
    Thread * Self = THREAD ;
    if (THREAD != _owner) {
      if (THREAD->is_lock_owned((address) _owner)) {
        // Transmute _owner from a BasicLock pointer to a Thread address.
        // We don't need to hold _mutex for this transition.
*** 996,1010 ****
    if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
      _previous_owner_tid = SharedRuntime::get_java_tid(Self);
    }
  #endif

    for (;;) {
      assert (THREAD == _owner, "invariant") ;

-     if (Knob_ExitPolicy == 0) {
        // release semantics: prior loads and stores from within the critical section
        // must not float (reorder) past the following store that drops the lock.
        // On SPARC that requires MEMBAR #loadstore|#storestore.
        // But of course in TSO #loadstore|#storestore is not required.
        // I'd like to write one of the following:
--- 1012,1053 ----
    if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
      _previous_owner_tid = SharedRuntime::get_java_tid(Self);
    }
  #endif

+   TraceEventMonitorContendedExited event(this);
+   if (exit_stack_id_for_wait != NULL) {
+     // This is a temporary exit for Object.wait().
+     // We don't want to use the current stack trace as the lock site, so if we
+     // end up writing the event, we allocate a stack id that we resolve later
+     // when the monitor is really exited. When there are multiple waits, we
+     // reuse the first preallocated stack id.
+     event.set_use_or_preallocate_stack_id_at((TraceTypes::stack_id *) exit_stack_id_for_wait);
+     event.set_resolve_stack(false);
+   } else {
+     // true exit
+     event.set_resolve_stack(true);
+     if (_trace_exit_stack != 0) {
+       event.set_use_stack_id(_trace_exit_stack);
+       event.enable(); // always write the exit event to resolve the stack
+     }
+   }
+   if ((intptr_t(_EntryList) | intptr_t(_cxq)) != 0) {
+     // there are queued threads -- we are definitely writing a trace event
+     event.enable();
+   }
+ 
+   _trace_exit_stack = 0;
+ 
    for (;;) {
      assert (THREAD == _owner, "invariant") ;
+     //
+     // NOTE: we have removed all code paths for ExitPolicy != 0 and QMode != 0
+     // knob values for simplicity of event tracing.
+     //
      // release semantics: prior loads and stores from within the critical section
      // must not float (reorder) past the following store that drops the lock.
      // On SPARC that requires MEMBAR #loadstore|#storestore.
      // But of course in TSO #loadstore|#storestore is not required.
      // I'd like to write one of the following:
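The added block above covers the special case of Object.wait(): a temporary exit must not record the current stack as the lock-release site, so a stack id is preallocated in the caller's frame, reused across repeated waits, and only resolved when the monitor is genuinely exited. The standalone sketch below models just that idea with a ticket counter; the stack_id typedef and the use_or_preallocate() helper are illustrative stand-ins, not the patch's TraceTypes API.

    #include <cstdint>
    #include <cstdio>

    // Illustrative model only: a "stack id" is just a ticket number here.
    typedef intptr_t stack_id;

    static stack_id next_id = 1;

    // Reserve an id without capturing a stack; reuse the caller's slot if already set.
    static stack_id use_or_preallocate(stack_id *slot) {
      if (*slot == 0) *slot = next_id++;
      return *slot;
    }

    int main() {
      stack_id wait_exit_id = 0;   // lives in the wait() frame, reused across waits

      // Two temporary exits from Object.wait(): both reuse the same preallocated id,
      // and no stack walk happens at this point.
      stack_id a = use_or_preallocate(&wait_exit_id);
      stack_id b = use_or_preallocate(&wait_exit_id);

      // The real monitor exit later resolves the id to an actual stack trace
      // (modeled as a printf), so the walk is paid once, not once per wait.
      std::printf("wait exits used id %ld and %ld; resolved once at real exit\n",
                  (long) a, (long) b);
      return 0;
    }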
*** 1015,1025 ****
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
      OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
      OrderAccess::storeload() ;                         // See if we need to wake a successor
!     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        TEVENT (Inflated exit - simple egress) ;
        return ;
      }
      TEVENT (Inflated exit - complex egress) ;
--- 1058,1074 ----
      // in massive wasteful coherency traffic on classic SMP systems.
      // Instead, I use release_store(), which is implemented as just a simple
      // ST on x64, x86 and SPARC.
      OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
      OrderAccess::storeload() ;                         // See if we need to wake a successor
!     bool queues_empty = ((intptr_t(_EntryList) | intptr_t(_cxq)) == 0);
!     bool have_succ = (_succ != NULL);
!     if (!queues_empty) {
!       // some thread might have entered itself on _cxq in the meantime
!       event.enable();
!     }
!     if (queues_empty || have_succ) {
        TEVENT (Inflated exit - simple egress) ;
        return ;
      }
      TEVENT (Inflated exit - complex egress) ;
*** 1062,1190 ****
      //
      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
        return ;
      }
      TEVENT (Exit - Reacquired) ;
-   } else {
-     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-       OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
-       OrderAccess::storeload() ;
-       // Ratify the previously observed values.
-       if (_cxq == NULL || _succ != NULL) {
-         TEVENT (Inflated exit - simple egress) ;
-         return ;
-       }
- 
-       // inopportune interleaving -- the exiting thread (this thread)
-       // in the fast-exit path raced an entering thread in the slow-enter
-       // path.
-       // We have two choices:
-       // A. Try to reacquire the lock.
-       //    If the CAS() fails return immediately, otherwise
-       //    we either restart/rerun the exit operation, or simply
-       //    fall-through into the code below which wakes a successor.
-       // B. If the elements forming the EntryList|cxq are TSM
-       //    we could simply unpark() the lead thread and return
-       //    without having set _succ.
-       if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-         TEVENT (Inflated exit - reacquired succeeded) ;
-         return ;
-       }
-       TEVENT (Inflated exit - reacquired failed) ;
-     } else {
-       TEVENT (Inflated exit - complex egress) ;
-     }
-   }

    guarantee (_owner == THREAD, "invariant") ;

    ObjectWaiter * w = NULL ;
-   int QMode = Knob_QMode ;
- 
-   if (QMode == 2 && _cxq != NULL) {
-     // QMode == 2 : cxq has precedence over EntryList.
-     // Try to directly wake a successor from the cxq.
-     // If successful, the successor will need to unlink itself from cxq.
-     w = _cxq ;
-     assert (w != NULL, "invariant") ;
-     assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-     ExitEpilog (Self, w) ;
-     return ;
-   }
- 
-   if (QMode == 3 && _cxq != NULL) {
-     // Aggressively drain cxq into EntryList at the first opportunity.
-     // This policy ensure that recently-run threads live at the head of EntryList.
-     // Drain _cxq into EntryList - bulk transfer.
-     // First, detach _cxq.
-     // The following loop is tantamount to: w = swap (&cxq, NULL)
-     w = _cxq ;
-     for (;;) {
-       assert (w != NULL, "Invariant") ;
-       ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
-       if (u == w) break ;
-       w = u ;
-     }
-     assert (w != NULL , "invariant") ;
- 
-     ObjectWaiter * q = NULL ;
-     ObjectWaiter * p ;
-     for (p = w ; p != NULL ; p = p->_next) {
-       guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-       p->TState = ObjectWaiter::TS_ENTER ;
-       p->_prev = q ;
-       q = p ;
-     }
- 
-     // Append the RATs to the EntryList
-     // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
-     ObjectWaiter * Tail ;
-     for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
-     if (Tail == NULL) {
-       _EntryList = w ;
-     } else {
-       Tail->_next = w ;
-       w->_prev = Tail ;
-     }
- 
-     // Fall thru into code that tries to wake a successor from EntryList
-   }
- 
-   if (QMode == 4 && _cxq != NULL) {
-     // Aggressively drain cxq into EntryList at the first opportunity.
-     // This policy ensure that recently-run threads live at the head of EntryList.
- 
-     // Drain _cxq into EntryList - bulk transfer.
-     // First, detach _cxq.
-     // The following loop is tantamount to: w = swap (&cxq, NULL)
-     w = _cxq ;
-     for (;;) {
-       assert (w != NULL, "Invariant") ;
-       ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
-       if (u == w) break ;
-       w = u ;
-     }
-     assert (w != NULL , "invariant") ;
- 
-     ObjectWaiter * q = NULL ;
-     ObjectWaiter * p ;
-     for (p = w ; p != NULL ; p = p->_next) {
-       guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-       p->TState = ObjectWaiter::TS_ENTER ;
-       p->_prev = q ;
-       q = p ;
-     }
- 
-     // Prepend the RATs to the EntryList
-     if (_EntryList != NULL) {
-       q->_next = _EntryList ;
-       _EntryList->_prev = q ;
-     }
-     _EntryList = w ;
- 
-     // Fall thru into code that tries to wake a successor from EntryList
-   }

    w = _EntryList ;
    if (w != NULL) {
      // I'd like to write: guarantee (w->_thread != Self).
      // But in practice an exiting thread may find itself on the EntryList.
--- 1111,1124 ----
*** 1228,1266 ****
    // and effectively lengthening the critical section.
    // Invariant: s chases t chases u.
    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
    // we have faster access to the tail.

-   if (QMode == 1) {
-     // QMode == 1 : drain cxq to EntryList, reversing order
-     // We also reverse the order of the list.
-     ObjectWaiter * s = NULL ;
-     ObjectWaiter * t = w ;
-     ObjectWaiter * u = NULL ;
-     while (t != NULL) {
-       guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
-       t->TState = ObjectWaiter::TS_ENTER ;
-       u = t->_next ;
-       t->_prev = u ;
-       t->_next = s ;
-       s = t;
-       t = u ;
-     }
-     _EntryList = s ;
-     assert (s != NULL, "invariant") ;
-   } else {
-     // QMode == 0 or QMode == 2
    _EntryList = w ;
    ObjectWaiter * q = NULL ;
    ObjectWaiter * p ;
    for (p = w ; p != NULL ; p = p->_next) {
      guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
      p->TState = ObjectWaiter::TS_ENTER ;
      p->_prev = q ;
      q = p ;
    }
-   }

    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

    // See if we can abdicate to a spinner instead of waking a thread.
--- 1162,1180 ----
*** 1366,1376 ****
  // complete_exit/reenter operate as a wait without waiting
  // complete_exit requires an inflated monitor
  // The _owner field is not always the Thread addr even with an
  // inflated monitor, e.g. the monitor can be inflated by a non-owning
  // thread due to contention.
! intptr_t ObjectMonitor::complete_exit(TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;

    DeferredInitialize();
--- 1280,1290 ----
  // complete_exit/reenter operate as a wait without waiting
  // complete_exit requires an inflated monitor
  // The _owner field is not always the Thread addr even with an
  // inflated monitor, e.g. the monitor can be inflated by a non-owning
  // thread due to contention.
! void ObjectMonitor::complete_exit(intptr_t *saved_recursions, intptr_t *saved_trace_exit_stack, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;

    DeferredInitialize();
--- 1297,1326 ----
*** 1383,1411 ****
        OwnerIsThread = 1 ;
      }
    }

    guarantee(Self == _owner, "complete_exit not owner");
!   intptr_t save = _recursions; // record the old recursion count
!   _recursions = 0;             // set the recursion level to be 0
!   exit (true, Self) ;          // exit the monitor
    guarantee (_owner != Self, "invariant");
-   return save;
  }

  // reenter() enters a lock and sets recursion count
  // complete_exit/reenter operate as a wait without waiting
! void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;

    guarantee(_owner != Self, "reenter already owner");
    enter (THREAD);       // enter the monitor
    guarantee (_recursions == 0, "reenter recursion");
!   _recursions = recursions;
!   return;
  }

  // -----------------------------------------------------------------------------
  // A macro is used below because there may already be a pending
--- 1297,1326 ----
        OwnerIsThread = 1 ;
      }
    }

    guarantee(Self == _owner, "complete_exit not owner");
!   // record old recursion level and exit stack
!   if (saved_recursions != NULL)       *saved_recursions = _recursions;
!   if (saved_trace_exit_stack != NULL) *saved_trace_exit_stack = _trace_exit_stack;
!   _recursions = 0;
!   exit(saved_trace_exit_stack, true, Self);
    guarantee (_owner != Self, "invariant");
  }

  // reenter() enters a lock and sets recursion count
  // complete_exit/reenter operate as a wait without waiting
! void ObjectMonitor::reenter(intptr_t saved_recursions, intptr_t saved_trace_exit_stack, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;

    guarantee(_owner != Self, "reenter already owner");
    enter (THREAD);       // enter the monitor
    guarantee (_recursions == 0, "reenter recursion");
!   _recursions = saved_recursions;
!   _trace_exit_stack = saved_trace_exit_stack;
  }

  // -----------------------------------------------------------------------------
  // A macro is used below because there may already be a pending
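complete_exit() and reenter() now move both pieces of state, the recursion count and the recorded exit stack, out to the caller and back, so a wait-style temporary release can restore them after reacquiring the monitor. A minimal sketch of that save/restore pairing is shown below; ToyMonitor and its fields are invented for illustration and are not the HotSpot ObjectMonitor.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Illustrative monitor state only; not the HotSpot ObjectMonitor.
    struct ToyMonitor {
      intptr_t recursions;
      intptr_t trace_exit_stack;

      // Mirrors the new complete_exit() contract: report both values via out-parameters.
      void complete_exit(intptr_t *saved_recursions, intptr_t *saved_trace_exit_stack) {
        if (saved_recursions != NULL)       *saved_recursions = recursions;
        if (saved_trace_exit_stack != NULL) *saved_trace_exit_stack = trace_exit_stack;
        recursions = 0;
        trace_exit_stack = 0;  // the temporary exit consumes the recorded exit stack
      }

      // Mirrors reenter(): both values are restored together after reacquisition.
      void reenter(intptr_t saved_recursions, intptr_t saved_trace_exit_stack) {
        recursions = saved_recursions;
        trace_exit_stack = saved_trace_exit_stack;
      }
    };

    int main() {
      ToyMonitor m = { 3, 42 };
      intptr_t saved_recursions = 0, saved_stack = 0;

      m.complete_exit(&saved_recursions, &saved_stack);   // temporary release, e.g. for a wait
      // ... the thread waits here without owning the monitor ...
      m.reenter(saved_recursions, saved_stack);           // restore on reacquisition

      std::printf("recursions=%ld exit_stack=%ld\n",
                  (long) m.recursions, (long) m.trace_exit_stack);
      return 0;
    }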
*** 1522,1535 ****
    Thread::SpinRelease (&_WaitSetLock) ;

    if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
    }
!   intptr_t save = _recursions; // record the old recursion count
    _waiters++;                  // increment the number of waiters
    _recursions = 0;             // set the recursion level to be 1
!   exit (true, Self) ;          // exit the monitor
    guarantee (_owner != Self, "invariant") ;

    // The thread is on the WaitSet list - now park() it.
    // On MP systems it's conceivable that a brief spin before we park
    // could be profitable.
--- 1437,1451 ----
    Thread::SpinRelease (&_WaitSetLock) ;

    if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
    }
!   intptr_t saved_recursions = _recursions; // record the old recursion count
!   intptr_t saved_trace_exit_stack = _trace_exit_stack;
    _waiters++;                  // increment the number of waiters
    _recursions = 0;             // set the recursion level to be 1
!   exit(&saved_trace_exit_stack, true, Self); // exit, knows how to handle exit stack
    guarantee (_owner != Self, "invariant") ;

    // The thread is on the WaitSet list - now park() it.
    // On MP systems it's conceivable that a brief spin before we park
    // could be profitable.
*** 1642,1652 ****
    Self->_Stalled = 0 ;

    assert (_owner != Self, "invariant") ;
    ObjectWaiter::TStates v = node.TState ;
    if (v == ObjectWaiter::TS_RUN) {
!     enter (Self) ;
    } else {
      guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
      ReenterI (Self, &node) ;
      node.wait_reenter_end(this);
    }
--- 1558,1574 ----
    Self->_Stalled = 0 ;

    assert (_owner != Self, "invariant") ;
    ObjectWaiter::TStates v = node.TState ;
    if (v == ObjectWaiter::TS_RUN) {
!     int after_wait = TraceTypes::enter_after_wait_other;
!     if (node._notified) {
!       after_wait = TraceTypes::enter_after_wait_notify;
!     } else if (ret == OS_TIMEOUT) {
!       after_wait = TraceTypes::enter_after_wait_timeout;
!     }
!     enter (after_wait, Self) ;
    } else {
      guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
      ReenterI (Self, &node) ;
      node.wait_reenter_end(this);
    }
*** 1661,1671 ****
    } // OSThreadWaitState()

    jt->set_current_waiting_monitor(NULL);

    guarantee (_recursions == 0, "invariant") ;
!   _recursions = save;     // restore the old recursion count
    _waiters--;             // decrement the number of waiters

    // Verify a few postconditions
    assert (_owner == Self , "invariant") ;
    assert (_succ != Self , "invariant") ;
--- 1583,1595 ----
    } // OSThreadWaitState()

    jt->set_current_waiting_monitor(NULL);

    guarantee (_recursions == 0, "invariant") ;
!   // restore the saved recursion count and exit stack
!   _recursions = saved_recursions;
!   _trace_exit_stack = saved_trace_exit_stack;
    _waiters--;             // decrement the number of waiters

    // Verify a few postconditions
    assert (_owner == Self , "invariant") ;
    assert (_succ != Self , "invariant") ;
*** 2527,2536 ****
--- 2451,2463 ----
    SETKNOB(ResetEvent) ;
    SETKNOB(MoveNotifyee) ;
    SETKNOB(FastHSSEC) ;
    #undef SETKNOB

+   guarantee(Knob_ExitPolicy == 0, "Sorry, event tracing does not support non-default ExitPolicy");
+   guarantee(Knob_QMode == 0, "Sorry, event tracing does not support non-default QMode");
+ 
    if (os::is_MP()) {
      BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
      if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
      // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
    } else {