--- old/src/hotspot/share/runtime/objectMonitor.cpp	2019-04-18 22:08:10.880306187 -0400
+++ new/src/hotspot/share/runtime/objectMonitor.cpp	2019-04-18 22:08:10.152306199 -0400
@@ -238,7 +238,7 @@
 // -----------------------------------------------------------------------------
 // Enter support
 
-void ObjectMonitor::enter(TRAPS) {
+bool ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
@@ -248,13 +248,13 @@
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
-    return;
+    return true;
   }
 
   if (cur == Self) {
     // TODO-FIXME: check for integer overflow! BUGID 6557169.
     _recursions++;
-    return;
+    return true;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
@@ -263,7 +263,7 @@
     // Commute owner from a thread-specific on-stack BasicLockObject address to
     // a full-fledged "Thread *".
     _owner = Self;
-    return;
+    return true;
   }
 
   // We've encountered genuine contention.
@@ -284,7 +284,7 @@
            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
            p2i(markOopDesc::encode(this)));
     Self->_Stalled = 0;
-    return;
+    return true;
   }
 
   assert(_owner != Self, "invariant");
@@ -293,12 +293,22 @@
   JavaThread * jt = (JavaThread *) Self;
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(jt->thread_state() != _thread_blocked, "invariant");
-  assert(this->object() != NULL, "invariant");
-  assert(_contentions >= 0, "invariant");
+  assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
+  assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
 
-  // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
+  // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
   // Ensure the object-monitor relationship remains stable while there's contention.
-  Atomic::inc(&_contentions);
+  const jint contentions = Atomic::add(1, &_contentions);
+  if (contentions <= 0 && _owner == DEFLATER_MARKER) {
+    // Async deflation is in progress. Attempt to restore the
+    // header/dmw to the object's header so that we only retry once
+    // if the deflater thread happens to be slow.
+    const oop obj = (oop) object();
+    install_displaced_markword_in_object(obj);
+    Self->_Stalled = 0;
+    return false;  // Caller should retry. Never mind about _contentions as this monitor has been deflated.
+  }
+  // The deflater thread will not deflate this monitor and the monitor is contended, so continue.
 
   JFR_ONLY(JfrConditionalFlushWithStacktrace flush(jt);)
   EventJavaMonitorEnter event;
@@ -360,7 +370,7 @@
   }
 
   Atomic::dec(&_contentions);
-  assert(_contentions >= 0, "invariant");
+  assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
@@ -396,6 +406,7 @@
     event.commit();
   }
   OM_PERFDATA_OP(ContendedLockAttempts, inc());
+  return true;
 }
 
 // Caveat: TryLock() is not necessarily serializing if it returns failure.
@@ -417,6 +428,85 @@
   return -1;
 }
 
+// Install the displaced mark word (dmw) of a deflating ObjectMonitor
+// into the header of the object associated with the monitor. This
+// idempotent method is called by a thread that is deflating a
+// monitor and by other threads that have detected a race with the
+// deflation process.
+void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
+  // This function must only be called when (owner == DEFLATER_MARKER
+  // && contentions <= 0), but we can't guarantee that here because
+  // those values could change when the ObjectMonitor gets moved from
+  // the global free list to a per-thread free list.
+
+  guarantee(obj != NULL, "must be non-NULL");
+  if (object() != obj) {
+    // ObjectMonitor's object ref no longer refers to the target object
+    // so the object's header has already been restored.
+    return;
+  }
+
+  markOop dmw = header();
+  if (dmw == NULL) {
+    // ObjectMonitor's header/dmw has been cleared by the deflating
+    // thread so the object's header has already been restored.
+    return;
+  }
+
+  // A non-NULL dmw has to be either neutral (not locked and not marked)
+  // or already participating in this restoration protocol.
+  assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
+         "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
+
+  markOop marked_dmw = NULL;
+  if (!dmw->is_marked() && dmw->hash() == 0) {
+    // This dmw has not yet started the restoration protocol so we
+    // mark a copy of the dmw to begin the protocol.
+    // Note: A dmw with a hashcode does not take this code path.
+    marked_dmw = dmw->set_marked();
+
+    // All of the callers to this function can be racing with each
+    // other trying to update the _header field.
+    dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
+    if (dmw == NULL) {
+      // ObjectMonitor's header/dmw has been cleared by the deflating
+      // thread so the object's header has already been restored.
+      return;
+    }
+    // The _header field is now marked. The winner's 'dmw' variable
+    // contains the original, unmarked header/dmw value and any
+    // losers have a marked header/dmw value that will be cleaned
+    // up below.
+  }
+
+  if (dmw->is_marked()) {
+    // Clear the mark from the header/dmw copy in preparation for
+    // possible restoration from this thread.
+    assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
+           p2i(dmw));
+    dmw = dmw->set_unmarked();
+  }
+  assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
+
+  // Install displaced mark word if the object's header still points
+  // to this ObjectMonitor. All racing callers to this function will
+  // reach this point, but only one can win.
+  obj->cas_set_mark(dmw, markOopDesc::encode(this));
+
+  // Note: It does not matter which thread restored the header/dmw
+  // into the object's header. The thread deflating the monitor just
+  // wanted the object's header restored and it is. The threads that
+  // detected a race with the deflation process also wanted the
+  // object's header restored before they retry their operation and
+  // because it is restored they will only retry once.
+
+  if (marked_dmw != NULL) {
+    // Clear _header to NULL if it is still marked_dmw so a racing
+    // install_displaced_markword_in_object() can bail out sooner.
+    Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
+  }
+}
+
 #define MAX_RECHECK_INTERVAL 1000
 
 void ObjectMonitor::EnterI(TRAPS) {
@@ -432,6 +522,21 @@
     return;
   }
 
+  if (_owner == DEFLATER_MARKER) {
+    // The deflation protocol finished the first part (setting _owner), but
+    // it failed the second part (making _contentions negative) and bailed.
+    // Because we're called from enter() we have at least one contention.
+    guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
+              "should have been handled by the caller: contentions=%d",
+              _contentions);
+    if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+      // Acquired the monitor.
+      assert(_succ != Self, "invariant");
+      assert(_Responsible != Self, "invariant");
+      return;
+    }
+  }
+
   assert(InitDone, "Unexpectedly not initialized");
 
   // We try one round of spinning *before* enqueueing Self.
@@ -548,6 +653,19 @@
 
     if (TryLock(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      // The deflation protocol finished the first part (setting _owner), but
+      // it failed the second part (making _contentions negative) and bailed.
+      // Because we're called from enter() we have at least one contention.
+      guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
+                "should have been handled by the caller: contentions=%d",
+                _contentions);
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // The lock is still contested.
     // Keep a tally of the # of futile wakeups.
     // Note that the counter is not protected by a lock or updated by atomics.
@@ -669,6 +787,19 @@
     if (TryLock(Self) > 0) break;
     if (TrySpin(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      // The deflation protocol finished the first part (setting _owner),
+      // but it will observe _waiters != 0 and will bail out. Because we're
+      // called from wait() we may or may not have any contentions.
+      guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
+                "should have been handled by the caller: contentions=%d",
+                _contentions);
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // State transition wrappers around park() ...
     // ReenterI() wisely defers state transitions until
     // it's clear we must park the thread.
@@ -876,7 +1007,8 @@
     // way we should encounter this situation is in the presence of
     // unbalanced JNI locking. TODO: CheckJNICalls.
     // See also: CR4414101
-    assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
+    assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
+           "owner=" INTPTR_FORMAT, p2i(_owner));
     return;
   }
 }
@@ -1126,16 +1258,20 @@
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
-void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   guarantee(_owner != Self, "reenter already owner");
-  enter(THREAD);       // enter the monitor
+  if (!enter(THREAD)) {
+    // Failed to enter the monitor so return for a retry.
+    return false;
+  }
+  // Entered the monitor.
   guarantee(_recursions == 0, "reenter recursion");
   _recursions = recursions;
-  return;
+  return true;
 }
 
 
@@ -1363,7 +1499,8 @@
   assert(_owner != Self, "invariant");
   ObjectWaiter::TStates v = node.TState;
   if (v == ObjectWaiter::TS_RUN) {
-    enter(Self);
+    const bool success = enter(Self);
+    ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
   } else {
     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
     ReenterI(Self, &node);
@@ -1926,3 +2063,76 @@
 
   DEBUG_ONLY(InitDone = true;)
 }
+
+// For internal use by ObjectSynchronizer::monitors_iterate().
+ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
+  om_ptr->inc_ref_count();
+  _om_ptr = om_ptr;
+}
+
+ObjectMonitorHandle::~ObjectMonitorHandle() {
+  if (_om_ptr != NULL) {
+    _om_ptr->dec_ref_count();
+    _om_ptr = NULL;
+  }
+}
+
+// Save the ObjectMonitor* associated with the specified markOop and
+// increment the ref_count. This function should only be called if
+// the caller has verified mark->has_monitor() == true. The object
+// parameter is needed to verify that ObjectMonitor* has not been
+// deflated and reused for another object.
+//
+// This function returns true if the ObjectMonitor* has been safely
+// saved. This function returns false if we have lost a race with
+// async deflation; the caller should retry as appropriate.
+//
+bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
+  guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
+            p2i(mark));
+
+  ObjectMonitor * om_ptr = mark->monitor();
+  om_ptr->inc_ref_count();
+
+  if (AsyncDeflateIdleMonitors) {
+    // Race here if monitor is not owned! The above ref_count bump
+    // will cause subsequent async deflation to skip it. However,
+    // previous or concurrent async deflation is a race.
+    if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
+      // Async deflation is in progress. Attempt to restore the
+      // header/dmw to the object's header so that we only retry once
+      // if the deflater thread happens to be slow.
+      om_ptr->install_displaced_markword_in_object(object);
+      om_ptr->dec_ref_count();
+      return false;
+    }
+    // The ObjectMonitor could have been deflated and reused for
+    // another object before we bumped the ref_count so make sure
+    // our object still refers to this ObjectMonitor.
+    const markOop tmp = object->mark();
+    if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
+      // Async deflation and reuse won the race so we have to retry.
+      // Skip object header restoration since that's already done.
+      om_ptr->dec_ref_count();
+      return false;
+    }
+  }
+
+  guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
+            p2i(_om_ptr));
+  _om_ptr = om_ptr;
+  return true;
+}
+
+// For internal use by ObjectSynchronizer::inflate().
+void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
+  if (_om_ptr == NULL) {
+    guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
+    om_ptr->inc_ref_count();
+    _om_ptr = om_ptr;
+  } else {
+    guarantee(om_ptr == NULL, "can only clear a set om_ptr");
+    _om_ptr->dec_ref_count();
+    _om_ptr = NULL;
+  }
+}
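
The enter()/reenter() hunks above change the return type to bool so callers can retry when they lose a race with async deflation, and the EnterI()/ReenterI() hunks add a DEFLATER_MARKER takeover path. The following is a minimal standalone model of that owner/contentions handshake, written against std::atomic. It is not HotSpot code: ToyMonitor, the -1000 sentinel and main() are illustrative assumptions; the real protocol lives in ObjectMonitor and the deflater side in ObjectSynchronizer.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

static void* const DEFLATER_MARKER = reinterpret_cast<void*>(std::intptr_t(-1));

struct ToyMonitor {
  std::atomic<void*> owner;
  std::atomic<int>   contentions;
  ToyMonitor() : owner(nullptr), contentions(0) {}

  // Deflater side: claim ownership with DEFLATER_MARKER first, then try to
  // drive contentions negative. If either step loses a race, deflation bails
  // and a contender is allowed to take the monitor over.
  bool try_deflate() {
    void* expected = nullptr;
    if (!owner.compare_exchange_strong(expected, DEFLATER_MARKER)) {
      return false;                    // owned: cannot deflate
    }
    int zero = 0;
    if (!contentions.compare_exchange_strong(zero, -1000)) {
      return false;                    // a contender arrived: bail out
    }
    return true;                       // deflated: enter() callers must retry
  }

  // Contender side, mirroring the new bool ObjectMonitor::enter() contract:
  // false means "deflated under you, retry on a freshly inflated monitor".
  bool enter(void* self) {
    void* cur = nullptr;
    if (owner.compare_exchange_strong(cur, self)) {
      return true;                     // uncontended fast path
    }
    const int c = ++contentions;       // announce contention; blocks further deflation
    if (c <= 0 && owner.load() == DEFLATER_MARKER) {
      return false;                    // lost to deflation; never mind contentions
    }
    for (;;) {                         // slow path; the VM parks, the toy just spins
      void* expected = nullptr;
      if (owner.compare_exchange_strong(expected, self)) break;
      expected = DEFLATER_MARKER;      // EnterI()-style takeover from a bailed deflater
      if (owner.compare_exchange_strong(expected, self)) break;
    }
    --contentions;
    return true;
  }
};

int main() {
  int contender = 0;

  ToyMonitor idle;
  bool deflated = idle.try_deflate();      // idle monitor: both steps succeed
  bool entered  = idle.enter(&contender);  // too late; enter() says retry
  assert(deflated && !entered);

  ToyMonitor fresh;                        // caller "re-inflates" and retries
  entered = fresh.enter(&contender);
  assert(entered);
  std::printf("retry protocol OK: deflated=%d entered=%d\n", (int)deflated, (int)entered);
  return 0;
}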
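
install_displaced_markword_in_object() is idempotent: any number of racing threads may call it, exactly one CAS wins when restoring the object's header, and the losers clean up after themselves. Below is a minimal standalone model of that restoration protocol, assuming a toy mark word whose low bit stands in for the "marked" state; real mark words also carry hash, age and lock bits, and the hashcode case is omitted here.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uintptr_t toy_mark;

static const toy_mark ENCODED_MONITOR = 0xABCD;  // object header: "inflated, see monitor"
static const toy_mark NEUTRAL_DMW     = 0x1230;  // displaced header to put back (bit 0 clear)

static std::atomic<toy_mark> object_header(ENCODED_MONITOR);  // the object's mark
static std::atomic<toy_mark> monitor_header(NEUTRAL_DMW);     // models ObjectMonitor::_header

static bool is_marked(toy_mark m) { return (m & 1) != 0; }

// Any number of threads may call this; exactly one restores the header and
// the call is harmless for everyone else (mirrors the comments in the patch).
static void restore_header() {
  toy_mark dmw = monitor_header.load();
  if (dmw == 0) return;                           // already cleared by the deflater

  toy_mark marked_dmw = 0;
  if (!is_marked(dmw)) {
    // Start the protocol by publishing a marked copy of the dmw.
    marked_dmw = dmw | 1;
    toy_mark expected = dmw;
    if (!monitor_header.compare_exchange_strong(expected, marked_dmw)) {
      if (expected == 0) return;                  // cleared while we raced
      dmw = expected;                             // a racer already marked it
    }
    // On CAS success, 'dmw' still holds the original, unmarked value.
  }
  if (is_marked(dmw)) dmw &= ~(toy_mark)1;        // strip the mark before restoring

  // Put the displaced header back iff the object still points at the monitor;
  // only one of the racing callers can win this CAS and that is fine.
  toy_mark expect_obj = ENCODED_MONITOR;
  object_header.compare_exchange_strong(expect_obj, dmw);

  if (marked_dmw != 0) {
    // Clear the marked value so later callers can bail out sooner.
    toy_mark expect_hdr = marked_dmw;
    monitor_header.compare_exchange_strong(expect_hdr, 0);
  }
}

int main() {
  restore_header();                               // first caller restores the header
  restore_header();                               // second caller is a no-op
  assert(object_header.load() == NEUTRAL_DMW);
  assert(monitor_header.load() == 0);
  std::printf("header restoration is idempotent\n");
  return 0;
}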
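
ObjectMonitorHandle pairs inc_ref_count()/dec_ref_count() RAII-style so a reader can pin an ObjectMonitor against async deflation for the duration of a scope. The following is a hedged, HotSpot-style sketch of a caller; it is not part of this patch and will not compile on its own. It assumes the header also declares a default constructor that NULLs _om_ptr and an om_ptr() accessor (neither is shown in this .cpp diff), and do_work_with() is a hypothetical placeholder.

// Hypothetical reader that wants to use an inflated monitor safely.
// save_om_ptr() either bumps the ref_count (so async deflation skips the
// monitor) or reports a lost race, in which case we re-read the mark.
static void with_monitor(oop obj) {
  while (true) {
    markOop mark = obj->mark();
    if (!mark->has_monitor()) {
      return;                           // not inflated (any more); nothing to do
    }
    ObjectMonitorHandle omh;            // assumed default ctor: _om_ptr == NULL
    if (!omh.save_om_ptr(obj, mark)) {
      continue;                         // raced with async deflation; retry
    }
    ObjectMonitor* mon = omh.om_ptr();  // assumed accessor
    do_work_with(mon);                  // safe: ref_count > 0 keeps mon from deflating
    return;                             // ~ObjectMonitorHandle drops the ref_count
  }
}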