src/hotspot/share/runtime/objectMonitor.cpp

rev 54415 : 8222295: more baseline cleanups from Async Monitor Deflation project
rev 54416 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54417 : imported patch dcubed.monitor_deflate_conc.v2.01

*** 236,271 ****
  }
  
  // -----------------------------------------------------------------------------
  // Enter support
  
! void ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD;
  
    void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
    if (cur == NULL) {
      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
      assert(_recursions == 0, "invariant");
      assert(_owner == Self, "invariant");
!     return;
    }
  
    if (cur == Self) {
      // TODO-FIXME: check for integer overflow! BUGID 6557169.
      _recursions++;
!     return;
    }
  
    if (Self->is_lock_owned ((address)cur)) {
      assert(_recursions == 0, "internal state error");
      _recursions = 1;
      // Commute owner from a thread-specific on-stack BasicLockObject address to
      // a full-fledged "Thread *".
      _owner = Self;
!     return;
    }
  
    // We've encountered genuine contention.
    assert(Self->_Stalled == 0, "invariant");
    Self->_Stalled = intptr_t(this);
--- 236,271 ----
  }
  
  // -----------------------------------------------------------------------------
  // Enter support
  
! bool ObjectMonitor::enter(TRAPS) {
    // The following code is ordered to check the most common cases first
    // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
    Thread * const Self = THREAD;
  
    void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
    if (cur == NULL) {
      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
      assert(_recursions == 0, "invariant");
      assert(_owner == Self, "invariant");
!     return true;
    }
  
    if (cur == Self) {
      // TODO-FIXME: check for integer overflow! BUGID 6557169.
      _recursions++;
!     return true;
    }
  
    if (Self->is_lock_owned ((address)cur)) {
      assert(_recursions == 0, "internal state error");
      _recursions = 1;
      // Commute owner from a thread-specific on-stack BasicLockObject address to
      // a full-fledged "Thread *".
      _owner = Self;
!     return true;
    }
  
    // We've encountered genuine contention.
    assert(Self->_Stalled == 0, "invariant");
    Self->_Stalled = intptr_t(this);
*** 282,306 ****
      assert(((oop)object())->mark() == markOopDesc::encode(this),
             "object mark must match encoded this: mark=" INTPTR_FORMAT
             ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
             p2i(markOopDesc::encode(this)));
      Self->_Stalled = 0;
!     return;
    }
  
    assert(_owner != Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(Self->is_Java_thread(), "invariant");
    JavaThread * jt = (JavaThread *) Self;
    assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    assert(jt->thread_state() != _thread_blocked, "invariant");
!   assert(this->object() != NULL, "invariant");
!   assert(_contentions >= 0, "invariant");
  
!   // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
    // Ensure the object-monitor relationship remains stable while there's contention.
!   Atomic::inc(&_contentions);
  
    JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
    EventJavaMonitorEnter event;
    if (event.should_commit()) {
      event.set_monitorClass(((oop)this->object())->klass());
--- 282,316 ----
      assert(((oop)object())->mark() == markOopDesc::encode(this),
             "object mark must match encoded this: mark=" INTPTR_FORMAT
             ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
             p2i(markOopDesc::encode(this)));
      Self->_Stalled = 0;
!     return true;
    }
  
    assert(_owner != Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(Self->is_Java_thread(), "invariant");
    JavaThread * jt = (JavaThread *) Self;
    assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
    assert(jt->thread_state() != _thread_blocked, "invariant");
!   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
!   assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
  
!   // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
    // Ensure the object-monitor relationship remains stable while there's contention.
!   const jint contentions = Atomic::add(1, &_contentions);
!   if (contentions <= 0 && _owner == DEFLATER_MARKER) {
!     // Async deflation is in progress. Attempt to restore the
!     // header/dmw to the object's header so that we only retry once
!     // if the deflater thread happens to be slow.
!     const oop obj = (oop) object();
!     install_displaced_markword_in_object(obj);
!     Self->_Stalled = 0;
!     return false;  // Caller should retry. Never mind about _contentions as this monitor has been deflated.
!   }
!   // The deflater thread will not deflate this monitor and the monitor is contended, continue.
  
    JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
    EventJavaMonitorEnter event;
    if (event.should_commit()) {
      event.set_monitorClass(((oop)this->object())->klass());
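With enter() now returning bool, every caller has to be prepared to re-inflate and retry when it loses a race with async deflation. Below is a minimal caller-side sketch of that retry shape; the enter_with_retry name is illustrative and inflate() is assumed to behave like ObjectSynchronizer::inflate(), returning a live ObjectMonitor* for the object. This is not code from the patch, just the shape its callers take:

  // Sketch only: caller-side retry loop around the new bool ObjectMonitor::enter().
  static void enter_with_retry(Handle obj, TRAPS) {
    while (true) {
      ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(THREAD)) {
        return;  // Entered the monitor.
      }
      // enter() returned false: the monitor was async deflated underneath us
      // and the object's header has been restored, so re-inflate and retry.
      // Because enter() restores the header itself, we retry at most once
      // unless the deflater thread is very slow.
    }
  }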
*** 358,368 ****
      // states will still report that the thread is blocked trying to
      // acquire it.
    }
  
    Atomic::dec(&_contentions);
!   assert(_contentions >= 0, "invariant");
    Self->_Stalled = 0;
  
    // Must either set _recursions = 0 or ASSERT _recursions == 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
--- 368,378 ----
      // states will still report that the thread is blocked trying to
      // acquire it.
    }
  
    Atomic::dec(&_contentions);
!   assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
    Self->_Stalled = 0;
  
    // Must either set _recursions = 0 or ASSERT _recursions == 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
*** 394,403 ****
--- 404,414 ----
    if (event.should_commit()) {
      event.set_previousOwner((uintptr_t)_previous_owner_tid);
      event.commit();
    }
  
    OM_PERFDATA_OP(ContendedLockAttempts, inc());
+   return true;
  }
  
  // Caveat: TryLock() is not necessarily serializing if it returns failure.
  // Callers must compensate as needed.
*** 415,424 ****
--- 426,514 ----
    // We can either return -1 or retry.
    // Retry doesn't make as much sense because the lock was just acquired.
    return -1;
  }
  
+ // Install the displaced mark word (dmw) of a deflating ObjectMonitor
+ // into the header of the object associated with the monitor. This
+ // idempotent method is called by a thread that is deflating a
+ // monitor and by other threads that have detected a race with the
+ // deflation process.
+ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
+   // This function must only be called when (owner == DEFLATER_MARKER
+   // && contentions <= 0), but we can't guarantee that here because
+   // those values could change when the ObjectMonitor gets moved from
+   // the global free list to a per-thread free list.
+ 
+   guarantee(obj != NULL, "must be non-NULL");
+   if (object() != obj) {
+     // ObjectMonitor's object ref no longer refers to the target object
+     // so the object's header has already been restored.
+     return;
+   }
+ 
+   markOop dmw = header();
+   if (dmw == NULL) {
+     // ObjectMonitor's header/dmw has been cleared by the deflating
+     // thread so the object's header has already been restored.
+     return;
+   }
+ 
+   // A non-NULL dmw has to be either neutral (not locked and not marked)
+   // or is already participating in this restoration protocol.
+   assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
+          "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
+ 
+   markOop marked_dmw = NULL;
+   if (!dmw->is_marked() && dmw->hash() == 0) {
+     // This dmw has not yet started the restoration protocol so we
+     // mark a copy of the dmw to begin the protocol.
+     // Note: A dmw with a hashcode does not take this code path.
+     marked_dmw = dmw->set_marked();
+ 
+     // All of the callers to this function can be racing with each
+     // other trying to update the _header field.
+     dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
+     if (dmw == NULL) {
+       // ObjectMonitor's header/dmw has been cleared by the deflating
+       // thread so the object's header has already been restored.
+       return;
+     }
+     // The _header field is now marked. The winner's 'dmw' variable
+     // contains the original, unmarked header/dmw value and any
+     // losers have a marked header/dmw value that will be cleaned
+     // up below.
+   }
+ 
+   if (dmw->is_marked()) {
+     // Clear the mark from the header/dmw copy in preparation for
+     // possible restoration from this thread.
+     assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
+            p2i(dmw));
+     dmw = dmw->set_unmarked();
+   }
+   assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
+ 
+   // Install displaced mark word if the object's header still points
+   // to this ObjectMonitor. All racing callers to this function will
+   // reach this point, but only one can win.
+   obj->cas_set_mark(dmw, markOopDesc::encode(this));
+ 
+   // Note: It does not matter which thread restored the header/dmw
+   // into the object's header. The thread deflating the monitor just
+   // wanted the object's header restored and it is. The threads that
+   // detected a race with the deflation process also wanted the
+   // object's header restored before they retry their operation and
+   // because it is restored they will only retry once.
+ 
+   if (marked_dmw != NULL) {
+     // Clear _header to NULL if it is still marked_dmw so a racing
+     // install_displaced_markword_in_object() can bail out sooner.
+     Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
+   }
+ }
+ 
  #define MAX_RECHECK_INTERVAL 1000
  
  void ObjectMonitor::EnterI(TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "invariant");
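The mark bit on the dmw acts as a one-shot latch that serializes the racing callers of install_displaced_markword_in_object(). A condensed walkthrough of the _header states the function distinguishes, in illustrative notation ('M' stands for the bit set by set_marked(); these are not real mark word encodings):

  // Illustrative state walkthrough, not code from the patch:
  //
  //   _header == NULL      -> the deflater already cleared it; the object's
  //                           header was restored by someone else; return.
  //   _header == dmw       -> unmarked: CAS _header from dmw to dmw|M to
  //                           start the protocol; the CAS winner keeps the
  //                           clean dmw value, losers observe a marked one.
  //   _header == dmw|M     -> strip M to get the clean dmw, then CAS the
  //                           object's mark word from encode(this) back to
  //                           dmw; only one racing caller's CAS can win.
  //
  // Finally the protocol starter CASes _header from dmw|M back to NULL so
  // that later racers bail out at the first (cheapest) check.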
*** 430,439 ****
--- 520,544 ----
      assert(_owner == Self, "invariant");
      assert(_Responsible != Self, "invariant");
      return;
    }
  
+   if (_owner == DEFLATER_MARKER) {
+     // The deflation protocol finished the first part (setting _owner), but
+     // it failed the second part (making _contentions negative) and bailed.
+     // Because we're called from enter() we have at least one contention.
+     guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
+               "should have been handled by the caller: contentions=%d",
+               _contentions);
+     if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+       // Acquired the monitor.
+       assert(_succ != Self, "invariant");
+       assert(_Responsible != Self, "invariant");
+       return;
+     }
+   }
+ 
    assert(InitDone, "Unexpectedly not initialized");
  
    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
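The DEFLATER_MARKER checks here, and the matching ones below in the EnterI() park loop and in ReenterI(), are the contender's half of a two-part handshake. A hedged sketch of the deflater's half follows; the function name and shape are illustrative assumptions, not the actual ObjectSynchronizer deflation code, but they use the Atomic signatures and fields seen elsewhere in this patch:

  // Sketch only: the async deflater's two-part claim on an apparently idle monitor.
  static bool deflate_monitor_sketch(ObjectMonitor* mid) {
    // Part 1: claim the monitor by installing DEFLATER_MARKER as owner.
    if (Atomic::cmpxchg((void*)DEFLATER_MARKER, &mid->_owner,
                        (void*)NULL) != NULL) {
      return false;  // The monitor is owned; it is not idle after all.
    }
    // Part 2: drive _contentions negative so racing enter() calls bail out.
    if (mid->_waiters != 0 ||
        Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) != 0) {
      // A waiter or contender appeared between part 1 and part 2. Bail:
      // hand _owner back to NULL, unless a contender already claimed it
      // via the DEFLATER_MARKER cmpxchg in EnterI()/ReenterI().
      Atomic::cmpxchg((void*)NULL, &mid->_owner, (void*)DEFLATER_MARKER);
      return false;
    }
    // Both parts held: no contender can enter now, so restore the object's
    // header and hand the monitor to the free list (elided here).
    mid->install_displaced_markword_in_object((oop)mid->object());
    return true;
  }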
*** 546,555 ****
--- 651,673 ----
        Self->_ParkEvent->park();
      }
  
      if (TryLock(Self) > 0) break;
  
+     if (_owner == DEFLATER_MARKER) {
+       // The deflation protocol finished the first part (setting _owner), but
+       // it failed the second part (making _contentions negative) and bailed.
+       // Because we're called from enter() we have at least one contention.
+       guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
+                 "should have been handled by the caller: contentions=%d",
+                 _contentions);
+       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+         // Acquired the monitor.
+         break;
+       }
+     }
+ 
      // The lock is still contested.
      // Keep a tally of the # of futile wakeups.
      // Note that the counter is not protected by a lock or updated by atomics.
      // That is by design - we trade "lossy" counters which are exposed to
      // races during updates for a lower probe effect.
*** 667,676 ****
--- 785,807 ----
      assert(_owner != Self, "invariant");
  
      if (TryLock(Self) > 0) break;
      if (TrySpin(Self) > 0) break;
  
+     if (_owner == DEFLATER_MARKER) {
+       // The deflation protocol finished the first part (setting _owner),
+       // but it will observe _waiters != 0 and will bail out. Because we're
+       // called from wait() we may or may not have any contentions.
+       guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
+                 "should have been handled by the caller: contentions=%d",
+                 _contentions);
+       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+         // Acquired the monitor.
+         break;
+       }
+     }
+ 
      // State transition wrappers around park() ...
      // ReenterI() wisely defers state transitions until
      // it's clear we must park the thread.
      {
        OSThreadContendState osts(Self->osthread());
*** 874,884 ****
        // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
        // Upon deeper reflection, however, in a properly run JVM the only
        // way we should encounter this situation is in the presence of
        // unbalanced JNI locking. TODO: CheckJNICalls.
        // See also: CR4414101
!       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
        return;
      }
    }
  
    if (_recursions != 0) {
--- 1005,1016 ----
        // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
        // Upon deeper reflection, however, in a properly run JVM the only
        // way we should encounter this situation is in the presence of
        // unbalanced JNI locking. TODO: CheckJNICalls.
        // See also: CR4414101
!       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
!              "owner=" INTPTR_FORMAT, p2i(_owner));
        return;
      }
    }
  
    if (_recursions != 0) {
*** 1124,1143 ****
    return save;
  }
  
  // reenter() enters a lock and sets recursion count
  // complete_exit/reenter operate as a wait without waiting
! void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;
  
    guarantee(_owner != Self, "reenter already owner");
!   enter(THREAD);  // enter the monitor
    guarantee(_recursions == 0, "reenter recursion");
    _recursions = recursions;
!   return;
  }
  
  
  // -----------------------------------------------------------------------------
  // A macro is used below because there may already be a pending
--- 1256,1279 ----
    return save;
  }
  
  // reenter() enters a lock and sets recursion count
  // complete_exit/reenter operate as a wait without waiting
! bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
    Thread * const Self = THREAD;
    assert(Self->is_Java_thread(), "Must be Java thread!");
    JavaThread *jt = (JavaThread *)THREAD;
  
    guarantee(_owner != Self, "reenter already owner");
!   if (!enter(THREAD)) {
!     // Failed to enter the monitor so return for a retry.
!     return false;
!   }
!   // Entered the monitor.
    guarantee(_recursions == 0, "reenter recursion");
    _recursions = recursions;
!   return true;
  }
  
  
  // -----------------------------------------------------------------------------
  // A macro is used below because there may already be a pending
*** 1361,1371 ****
    Self->_Stalled = 0;
  
    assert(_owner != Self, "invariant");
    ObjectWaiter::TStates v = node.TState;
    if (v == ObjectWaiter::TS_RUN) {
!     enter(Self);
    } else {
      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
      ReenterI(Self, &node);
      node.wait_reenter_end(this);
    }
--- 1497,1508 ----
    Self->_Stalled = 0;
  
    assert(_owner != Self, "invariant");
    ObjectWaiter::TStates v = node.TState;
    if (v == ObjectWaiter::TS_RUN) {
!     const bool success = enter(Self);
!     ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
    } else {
      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
      ReenterI(Self, &node);
      node.wait_reenter_end(this);
    }
*** 1924,1928 ****
--- 2061,2138 ----
  #undef NEWPERFVARIABLE
    }
  
    DEBUG_ONLY(InitDone = true;)
  }
+ 
+ // For internal use by ObjectSynchronizer::monitors_iterate().
+ ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
+   om_ptr->inc_ref_count();
+   _om_ptr = om_ptr;
+ }
+ 
+ ObjectMonitorHandle::~ObjectMonitorHandle() {
+   if (_om_ptr != NULL) {
+     _om_ptr->dec_ref_count();
+     _om_ptr = NULL;
+   }
+ }
+ 
+ // Save the ObjectMonitor* associated with the specified markOop and
+ // increment the ref_count. This function should only be called if
+ // the caller has verified mark->has_monitor() == true. The object
+ // parameter is needed to verify that the ObjectMonitor* has not been
+ // deflated and reused for another object.
+ //
+ // This function returns true if the ObjectMonitor* has been safely
+ // saved. This function returns false if we have lost a race with
+ // async deflation; the caller should retry as appropriate.
+ //
+ bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
+   guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
+             p2i(mark));
+ 
+   ObjectMonitor * om_ptr = mark->monitor();
+   om_ptr->inc_ref_count();
+ 
+   if (AsyncDeflateIdleMonitors) {
+     // Race here if monitor is not owned! The above ref_count bump
+     // will cause subsequent async deflation to skip it. However,
+     // previous or concurrent async deflation is a race.
+     if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
+       // Async deflation is in progress. Attempt to restore the
+       // header/dmw to the object's header so that we only retry once
+       // if the deflater thread happens to be slow.
+       om_ptr->install_displaced_markword_in_object(object);
+       om_ptr->dec_ref_count();
+       return false;
+     }
+     // The ObjectMonitor could have been deflated and reused for
+     // another object before we bumped the ref_count so make sure
+     // our object still refers to this ObjectMonitor.
+     const markOop tmp = object->mark();
+     if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
+       // Async deflation and reuse won the race so we have to retry.
+       // Skip object header restoration since that's already done.
+       om_ptr->dec_ref_count();
+       return false;
+     }
+   }
+ 
+   guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
+             p2i(_om_ptr));
+   _om_ptr = om_ptr;
+   return true;
+ }
+ 
+ // For internal use by ObjectSynchronizer::inflate().
+ void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
+   if (_om_ptr == NULL) {
+     guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
+     om_ptr->inc_ref_count();
+     _om_ptr = om_ptr;
+   } else {
+     guarantee(om_ptr == NULL, "can only clear a set om_ptr");
+     _om_ptr->dec_ref_count();
+     _om_ptr = NULL;
+   }
+ }
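A short usage sketch of the RAII handle added above. The with_monitor() wrapper is illustrative, and an om_ptr() accessor on ObjectMonitorHandle is assumed here; only the save_om_ptr()/destructor behavior is taken from the patch:

  // Sketch only: safely operate on an inflated monitor under async deflation.
  void with_monitor(oop obj, TRAPS) {
    while (true) {
      markOop mark = obj->mark();
      if (!mark->has_monitor()) {
        return;  // Not inflated (or just deflated); nothing to do here.
      }
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        continue;  // Lost a race with async deflation; re-read and retry.
      }
      ObjectMonitor* monitor = omh.om_ptr();  // om_ptr() accessor assumed.
      // ... use 'monitor'; the bumped ref_count keeps deflation away ...
      return;  // ~ObjectMonitorHandle drops the ref_count on every path.
    }
  }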