< prev index next >

src/hotspot/share/runtime/objectMonitor.cpp

Print this page
rev 56634 : imported patch 8230876.patch
rev 56635 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch; merge with 8230876.patch; merge with jdk-14+15; merge with jdk-14+18.
rev 56637 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can be experimented with independently of DEFAULT_CACHE_LINE_SIZE; for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE, we are experimenting with 64; move _previous_owner_tid and _allocation_state fields to share the cache line with ObjectMonitor::_header; put ObjectMonitor::_ref_count on its own cache line after _owner; add 'int* count_p' parameter to deflate_monitor_list() and deflate_monitor_list_using_JT() and push counter updates down to where the ObjectMonitors are actually removed from the in-use lists; monitors_iterate() async deflation check should use negative ref_count; add 'JavaThread* target' param to deflate_per_thread_idle_monitors_using_JT() add deflate_common_idle_monitors_using_JT() to make it clear which JavaThread* is the target of the work and which is the calling JavaThread* (self); g_free_list, g_om_in_use_list and g_om_in_use_count are now static to synchronizer.cpp (reduce scope); add more diagnostic info to some assert()'s; minor code cleanups and code motion; save_om_ptr() should detect a race with a deflating thread that is bailing out and cause a retry when the ref_count field is not positive; merge with jdk-14+11; add special GC support for TestHumongousClassLoader.java; merge with 8230184.patch; merge with jdk-14+14; merge with jdk-14+18.
rev 56639 : loosen a couple more counter checks due to races observed in testing; simplify om_release() extraction of mid since list head or cur_mid_in_use is marked; simplify deflate_monitor_list() extraction of mid since there are no parallel deleters due to the safepoint; simplify deflate_monitor_list_using_JT() extraction of mid since list head or cur_mid_in_use is marked; prepend_block_to_lists() - simplify based on David H's comments; does not need load_acquire() or release_store() because of the cmpxchg(); prepend_to_common() - simplify to use mark_next_loop() for m and use mark_list_head() and release_store() for the non-empty list case; add more debugging for "Non-balanced monitor enter/exit" failure mode; fix race in inflate() in the "CASE: neutral" code path; install_displaced_markword_in_object() does not need to clear the header field since that is handled when the ObjectMonitor is moved from the global free list; LSuccess should clear boxReg to set ICC.ZF=1 to avoid depending on existing boxReg contents; update fast_unlock() to detect when object no longer refers to the same ObjectMonitor and take fast path exit instead; clarify fast_lock() code where we detect when object no longer refers to the same ObjectMonitor; add/update comments for movptr() calls where we move a literal into an Address; remove set_owner(); refactor setting of owner field into set_owner_from(2 versions), set_owner_from_BasicLock(), and try_set_owner_from(); the new functions include monitorinflation+owner logging; extract debug code from v2.06 and v2.07 and move to v2.07.debug; change 'jccb' -> 'jcc' and 'jmpb' -> 'jmp' as needed; checkpoint initial version of MacroAssembler::inc_om_ref_count(); update LP64 MacroAssembler::fast_lock() and fast_unlock() to use inc_om_ref_count(); fast_lock() return flag setting logic can use 'testptr(tmpReg, tmpReg)' instead of 'cmpptr(tmpReg, 0)' since that's more efficient; fast_unlock() LSuccess return flag setting logic can use 'testl 
(boxReg, 0)' instead of 'xorptr(boxReg, boxReg)' since that's more efficient; cleanup "fast-path" vs "fast path" and "slow-path" vs "slow path"; update MacroAssembler::rtm_inflated_locking() to use inc_om_ref_count(); update MacroAssembler::fast_lock() to preserve the flags before decrementing ref_count and restore the flags afterwards; this is more clean than depending on the contents of rax/tmpReg; coleenp CR - refactor async monitor deflation work from ServiceThread::service_thread_entry() to ObjectSynchronizer::deflate_idle_monitors_using_JT(); rehn,eosterlund CR - add support for HandshakeAfterDeflateIdleMonitors for platforms that don't have ObjectMonitor ref_count support implemented in C2 fast_lock() and fast_unlock().

*** 237,251 **** // ----------------------------------------------------------------------------- // Enter support void ObjectMonitor::enter(TRAPS) { // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; ! void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL); if (cur == NULL) { assert(_recursions == 0, "invariant"); return; } --- 237,253 ---- // ----------------------------------------------------------------------------- // Enter support void ObjectMonitor::enter(TRAPS) { + ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count()); + // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; ! void* cur = try_set_owner_from(Self, NULL); if (cur == NULL) { assert(_recursions == 0, "invariant"); return; }
*** 256,268 **** } if (Self->is_lock_owned ((address)cur)) { assert(_recursions == 0, "internal state error"); _recursions = 1; ! // Commute owner from a thread-specific on-stack BasicLockObject address to ! // a full-fledged "Thread *". ! _owner = Self; return; } // We've encountered genuine contention. assert(Self->_Stalled == 0, "invariant"); --- 258,278 ---- } if (Self->is_lock_owned ((address)cur)) { assert(_recursions == 0, "internal state error"); _recursions = 1; ! set_owner_from_BasicLock(Self, cur); // Convert from BasicLock* to Thread*. ! return; ! } ! ! if (AsyncDeflateIdleMonitors && ! try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) { ! // The deflation protocol finished the first part (setting owner), ! // but it failed the second part (making ref_count negative) and ! // bailed. Or the ObjectMonitor was async deflated and reused. ! // Acquired the monitor. ! assert(_recursions == 0, "invariant"); return; } // We've encountered genuine contention. assert(Self->_Stalled == 0, "invariant");
*** 273,284 **** // transitions. The following spin is strictly optional ... // Note that if we acquire the monitor from an initial spin // we forgo posting JVMTI events and firing DTRACE probes. if (TrySpin(Self) > 0) { assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner)); ! assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, ! _recursions); assert(((oop)object())->mark() == markWord::encode(this), "object mark must match encoded this: mark=" INTPTR_FORMAT ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(), markWord::encode(this).value()); Self->_Stalled = 0; --- 283,293 ---- // transitions. The following spin is strictly optional ... // Note that if we acquire the monitor from an initial spin // we forgo posting JVMTI events and firing DTRACE probes. if (TrySpin(Self) > 0) { assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner)); ! assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions); assert(((oop)object())->mark() == markWord::encode(this), "object mark must match encoded this: mark=" INTPTR_FORMAT ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(), markWord::encode(this).value()); Self->_Stalled = 0;
*** 289,304 **** assert(_succ != Self, "invariant"); assert(Self->is_Java_thread(), "invariant"); JavaThread * jt = (JavaThread *) Self; assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(jt->thread_state() != _thread_blocked, "invariant"); ! assert(this->object() != NULL, "invariant"); ! assert(_contentions >= 0, "invariant"); ! // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). ! // Ensure the object-monitor relationship remains stable while there's contention. ! Atomic::inc(&_contentions); JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);) EventJavaMonitorEnter event; if (event.should_commit()) { event.set_monitorClass(((oop)this->object())->klass()); --- 298,315 ---- assert(_succ != Self, "invariant"); assert(Self->is_Java_thread(), "invariant"); JavaThread * jt = (JavaThread *) Self; assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(jt->thread_state() != _thread_blocked, "invariant"); ! assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant"); ! assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); ! // Prevent deflation. See ObjectSynchronizer::deflate_monitor(), ! // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy(). ! // Ensure the object <-> monitor relationship remains stable while ! // there's contention. ! Atomic::add(1, &_contentions); JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);) EventJavaMonitorEnter event; if (event.should_commit()) { event.set_monitorClass(((oop)this->object())->klass());
*** 356,366 **** // states will still report that the thread is blocked trying to // acquire it. } Atomic::dec(&_contentions); ! assert(_contentions >= 0, "invariant"); Self->_Stalled = 0; // Must either set _recursions = 0 or ASSERT _recursions == 0. assert(_recursions == 0, "invariant"); assert(_owner == Self, "invariant"); --- 367,377 ---- // states will still report that the thread is blocked trying to // acquire it. } Atomic::dec(&_contentions); ! assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); Self->_Stalled = 0; // Must either set _recursions = 0 or ASSERT _recursions == 0. assert(_recursions == 0, "invariant"); assert(_owner == Self, "invariant");
*** 400,432 **** // Callers must compensate as needed. int ObjectMonitor::TryLock(Thread * Self) { void * own = _owner; if (own != NULL) return 0; ! if (Atomic::replace_if_null(Self, &_owner)) { assert(_recursions == 0, "invariant"); return 1; } // The lock had been free momentarily, but we lost the race to the lock. // Interference -- the CAS failed. // We can either return -1 or retry. // Retry doesn't make as much sense because the lock was just acquired. return -1; } // Convert the fields used by is_busy() to a string that can be // used for diagnostic output. const char* ObjectMonitor::is_busy_to_string(stringStream* ss) { ! ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT ! ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions, ! _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList)); return ss->base(); } #define MAX_RECHECK_INTERVAL 1000 void ObjectMonitor::EnterI(TRAPS) { Thread * const Self = THREAD; assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant"); // Try the lock - TATAS --- 411,525 ---- // Callers must compensate as needed. int ObjectMonitor::TryLock(Thread * Self) { void * own = _owner; if (own != NULL) return 0; ! if (try_set_owner_from(Self, NULL) == NULL) { assert(_recursions == 0, "invariant"); return 1; } // The lock had been free momentarily, but we lost the race to the lock. // Interference -- the CAS failed. // We can either return -1 or retry. // Retry doesn't make as much sense because the lock was just acquired. return -1; } + // Install the displaced mark word (dmw) of a deflating ObjectMonitor + // into the header of the object associated with the monitor. This + // idempotent method is called by a thread that is deflating a + // monitor and by other threads that have detected a race with the + // deflation process. 
+ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { + // This function must only be called when (owner == DEFLATER_MARKER + // && ref_count <= 0), but we can't guarantee that here because + // those values could change when the ObjectMonitor gets moved from + // the global free list to a per-thread free list. + + guarantee(obj != NULL, "must be non-NULL"); + if (object() != obj) { + // ObjectMonitor's object ref no longer refers to the target object + // so the object's header has already been restored. + return; + } + + markWord dmw = header(); + if (dmw.value() == 0) { + // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor + // has been deflated and taken off the global free list. + return; + } + + // A non-NULL dmw has to be either neutral (not locked and not marked) + // or is already participating in this restoration protocol. + assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0), + "failed precondition: dmw=" INTPTR_FORMAT, dmw.value()); + + markWord marked_dmw = markWord::zero(); + if (!dmw.is_marked() && dmw.hash() == 0) { + // This dmw has not yet started the restoration protocol so we + // mark a copy of the dmw to begin the protocol. + // Note: A dmw with a hashcode does not take this code path. + marked_dmw = dmw.set_marked(); + + // All of the callers to this function can be racing with each + // other trying to update the _header field. + dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw); + if (dmw.value() == 0) { + // ObjectMonitor's header/dmw has been cleared so the object's + // header has already been restored. + return; + } + // The _header field is now marked. The winner's 'dmw' variable + // contains the original, unmarked header/dmw value and any + // losers have a marked header/dmw value that will be cleaned + // up below. + } + + if (dmw.is_marked()) { + // Clear the mark from the header/dmw copy in preparation for + // possible restoration from this thread. 
+ assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT, + dmw.value()); + dmw = dmw.set_unmarked(); + } + assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value()); + + // Install displaced mark word if the object's header still points + // to this ObjectMonitor. All racing callers to this function will + // reach this point, but only one can win. + obj->cas_set_mark(dmw, markWord::encode(this)); + + // Note: It does not matter which thread restored the header/dmw + // into the object's header. The thread deflating the monitor just + // wanted the object's header restored and it is. The threads that + // detected a race with the deflation process also wanted the + // object's header restored before they retry their operation and + // because it is restored they will only retry once. + } + // Convert the fields used by is_busy() to a string that can be // used for diagnostic output. const char* ObjectMonitor::is_busy_to_string(stringStream* ss) { ! ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters); ! if (!AsyncDeflateIdleMonitors) { ! ss->print("owner=" INTPTR_FORMAT, p2i(_owner)); ! } else if (_owner != DEFLATER_MARKER) { ! ss->print("owner=" INTPTR_FORMAT, p2i(_owner)); ! } else { ! ss->print("owner=" INTPTR_FORMAT, NULL); ! } ! ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq), ! p2i(_EntryList)); return ss->base(); } #define MAX_RECHECK_INTERVAL 1000 void ObjectMonitor::EnterI(TRAPS) { + ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count()); + Thread * const Self = THREAD; assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant"); // Try the lock - TATAS
*** 435,444 **** --- 528,548 ---- assert(_owner == Self, "invariant"); assert(_Responsible != Self, "invariant"); return; } + if (AsyncDeflateIdleMonitors && + try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), + // but it failed the second part (making ref_count negative) and + // bailed. Or the ObjectMonitor was async deflated and reused. + // Acquired the monitor. + assert(_succ != Self, "invariant"); + assert(_Responsible != Self, "invariant"); + return; + } + assert(InitDone, "Unexpectedly not initialized"); // We try one round of spinning *before* enqueueing Self. // // If the _owner is ready but OFFPROC we could use a YieldTo()
*** 551,560 **** --- 655,673 ---- Self->_ParkEvent->park(); } if (TryLock(Self) > 0) break; + if (AsyncDeflateIdleMonitors && + try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), + // but it failed the second part (making ref_count negative) and + // bailed. Or the ObjectMonitor was async deflated and reused. + // Acquired the monitor. + break; + } + // The lock is still contested. // Keep a tally of the # of futile wakeups. // Note that the counter is not protected by a lock or updated by atomics. // That is by design - we trade "lossy" counters which are exposed to // races during updates for a lower probe effect.
*** 655,664 **** --- 768,779 ---- // monitor reentry in wait(). // // In the future we should reconcile EnterI() and ReenterI(). void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) { + ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count()); + assert(Self != NULL, "invariant"); assert(SelfNode != NULL, "invariant"); assert(SelfNode->_thread == Self, "invariant"); assert(_waiters > 0, "invariant"); assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
*** 672,681 **** --- 787,805 ---- assert(_owner != Self, "invariant"); if (TryLock(Self) > 0) break; if (TrySpin(Self) > 0) break; + if (AsyncDeflateIdleMonitors && + try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), + // but it failed the second part (making ref_count negative) and + // bailed. Or the ObjectMonitor was async deflated and reused. + // Acquired the monitor. + break; + } + // State transition wrappers around park() ... // ReenterI() wisely defers state transitions until // it's clear we must park the thread. { OSThreadContendState osts(Self->osthread());
*** 861,877 **** // of such futile wakups is low. void ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * const Self = THREAD; if (THREAD != _owner) { ! if (THREAD->is_lock_owned((address) _owner)) { ! // Transmute _owner from a BasicLock pointer to a Thread address. ! // We don't need to hold _mutex for this transition. ! // Non-null to Non-null is safe as long as all readers can ! // tolerate either flavor. assert(_recursions == 0, "invariant"); ! _owner = THREAD; _recursions = 0; } else { // Apparent unbalanced locking ... // Naively we'd like to throw IllegalMonitorStateException. // As a practical matter we can neither allocate nor throw an --- 985,998 ---- // of such futile wakups is low. void ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * const Self = THREAD; if (THREAD != _owner) { ! void* cur = _owner; ! if (THREAD->is_lock_owned((address)cur)) { assert(_recursions == 0, "invariant"); ! set_owner_from_BasicLock(Self, cur); // Convert from BasicLock* to Thread*. _recursions = 0; } else { // Apparent unbalanced locking ... // Naively we'd like to throw IllegalMonitorStateException. // As a practical matter we can neither allocate nor throw an
*** 879,889 **** // see x86_32.ad Fast_Unlock() and the I1 and I2 properties. // Upon deeper reflection, however, in a properly run JVM the only // way we should encounter this situation is in the presence of // unbalanced JNI locking. TODO: CheckJNICalls. // See also: CR4414101 ! assert(false, "Non-balanced monitor enter/exit! Likely JNI locking"); return; } } if (_recursions != 0) { --- 1000,1017 ---- // see x86_32.ad Fast_Unlock() and the I1 and I2 properties. // Upon deeper reflection, however, in a properly run JVM the only // way we should encounter this situation is in the presence of // unbalanced JNI locking. TODO: CheckJNICalls. // See also: CR4414101 ! tty->print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT ! " is exiting an ObjectMonitor it does not own.", ! p2i(THREAD)); ! tty->print_cr("The imbalance is possibly caused by JNI locking."); ! print_debug_style_on(tty); ! // Changing this from an assert() to ADIM_guarantee() may run ! // afoul of any test that is inducing non-balanced JNI locking. ! ADIM_guarantee(false, "Non-balanced monitor enter/exit!"); return; } } if (_recursions != 0) {
*** 908,919 **** --- 1036,1051 ---- // release semantics: prior loads and stores from within the critical section // must not float (reorder) past the following store that drops the lock. // On SPARC that requires MEMBAR #loadstore|#storestore. // But of course in TSO #loadstore|#storestore is not required. + if (AsyncDeflateIdleMonitors) { + set_owner_from(NULL, Self); + } else { OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock OrderAccess::storeload(); // See if we need to wake a successor + } if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { return; } // Other threads are blocked trying to acquire the lock.
*** 951,961 **** // Only the current lock owner can manipulate the EntryList or // drain _cxq, so we need to reacquire the lock. If we fail // to reacquire the lock the responsibility for ensuring succession // falls to the new owner. // ! if (!Atomic::replace_if_null(THREAD, &_owner)) { return; } guarantee(_owner == THREAD, "invariant"); --- 1083,1093 ---- // Only the current lock owner can manipulate the EntryList or // drain _cxq, so we need to reacquire the lock. If we fail // to reacquire the lock the responsibility for ensuring succession // falls to the new owner. // ! if (try_set_owner_from(Self, NULL) != NULL) { return; } guarantee(_owner == THREAD, "invariant");
*** 1084,1095 **** --- 1216,1231 ---- // The thread associated with Wakee may have grabbed the lock and "Wakee" may be // out-of-scope (non-extant). Wakee = NULL; // Drop the lock + if (AsyncDeflateIdleMonitors) { + set_owner_from(NULL, Self); + } else { OrderAccess::release_store(&_owner, (void*)NULL); OrderAccess::fence(); // ST _owner vs LD in unpark() + } DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); Trigger->unpark(); // Maintain stats and report events to JVMTI
*** 1112,1124 **** JavaThread *jt = (JavaThread *)THREAD; assert(InitDone, "Unexpectedly not initialized"); if (THREAD != _owner) { ! if (THREAD->is_lock_owned ((address)_owner)) { assert(_recursions == 0, "internal state error"); ! _owner = THREAD; // Convert from basiclock addr to Thread addr _recursions = 0; } } guarantee(Self == _owner, "complete_exit not owner"); --- 1248,1261 ---- JavaThread *jt = (JavaThread *)THREAD; assert(InitDone, "Unexpectedly not initialized"); if (THREAD != _owner) { ! void* cur = _owner; ! if (THREAD->is_lock_owned((address)cur)) { assert(_recursions == 0, "internal state error"); ! set_owner_from_BasicLock(Self, cur); // Convert from BasicLock* to Thread*. _recursions = 0; } } guarantee(Self == _owner, "complete_exit not owner");
*** 1135,1148 **** Thread * const Self = THREAD; assert(Self->is_Java_thread(), "Must be Java thread!"); JavaThread *jt = (JavaThread *)THREAD; guarantee(_owner != Self, "reenter already owner"); ! enter(THREAD); // enter the monitor guarantee(_recursions == 0, "reenter recursion"); _recursions = recursions; - return; } // Checks that the current THREAD owns this monitor and causes an // immediate return if it doesn't. We don't use the CHECK macro // because we want the IMSE to be the only exception that is thrown --- 1272,1285 ---- Thread * const Self = THREAD; assert(Self->is_Java_thread(), "Must be Java thread!"); JavaThread *jt = (JavaThread *)THREAD; guarantee(_owner != Self, "reenter already owner"); ! enter(THREAD); ! // Entered the monitor. guarantee(_recursions == 0, "reenter recursion"); _recursions = recursions; } // Checks that the current THREAD owns this monitor and causes an // immediate return if it doesn't. We don't use the CHECK macro // because we want the IMSE to be the only exception that is thrown
*** 1162,1173 **** // is not the owner, that exception will be replaced by the IMSE. bool ObjectMonitor::check_owner(Thread* THREAD) { if (_owner == THREAD) { return true; } ! if (THREAD->is_lock_owned((address)_owner)) { ! _owner = THREAD; // convert from BasicLock addr to Thread addr _recursions = 0; return true; } THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread is not owner", false); --- 1299,1311 ---- // is not the owner, that exception will be replaced by the IMSE. bool ObjectMonitor::check_owner(Thread* THREAD) { if (_owner == THREAD) { return true; } ! void* cur = _owner; ! if (THREAD->is_lock_owned((address)cur)) { ! set_owner_from_BasicLock(THREAD, cur); // Convert from BasicLock* to Thread*. _recursions = 0; return true; } THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread is not owner", false);
*** 1668,1678 **** // the spin without prejudice or apply a "penalty" to the // spin count-down variable "ctr", reducing it by 100, say. Thread * ox = (Thread *) _owner; if (ox == NULL) { ! ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL); if (ox == NULL) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. if (_succ == Self) { _succ = NULL; --- 1806,1816 ---- // the spin without prejudice or apply a "penalty" to the // spin count-down variable "ctr", reducing it by 100, say. Thread * ox = (Thread *) _owner; if (ox == NULL) { ! ox = (Thread*)try_set_owner_from(Self, NULL); if (ox == NULL) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. if (_succ == Self) { _succ = NULL;
*** 1932,1944 **** } DEBUG_ONLY(InitDone = true;) } void ObjectMonitor::print_on(outputStream* st) const { // The minimal things to print for markWord printing, more can be added for debugging and logging. st->print("{contentions=0x%08x,waiters=0x%08x" ! ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}", contentions(), waiters(), recursions(), p2i(owner())); } void ObjectMonitor::print() const { print_on(tty); } --- 2070,2249 ---- } DEBUG_ONLY(InitDone = true;) } + // For internal use by ObjectSynchronizer::monitors_iterate(). + ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) { + om_ptr->inc_ref_count(); + _om_ptr = om_ptr; + } + + ObjectMonitorHandle::~ObjectMonitorHandle() { + if (_om_ptr != NULL) { + _om_ptr->dec_ref_count(); + _om_ptr = NULL; + } + } + + // Save the ObjectMonitor* associated with the specified markWord and + // increment the ref_count. This function should only be called if + // the caller has verified mark.has_monitor() == true. The object + // parameter is needed to verify that ObjectMonitor* has not been + // deflated and reused for another object. + // + // This function returns true if the ObjectMonitor* has been safely + // saved. This function returns false if we have lost a race with + // async deflation; the caller should retry as appropriate. + // + bool ObjectMonitorHandle::save_om_ptr(oop object, markWord mark) { + guarantee(mark.has_monitor(), "sanity check: mark=" INTPTR_FORMAT, + mark.value()); + + ObjectMonitor * om_ptr = mark.monitor(); + om_ptr->inc_ref_count(); + + if (AsyncDeflateIdleMonitors) { + // Race here if monitor is not owned! The above ref_count bump + // will cause subsequent async deflation to skip it. However, + // previous or concurrent async deflation is a race. + if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) { + // Async deflation is in progress and our ref_count increment + // above lost the race to async deflation. 
Attempt to restore + // the header/dmw to the object's header so that we only retry + // once if the deflater thread happens to be slow. + om_ptr->install_displaced_markword_in_object(object); + om_ptr->dec_ref_count(); + return false; + } + if (om_ptr->ref_count() <= 0) { + // Async deflation is in the process of bailing out, but has not + // yet restored the ref_count field so we return false to force + // a retry. We want a positive ref_count value for a true return. + om_ptr->dec_ref_count(); + return false; + } + // The ObjectMonitor could have been deflated and reused for + // another object before we bumped the ref_count so make sure + // our object still refers to this ObjectMonitor. + const markWord tmp = object->mark(); + if (!tmp.has_monitor() || tmp.monitor() != om_ptr) { + // Async deflation and reuse won the race so we have to retry. + // Skip object header restoration since that's already done. + om_ptr->dec_ref_count(); + return false; + } + } + + ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT, + p2i(_om_ptr)); + _om_ptr = om_ptr; + return true; + } + + // For internal use by ObjectSynchronizer::inflate(). + void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) { + if (_om_ptr == NULL) { + ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr"); + om_ptr->inc_ref_count(); + _om_ptr = om_ptr; + } else { + ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr"); + _om_ptr->dec_ref_count(); + _om_ptr = NULL; + } + } + void ObjectMonitor::print_on(outputStream* st) const { // The minimal things to print for markWord printing, more can be added for debugging and logging. st->print("{contentions=0x%08x,waiters=0x%08x" ! 
",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}", contentions(), waiters(), recursions(), p2i(owner())); } void ObjectMonitor::print() const { print_on(tty); } + + // Print the ObjectMonitor like a debugger would: + // + // (ObjectMonitor) 0x00007fdfb6012e40 = { + // _header = (_value = 1) + // _object = 0x000000070ff45fd0 + // _allocation_state = Old + // _pad_buf0 = { + // [0] = '\0' + // ... + // [43] = '\0' + // } + // _owner = 0x0000000000000000 + // _previous_owner_tid = 0 + // _pad_buf1 = { + // [0] = '\0' + // ... + // [47] = '\0' + // } + // _ref_count = 1 + // _pad_buf2 = { + // [0] = '\0' + // ... + // [59] = '\0' + // } + // _next_om = 0x0000000000000000 + // _recursions = 0 + // _EntryList = 0x0000000000000000 + // _cxq = 0x0000000000000000 + // _succ = 0x0000000000000000 + // _Responsible = 0x0000000000000000 + // _Spinner = 0 + // _SpinDuration = 5000 + // _contentions = 0 + // _WaitSet = 0x0000700009756248 + // _waiters = 1 + // _WaitSetLock = 0 + // } + // + void ObjectMonitor::print_debug_style_on(outputStream* st) const { + st->print_cr("(ObjectMonitor *) " INTPTR_FORMAT " = {", p2i(this)); + st->print_cr(" _header = " INTPTR_FORMAT, header().value()); + st->print_cr(" _object = " INTPTR_FORMAT, p2i(_object)); + st->print(" _allocation_state = "); + if (is_free()) { + st->print("Free"); + } else if (is_old()) { + st->print("Old"); + } else if (is_new()) { + st->print("New"); + } else { + st->print("unknown=%d", _allocation_state); + } + st->cr(); + st->print_cr(" _pad_buf0 = {"); + st->print_cr(" [0] = '\\0'"); + st->print_cr(" ..."); + st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1); + st->print_cr(" }"); + st->print_cr(" _owner = " INTPTR_FORMAT, p2i(_owner)); + st->print_cr(" _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid); + st->print_cr(" _pad_buf1 = {"); + st->print_cr(" [0] = '\\0'"); + st->print_cr(" ..."); + st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1); + st->print_cr(" }"); + st->print_cr(" 
 _ref_count = %d", ref_count()); + st->print_cr(" _pad_buf2 = {"); + st->print_cr(" [0] = '\\0'"); + st->print_cr(" ..."); + st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1); + st->print_cr(" }"); + st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(_next_om)); + st->print_cr(" _recursions = " INTX_FORMAT, _recursions); + st->print_cr(" _EntryList = " INTPTR_FORMAT, p2i(_EntryList)); + st->print_cr(" _cxq = " INTPTR_FORMAT, p2i(_cxq)); + st->print_cr(" _succ = " INTPTR_FORMAT, p2i(_succ)); + st->print_cr(" _Responsible = " INTPTR_FORMAT, p2i(_Responsible)); + st->print_cr(" _Spinner = %d", _Spinner); + st->print_cr(" _SpinDuration = %d", _SpinDuration); + st->print_cr(" _contentions = %d", _contentions); + st->print_cr(" _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet)); + st->print_cr(" _waiters = %d", _waiters); + st->print_cr(" _WaitSetLock = %d", _WaitSetLock); + st->print_cr("}"); + }
< prev index next >