# HG changeset patch
# User Carsten Varming
# Date 1496456263 14400
#      Fri Jun 02 22:17:43 2017 -0400
# Node ID 81c7d02671c33cbfbbb8955e9319fdef0360f7d2
# Parent  bcc3c04c2d150bc407bd76e90a484ed858bab630
imported patch monitor_deflate_conc

diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1184,6 +1184,10 @@ \
   product(bool, MonitorInUseLists, true, "Track Monitors for Deflation")    \
                                                                             \
+  product(bool, AsyncDeflateIdleMonitors, false,                            \
+          "Deflate idle monitors in the service thread. This flag is "     \
+          "ignored if +MonitorInUseLists")                                  \
+                                                                            \
   experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) "                    \
                "Experimental Sync flags")                                   \
                                                                             \
diff --git a/src/share/vm/runtime/objectMonitor.cpp b/src/share/vm/runtime/objectMonitor.cpp
--- a/src/share/vm/runtime/objectMonitor.cpp
+++ b/src/share/vm/runtime/objectMonitor.cpp
@@ -240,11 +240,12 @@
 //
 // * See also http://blogs.sun.com/dave
 
+#define DEFLATER_MARKER reinterpret_cast<void*>(-1)
 
 // -----------------------------------------------------------------------------
 // Enter support
 
-void ObjectMonitor::enter(TRAPS) {
+bool ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
@@ -254,13 +255,13 @@
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
-    return;
+    return true;
   }
 
   if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions++;
-    return;
+    return true;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
@@ -269,7 +270,7 @@
     // Commute owner from a thread-specific on-stack BasicLockObject address to
     // a full-fledged "Thread *".
     _owner = Self;
-    return;
+    return true;
   }
 
   // We've encountered genuine contention.
@@ -286,7 +287,7 @@
     assert(_recursions == 0, "invariant");
     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
     Self->_Stalled = 0;
-    return;
+    return true;
   }
 
   assert(_owner != Self, "invariant");
@@ -296,11 +297,18 @@
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(jt->thread_state() != _thread_blocked, "invariant");
   assert(this->object() != NULL, "invariant");
-  assert(_count >= 0, "invariant");
 
-  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
+  // Prevent deflation.  See deflate_idle_monitors(), try_disable_monitor(), and is_busy().
   // Ensure the object-monitor relationship remains stable while there's contention.
-  Atomic::inc(&_count);
+  const jint count = Atomic::add(1, &_count);
+  if (count <= 0 && _owner == DEFLATER_MARKER) {
+    // Deflation in progress.
+    // Help the deflater thread install the mark word (in case the deflater thread is slow).
+    install_displaced_markword_in_object();
+    Self->_Stalled = 0;
+    return false;  // Caller should retry.  Never mind about _count, as this monitor has been deflated.
+  }
+  // The deflater thread will not deflate this monitor and the monitor is contended; continue.
 
   EventJavaMonitorEnter event;
@@ -357,7 +365,6 @@
   }
 
   Atomic::dec(&_count);
-  assert(_count >= 0, "invariant");
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
@@ -397,6 +404,7 @@
   }
 
   OM_PERFDATA_OP(ContendedLockAttempts, inc());
+  return true;
 }
 
 
@@ -419,6 +427,96 @@
   return -1;
 }
 
+// Try disabling this monitor.  Returns true iff successful.
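+// Called by the service thread from deflate_idle_monitors_conc(); only
+// monitors that have aged to the Old allocation state are candidates
+// (see the is_old() precondition below).
+//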
+// The method will install DEFLATER_MARKER (-1) as the owner of the monitor,
+// check _waiters == 0, and make _count negative if it is currently 0.  The
+// monitor is successfully disabled if _count is negative and this monitor is
+// still owned by DEFLATER_MARKER.
+//
+// All threads trying to acquire the monitor must, before parking themselves,
+// increment _count and check that _owner != DEFLATER_MARKER.  If _owner ==
+// DEFLATER_MARKER and _count is positive, then a thread can still win the lock
+// by atomically installing its thread pointer in _owner.  If _count is
+// negative and _owner == DEFLATER_MARKER, then the monitor has been
+// successfully disabled and the acquiring threads should help install the
+// displaced mark word back into the object and retry acquiring the lock.
+//
+// A thread wanting to wait on the monitor must increase _waiters while owning the monitor.
+bool ObjectMonitor::try_disable_monitor() {
+  assert(Thread::current()->is_Java_thread(), "precondition");
+  // We don't want to disable newly allocated monitors, as it could result in an endless inflate/deflate cycle.
+  assert(is_old(), "precondition");
+
+  // Set _owner to DEFLATER_MARKER if the monitor is not owned by another thread.
+  // This forces contending threads through the slow path.
+  if (!is_busy() && Atomic::cmpxchg_ptr(DEFLATER_MARKER, &_owner, NULL) == NULL) {
+    // Another thread might still enter the monitor.
+    // Signal that other threads should retry if the owner is DEFLATER_MARKER by making _count negative.
+    if (_waiters == 0 && Atomic::cmpxchg(-max_jint, &_count, 0) == 0) {
+      // ABA problem with _count:
+      // Another thread might have acquired this monitor and finished using it.
+      // Check the owner to see if that happened (no other thread installs DEFLATER_MARKER as owner).
+      if (_owner == DEFLATER_MARKER) {
+        // We successfully signalled to all threads entering that they should
+        // retry.
+        // Nobody acquired this monitor between installing DEFLATER_MARKER into
+        // _owner and now (such a thread would have changed _owner).  If any
+        // thread is now waiting on the monitor, then _waiters must have been
+        // incremented, as it was 0 before.  _waiters is changed only when owning
+        // the monitor, but no other thread can have owned the monitor since we
+        // installed DEFLATER_MARKER, and thus _waiters must still be 0.
+        guarantee(_waiters == 0, "Not changed since the previous read");
+        guarantee(_cxq == NULL, "All contending threads should retry");
+        guarantee(_EntryList == NULL, "All contending threads should retry");
+        // Install the old mark word if nobody else has already done it.
+        install_displaced_markword_in_object();
+        set_allocation_state(Free);
+        // Leave this monitor locked to ensure acquiring threads take the slow path, and
+        // leave _count negative to make them retry.
+        return true;  // Success, lock has been deflated.
+      }
+      // We do not own the monitor.  Do not deflate.
+      Atomic::add(max_jint, &_count);
+    }
+    // Never mind.  Another thread managed to acquire this monitor or there were
+    // threads waiting.  The threads that saw (or will see) 0 <= _count and
+    // _owner == DEFLATER_MARKER will compete for ownership and one will
+    // eventually install itself as owner and subsequently run the exit protocol.
+  }
+  assert(0 <= _count, "Nobody else should make _count negative");
+  return false;
+}
+
+// Install the displaced mark word of a disabled monitor into the object
+// associated with the monitor.
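+//
+// While the transition is in flight the displaced mark word is marked
+// (hash() == 0 && is_marked()); racing threads use this to detect that the
+// monitor is going away and to help or retry accordingly.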
+// This method is idempotent and is expected to be executed by mutators
+// wanting to acquire a monitor for an object, by mutators wanting to install
+// a hash code in an object, and by the thread deflating monitors.
+void ObjectMonitor::install_displaced_markword_in_object() {
+  markOop dmw = header();
+  assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "precondition");
+  if (dmw->hash() == 0 && !dmw->is_marked()) {
+    // Another thread might update the displaced mark word by computing a hash
+    // code for the object or by running this method.
+    // Signal to other threads that the object mark word should be installed
+    // and read from the object by marking the displaced mark word.
+    markOop marked_dmw = dmw->set_marked();
+    assert(marked_dmw->hash() == 0 && marked_dmw->is_marked(), "oops");
+    dmw = (markOop) Atomic::cmpxchg_ptr(marked_dmw, &_header, dmw);
+    // If the CAS failed because another thread installed a hash value, then dmw
+    // will contain the hash and be unmarked.  If the CAS failed because another thread
+    // marked the displaced mark word, then dmw->hash() is zero and dmw is marked.
+  }
+  if (dmw->is_marked()) {
+    assert(dmw->hash() == 0, "invariant");
+    dmw = dmw->set_unmarked();
+  }
+  oop const obj = (oop) object();
+  // Install the displaced mark word if the object mark word still points to this monitor.
+  assert(dmw->is_neutral(), "Must not install a non-neutral markword into the object");
+  obj->cas_set_mark(dmw, markOopDesc::encode(this));
+}
+
 #define MAX_RECHECK_INTERVAL 1000
 
 void ObjectMonitor::EnterI(TRAPS) {
@@ -434,6 +532,18 @@
     return;
   }
 
+  if (_owner == DEFLATER_MARKER) {
+    guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
+    // The deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
+    // Try to acquire the monitor.
+    if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+      assert(_succ != Self, "invariant");
+      assert(_owner == Self, "invariant");
+      assert(_Responsible != Self, "invariant");
+      return;
+    }
+  }
+
   DeferredInitialize();
 
   // We try one round of spinning *before* enqueueing Self.
@@ -557,6 +667,15 @@
 
     if (TryLock(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
+      // The deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
+      // Try to acquire the monitor.
+      if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        break;
+      }
+    }
+
     // The lock is still contested.
     // Keep a tally of the # of futile wakeups.
     // Note that the counter is not protected by a lock or updated by atomics.
@@ -689,6 +808,13 @@
     if (TryLock(Self) > 0) break;
     if (TrySpin(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      guarantee(0 <= _count, "Impossible: _owner == DEFLATER_MARKER && _count < 0; the monitor must not be owned by the deflater thread here");
+      if (Atomic::cmpxchg_ptr(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        break;
+      }
+    }
+
     TEVENT(Wait Reentry - parking);
 
     // State transition wrappers around park() ...
@@ -1328,16 +1454,16 @@
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
-void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   guarantee(_owner != Self, "reenter already owner");
-  enter(THREAD);       // enter the monitor
+  if (!enter(THREAD)) { return false; }  // enter the monitor
   guarantee(_recursions == 0, "reenter recursion");
   _recursions = recursions;
-  return;
+  return true;
 }
 
@@ -1576,7 +1702,8 @@
   assert(_owner != Self, "invariant");
   ObjectWaiter::TStates v = node.TState;
   if (v == ObjectWaiter::TS_RUN) {
-    enter(Self);
+    DEBUG_ONLY(const bool success =) enter(Self);
+    assert(success, "enter signalled that we should retry, but the monitor should not be deflated as waiters > 0");
   } else {
     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
     ReenterI(Self, &node);
diff --git a/src/share/vm/runtime/objectMonitor.hpp b/src/share/vm/runtime/objectMonitor.hpp
--- a/src/share/vm/runtime/objectMonitor.hpp
+++ b/src/share/vm/runtime/objectMonitor.hpp
@@ -171,6 +171,8 @@
   volatile jint  _waiters;   // number of waiting threads
  private:
   volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
+  typedef enum { Free = 0, New, Old } AllocationState; // Free must be 0 for the monitor to be free after memset(.., 0, ..).
+  AllocationState _allocation_state;
 
  public:
   static void Initialize();
@@ -307,6 +309,12 @@
   void*     object() const;
   void*     object_addr();
   void      set_object(void* obj);
+  void      set_allocation_state(AllocationState s) { _allocation_state = s; }
+  AllocationState allocation_state() const { return _allocation_state; }
+  bool      is_free() const   { return _allocation_state == Free; }
+  bool      is_active() const { return !is_free(); }
+  bool      is_old() const    { return _allocation_state == Old; }
+  bool      is_new() const    { return _allocation_state == New; }
 
   bool      check(TRAPS);     // true if the thread owns the monitor.
   void      check_slow(TRAPS);
@@ -314,7 +322,7 @@
   static void sanity_checks(); // public for -XX:+ExecuteInternalVMTests
                                // in PRODUCT for -XX:SyncKnobs=Verbose=1
 
-  void      enter(TRAPS);
+  bool      enter(TRAPS); // returns false if the monitor is being deflated and the caller should retry locking the object.
   void      exit(bool not_suspended, TRAPS);
   void      wait(jlong millis, bool interruptable, TRAPS);
   void      notify(TRAPS);
@@ -322,7 +330,7 @@
 
   // Use the following at your own risk
   intptr_t  complete_exit(TRAPS);
-  void      reenter(intptr_t recursions, TRAPS);
+  bool      reenter(intptr_t recursions, TRAPS); // returns false if the monitor is being deflated and the caller should retry locking the object.
 
  private:
   void      AddWaiter(ObjectWaiter * waiter);
@@ -343,6 +351,8 @@
                           jlong timeout,
                           bool timedout);
 
+  bool      try_disable_monitor(); // Must be run by a Java thread in VM mode.
+  void      install_displaced_markword_in_object();
 };
 
 #undef TEVENT
 
diff --git a/src/share/vm/runtime/objectMonitor.inline.hpp b/src/share/vm/runtime/objectMonitor.inline.hpp
--- a/src/share/vm/runtime/objectMonitor.inline.hpp
+++ b/src/share/vm/runtime/objectMonitor.inline.hpp
@@ -49,7 +49,8 @@
 }
 
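+// During async deflation the deflater thread parks the DEFLATER_MARKER
+// sentinel (-1) in _owner; owner() filters it out, so callers only ever
+// observe a real owner thread or NULL.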
 inline void* ObjectMonitor::owner() const {
-  return _owner;
+  void* owner = _owner;
+  return owner != (void*) -1 ? owner : NULL;
 }
 
 inline void ObjectMonitor::clear() {
@@ -60,6 +61,7 @@
   assert(_object != NULL, "Fatal logic error in ObjectMonitor object!");
   assert(_owner == 0, "Fatal logic error in ObjectMonitor owner!");
 
+  set_allocation_state(Free);
   _header = NULL;
   _object = NULL;
 }
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -544,7 +544,7 @@
     const char* name = "deflating idle monitors";
     EventSafepointCleanupTask event;
     TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
-    ObjectSynchronizer::deflate_idle_monitors();
+    ObjectSynchronizer::do_safepoint_work();
     event_safepoint_cleanup_task_commit(event, name);
   }
 
diff --git a/src/share/vm/runtime/serviceThread.cpp b/src/share/vm/runtime/serviceThread.cpp
--- a/src/share/vm/runtime/serviceThread.cpp
+++ b/src/share/vm/runtime/serviceThread.cpp
@@ -89,6 +89,7 @@
     bool has_gc_notification_event = false;
     bool has_dcmd_notification_event = false;
     bool acs_notify = false;
+    bool deflate_idle_monitors = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -106,7 +107,8 @@
              !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
              !(has_gc_notification_event = GCNotifier::has_event()) &&
              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
-             !(acs_notify = AllocationContextService::should_notify())) {
+             !(acs_notify = AllocationContextService::should_notify()) &&
+             !(deflate_idle_monitors = ObjectSynchronizer::should_deflate_idle_monitors_conc())) {
         // wait until one of the sensors has pending requests, or there is a
         // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -136,6 +138,10 @@
     if (acs_notify) {
       AllocationContextService::notify(CHECK);
     }
+
+    if (deflate_idle_monitors) {
+      ObjectSynchronizer::deflate_idle_monitors_conc();
+    }
   }
 }
 
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
--- a/src/share/vm/runtime/synchronizer.cpp
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -115,14 +115,19 @@
 ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
 // global monitor free list
 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
+ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepoint = NULL;
+ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepointTail = NULL;
 // global monitor in-use list, for moribund threads,
 // monitors they inflated need to be scanned for deflation
 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
 // count of entries in gOmInUseList
 int ObjectSynchronizer::gOmInUseCount = 0;
+bool ObjectSynchronizer::_should_deflate_idle_monitors_conc = false;
 
 static volatile intptr_t gListLock = 0;      // protects global monitor lists
 static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
+static int gMonitorFreeCountNextSafepoint = 0;
 static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
 
 static void post_monitor_inflate_event(EventJavaMonitorInflate&,
@@ -340,34 +345,35 @@
 
 // We don't need to use fast path here, because it must have been
 // failed in the interpreter/compiler code.
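+// With -XX:+AsyncDeflateIdleMonitors, ObjectMonitor::enter() returns false
+// when the monitor is deflated under the caller, so the inflate()/enter()
+// sequence is retried until the enter succeeds.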
 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
-  markOop mark = obj->mark();
-  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+  do {
+    markOop mark = obj->mark();
+    assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 
-  if (mark->is_neutral()) {
-    // Anticipate successful CAS -- the ST of the displaced mark must
-    // be visible <= the ST performed by the CAS.
-    lock->set_displaced_header(mark);
-    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
-      TEVENT(slow_enter: release stacklock);
+    if (mark->is_neutral()) {
+      // Anticipate successful CAS -- the ST of the displaced mark must
+      // be visible <= the ST performed by the CAS.
+      lock->set_displaced_header(mark);
+      if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
+        TEVENT(slow_enter: release stacklock);
+        return;
+      }
+      // Fall through to inflate() ...
+    } else if (mark->has_locker() &&
+               THREAD->is_lock_owned((address)mark->locker())) {
+      assert(lock != mark->locker(), "must not re-lock the same lock");
+      assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
+      lock->set_displaced_header(NULL);
       return;
     }
-    // Fall through to inflate() ...
-  } else if (mark->has_locker() &&
-             THREAD->is_lock_owned((address)mark->locker())) {
-    assert(lock != mark->locker(), "must not re-lock the same lock");
-    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
-    lock->set_displaced_header(NULL);
-    return;
-  }
 
-  // The object header will never be displaced to this lock,
-  // so it does not matter what the value is, except that it
-  // must be non-zero to avoid looking like a re-entrant lock,
-  // and must not look locked either.
-  lock->set_displaced_header(markOopDesc::unused_mark());
-  ObjectSynchronizer::inflate(THREAD,
-                              obj(),
-                              inflate_cause_monitor_enter)->enter(THREAD);
+    // The object header will never be displaced to this lock,
+    // so it does not matter what the value is, except that it
+    // must be non-zero to avoid looking like a re-entrant lock,
+    // and must not look locked either.
+    lock->set_displaced_header(markOopDesc::unused_mark());
+  } while (!ObjectSynchronizer::inflate(THREAD,
+                                        obj(),
+                                        inflate_cause_monitor_enter)->enter(THREAD));
 }
 
 // This routine is used to handle interpreter/compiler slow case
@@ -412,11 +418,12 @@
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
 
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
-                                                       obj(),
-                                                       inflate_cause_vm_internal);
-
-  monitor->reenter(recursion, THREAD);
+  ObjectMonitor* monitor;
+  do {
+    monitor = ObjectSynchronizer::inflate(THREAD,
+                                          obj(),
+                                          inflate_cause_vm_internal);
+  } while (!monitor->reenter(recursion, THREAD));
 }
 // -----------------------------------------------------------------------------
 // JNI locks on java objects
@@ -429,7 +436,7 @@
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   THREAD->set_current_pending_monitor_is_from_java(false);
-  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
+  while (!ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD)) {}
   THREAD->set_current_pending_monitor_is_from_java(true);
 }
 
@@ -712,6 +719,7 @@
 }
 
 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
+ Retry:
   if (UseBiasedLocking) {
     // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
@@ -768,7 +776,7 @@
   } else if (mark->has_monitor()) {
     monitor = mark->monitor();
     temp = monitor->header();
-    assert(temp->is_neutral(), "invariant");
+    assert(temp->is_neutral() || (temp->hash() == 0 && temp->is_marked()), "invariant");
     hash = temp->hash();
     if (hash) {
       return hash;
@@ -796,17 +804,34 @@
   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
   // Load displaced header and check it has hash code
   mark = monitor->header();
-  assert(mark->is_neutral(), "invariant");
+  assert(mark->is_neutral() || (mark->hash() == 0 && mark->is_marked()), "invariant");
   hash = mark->hash();
   if (hash == 0) {
     hash = get_next_hash(Self, obj);
-    temp = mark->copy_set_hash(hash); // merge hash code into header
+    temp = mark->set_unmarked()->copy_set_hash(hash); // merge hash code into header
     assert(temp->is_neutral(), "invariant");
+    if (mark->is_marked()) {
+      // The monitor is being deflated.  Try installing the mark word with the hash code into obj.
+      markOop monitor_mark = markOopDesc::encode(monitor);
+      if (obj->cas_set_mark(temp, monitor_mark) == monitor_mark) {
+        return hash;
+      } else {
+        // Somebody else installed a new mark word in obj.  Start over.  We are making progress,
+        // as the new mark word is not a pointer to the monitor.
+        goto Retry;
+      }
+    }
     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
     if (test != mark) {
-      // The only update to the header in the monitor (outside GC)
-      // is install the hash code. If someone add new usage of
-      // displaced header, please update this code
+      // The only update to the header in the monitor (outside GC) is to
+      // install the hash code or to mark the header to signal that the
+      // monitor is being deflated.  If someone adds a new usage of the
+      // displaced header, please update this code.
+      if (test->is_marked()) {
+        // The monitor is being deflated.  Make progress by starting over.
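+        // (Another thread is installing the displaced header back into the
+        // object; on retry we will see either the restored mark word or a
+        // freshly inflated monitor.)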
+        assert(test->hash() == 0, "invariant");
+        goto Retry;
+      }
       hash = test->hash();
       assert(test->is_neutral(), "invariant");
       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
@@ -982,7 +1007,8 @@
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = 1; i < _BLOCKSIZE; i++) {
       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
-      if (mid->object() != NULL) {
+      if (mid->is_active()) {
+        assert(mid->object() != NULL, "invariant");
         f->do_oop((oop*)mid->object_addr());
       }
     }
@@ -1078,14 +1104,16 @@
   int in_use_tally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     in_use_tally++;
+    guarantee(mid->is_active(), "invariant");
   }
-  assert(in_use_tally == Self->omInUseCount, "in-use count off");
+  guarantee(in_use_tally == Self->omInUseCount, "in-use count off");
 
   int free_tally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     free_tally++;
+    guarantee(mid->is_free(), "invariant");
   }
-  assert(free_tally == Self->omFreeCount, "free count off");
+  guarantee(free_tally == Self->omFreeCount, "free count off");
 }
 
 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
@@ -1110,6 +1138,7 @@
       Self->omFreeCount--;
       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
       guarantee(m->object() == NULL, "invariant");
+      m->set_allocation_state(ObjectMonitor::New);
       if (MonitorInUseLists) {
         m->FreeNext = Self->omInUseList;
         Self->omInUseList = m;
@@ -1120,6 +1149,7 @@
       } else {
         m->FreeNext = NULL;
       }
+      assert(!m->is_free(), "post-condition");
       return m;
     }
@@ -1137,9 +1167,12 @@
         gMonitorFreeCount--;
         ObjectMonitor * take = gFreeList;
         gFreeList = take->FreeNext;
-        guarantee(take->object() == NULL, "invariant");
+        take->set_object(NULL);
+        take->set_owner(NULL);
+        take->_count = 0;
         guarantee(!take->is_busy(), "invariant");
         take->Recycle();
+        assert(take->is_free(), "invariant");
         omRelease(Self, take, false);
       }
       Thread::muxRelease(&gListLock);
@@ -1194,6 +1227,7 @@
 
     for (int i = 1; i < _BLOCKSIZE; i++) {
       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
+      assert(temp[i].is_free(), "invariant");
     }
 
     // terminate the last monitor as the end of list
@@ -1243,6 +1277,7 @@
                                    bool fromPerThreadAlloc) {
   guarantee(m->object() == NULL, "invariant");
   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
+  m->set_allocation_state(ObjectMonitor::Free);
   // Remove from omInUseList
   if (MonitorInUseLists && fromPerThreadAlloc) {
     ObjectMonitor* cur_mid_in_use = NULL;
@@ -1357,8 +1392,11 @@
   markOop mark = obj->mark();
   if (mark->has_monitor()) {
     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
-    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
-    return mark->monitor();
+    markOop dmw = mark->monitor()->header();
+    assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "monitor must record a good object header");
+    if (dmw->is_neutral()) {
+      return mark->monitor();
+    }
   }
   return ObjectSynchronizer::inflate(Thread::current(),
                                      obj,
@@ -1368,7 +1406,7 @@
 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                            oop object,
                                            const InflateCause cause) {
-
+ Retry:
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
@@ -1390,7 +1428,11 @@
     // CASE: inflated
     if (mark->has_monitor()) {
       ObjectMonitor * inf = mark->monitor();
-      assert(inf->header()->is_neutral(), "invariant");
+      markOop dmw = inf->header();
+      assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "invariant");
+      if (dmw->is_marked()) {
+        goto Retry;
+      }
       assert(inf->object() == object, "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
@@ -1597,6 +1639,114 @@
   ClearResponsibleAtSTW = 0
 };
 
+void ObjectSynchronizer::do_safepoint_work() {
+  if (MonitorInUseLists || !AsyncDeflateIdleMonitors) {
+    ObjectSynchronizer::deflate_idle_monitors();
+    return;
+  }
+  assert(AsyncDeflateIdleMonitors, "oops");
+  if (gFreeListNextSafepoint != NULL) {
+#ifdef ASSERT
+    for (ObjectMonitor* monitor = gFreeListNextSafepoint; monitor != NULL; monitor = monitor->FreeNext) {
+      guarantee(monitor->owner() == NULL, "invariant");
+      guarantee(monitor->waiters() == 0, "invariant");
+      guarantee(monitor->recursions() == 0, "invariant");
+      guarantee(monitor->object() != NULL, "invariant");
+      guarantee(monitor->header() != 0, "invariant");
+      guarantee(monitor->is_free(), "invariant");
+    }
+    guarantee(gFreeListNextSafepointTail != NULL, "invariant");
+#endif // def ASSERT
+
+    Thread::muxAcquire(&gListLock, "do_safepoint_work");
+    gFreeListNextSafepointTail->FreeNext = gFreeList;
+    gFreeList = gFreeListNextSafepoint;
+    gMonitorFreeCount += gMonitorFreeCountNextSafepoint;
+    Thread::muxRelease(&gListLock);
+
+    gFreeListNextSafepoint = NULL;
+    gFreeListNextSafepointTail = NULL;
+    gMonitorFreeCountNextSafepoint = 0;
+  }
+  set_should_deflate_idle_monitors_conc();
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  Service_lock->notify_all();
+}
+
+void ObjectSynchronizer::append_to_freelist_for_after_safepoint(int nScavenged, ObjectMonitor* const head, ObjectMonitor* const tail) {
+#ifdef ASSERT
+  int count = 0;
+  for (ObjectMonitor* m = head; m != NULL; m = m->FreeNext) { count++; }
+  guarantee(count == nScavenged, "invariant");
+#endif // def ASSERT
+  if (head != NULL) {
+    assert(tail->FreeNext == NULL, "invariant");
+    tail->FreeNext = gFreeListNextSafepoint;
+    gFreeListNextSafepoint = head;
+  }
+  if (gFreeListNextSafepointTail == NULL) {
+    gFreeListNextSafepointTail = tail;
+  }
+  gMonitorFreeCountNextSafepoint += nScavenged;
+  OM_PERFDATA_OP(Deflations, inc(nScavenged));
+}
+
+void ObjectSynchronizer::deflate_idle_monitors_conc() {
+  assert(Thread::current()->is_Java_thread(), "precondition");
+  _should_deflate_idle_monitors_conc = false;
+  if (MonitorInUseLists) {
+    return; // Don't want to run over the thread list for now.
+  }
+
+  ObjectMonitor* freeHeadp = NULL;
+  ObjectMonitor* freeTailp = NULL;
+  int nScavenged = 0;
+  int nInuse = 0;
+  int nInCirculation = 0;
+
+  PaddedEnd<ObjectMonitor> * block =
+    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+    // Iterate over all extant monitors - Scavenge all idle monitors.
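+    // Note that a monitor must age before it is deflated: a monitor in the
+    // New state is only promoted to Old below and becomes a deflation
+    // candidate on the next pass.  This avoids repeated inflate/deflate
+    // cycles on monitors that are in active use.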
+    assert(block->object() == CHAINMARKER, "must be a block header");
+    if (SafepointSynchronize::is_synchronizing()) {
+      append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
+      nScavenged = 0;
+      freeHeadp = NULL;
+      freeTailp = NULL;
+      JavaThread* const jt = (JavaThread*) Thread::current();
+      ThreadBlockInVM blocker(jt);
+    }
+    nInCirculation += _BLOCKSIZE;
+    for (int i = 1; i < _BLOCKSIZE; i++) {
+      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
+      if (!mid->is_old()) {
+        // Skip deflating newly allocated or free monitors.
+        if (mid->is_new()) {
+          // Mark mid as "old".
+          mid->set_allocation_state(ObjectMonitor::Old);
+        }
+        continue;
+      }
+
+      oop obj = (oop)mid->object();
+      assert(obj != NULL, "invariant");
+
+      if (mid->try_disable_monitor()) {
+        mid->FreeNext = NULL;
+        if (freeHeadp == NULL) { freeHeadp = mid; }
+        if (freeTailp != NULL) { freeTailp->FreeNext = mid; }
+        freeTailp = mid;
+        nScavenged++;
+      } else {
+        nInuse++;
+      }
+    }
+  }
+  append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
+  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
+}
+
 // Deflate a single monitor if not in-use
 // Return true if deflated, false if in-use
 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
@@ -1631,6 +1781,7 @@
 
   mid->clear();
   assert(mid->object() == NULL, "invariant");
+  assert(mid->is_free(), "invariant");
 
   // Move the object to the working free list defined by freeHeadp, freeTailp
   if (*freeHeadp == NULL) *freeHeadp = mid;
 
diff --git a/src/share/vm/runtime/synchronizer.hpp b/src/share/vm/runtime/synchronizer.hpp
--- a/src/share/vm/runtime/synchronizer.hpp
+++ b/src/share/vm/runtime/synchronizer.hpp
@@ -145,7 +145,14 @@
                                       int *error_cnt_ptr, int *warning_cnt_ptr);
   static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 
+  static void deflate_idle_monitors_conc();
+  static bool should_deflate_idle_monitors_conc() { return _should_deflate_idle_monitors_conc; }
+  static void set_should_deflate_idle_monitors_conc() { _should_deflate_idle_monitors_conc = true; }
+  static void do_safepoint_work();
+
  private:
+  static void append_to_freelist_for_after_safepoint(int nScavenged, ObjectMonitor* head, ObjectMonitor* tail);
+
   enum { _BLOCKSIZE = 128 };
   // global list of blocks of monitors
   // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
@@ -153,11 +160,14 @@
   static ObjectMonitor * volatile gBlockList;
   // global monitor free list
   static ObjectMonitor * volatile gFreeList;
+  static ObjectMonitor * gFreeListNextSafepoint;
+  static ObjectMonitor * gFreeListNextSafepointTail;
   // global monitor in-use list, for moribund threads,
   // monitors they inflated need to be scanned for deflation
   static ObjectMonitor * volatile gOmInUseList;
   // count of entries in gOmInUseList
   static int gOmInUseCount;
+  static bool _should_deflate_idle_monitors_conc;
 
   // Process oops in all monitors
   static void global_oops_do(OopClosure* f);
diff --git a/src/share/vm/services/threadService.cpp b/src/share/vm/services/threadService.cpp
--- a/src/share/vm/services/threadService.cpp
+++ b/src/share/vm/services/threadService.cpp
@@ -480,7 +480,7 @@
   _locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true);
   for (int i = 0; i < length; i++) {
     MonitorInfo* monitor = list->at(i);
-    assert(monitor->owner(), "This monitor must have an owning object");
+    assert(monitor->owner() != NULL, "This monitor must have an owning object");
     _locked_monitors->append(monitor->owner());
   }
 }