--- old/src/hotspot/share/oops/markOop.hpp 2019-05-25 10:46:19.750882925 -0400 +++ new/src/hotspot/share/oops/markOop.hpp 2019-05-25 10:46:18.874882879 -0400 @@ -209,6 +209,10 @@ bool is_unlocked() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); } + // ObjectMonitor::install_displaced_markword_in_object() uses + // is_marked() on ObjectMonitor::_header as part of the restoration + // protocol for an object's header. In this usage, the mark bits are + // only ever set (and cleared) on the ObjectMonitor::_header field. bool is_marked() const { return (mask_bits(value(), lock_mask_in_place) == marked_value); } --- old/src/hotspot/share/prims/jvm.cpp 2019-05-25 10:46:21.690883026 -0400 +++ new/src/hotspot/share/prims/jvm.cpp 2019-05-25 10:46:20.794882979 -0400 @@ -73,6 +73,7 @@ #include "runtime/os.inline.hpp" #include "runtime/perfData.hpp" #include "runtime/reflection.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" #include "runtime/vframe.inline.hpp" @@ -484,6 +485,11 @@ JVM_ENTRY_NO_ENV(void, JVM_GC(void)) JVMWrapper("JVM_GC"); if (!DisableExplicitGC) { + if (AsyncDeflateIdleMonitors) { + // AsyncDeflateIdleMonitors needs to know when System.gc() is + // called so any special deflation can be done at a safepoint. + ObjectSynchronizer::set_is_special_deflation_requested(true); + } Universe::heap()->collect(GCCause::_java_lang_system_gc); } JVM_END --- old/src/hotspot/share/runtime/basicLock.cpp 2019-05-25 10:46:23.266883108 -0400 +++ new/src/hotspot/share/runtime/basicLock.cpp 2019-05-25 10:46:22.606883074 -0400 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "runtime/basicLock.hpp" +#include "runtime/objectMonitor.hpp" #include "runtime/synchronizer.hpp" void BasicLock::print_on(outputStream* st) const { @@ -62,8 +63,11 @@ // is small (given the support for inflated fast-path locking in the fast_lock, etc) // we'll leave that optimization for another time. + // Disallow async deflation of the inflated monitor so the + // displaced header stays stable until we've copied it. + ObjectMonitorHandle omh; if (displaced_header()->is_neutral()) { - ObjectSynchronizer::inflate_helper(obj); + ObjectSynchronizer::inflate_helper(&omh, obj); // WARNING: We can not put check here, because the inflation // will not update the displaced header. Once BasicLock is inflated, // no one should ever look at its content. --- old/src/hotspot/share/runtime/globals.hpp 2019-05-25 10:46:24.714883184 -0400 +++ new/src/hotspot/share/runtime/globals.hpp 2019-05-25 10:46:24.102883152 -0400 @@ -726,11 +726,21 @@ product(intx, MonitorBound, 0, "Bound Monitor population") \ range(0, max_jint) \ \ + diagnostic(bool, AsyncDeflateIdleMonitors, true, \ + "Deflate idle monitors using JavaThreads and the ServiceThread.") \ + \ + /* notice: the max range value here is max_jint, not max_intx */ \ + /* because of an overflow issue */ \ + diagnostic(intx, AsyncDeflationInterval, 250, \ + "Async deflate idle monitors every so many milliseconds when " \ + "MonitorUsedDeflationThreshold is exceeded (0 is off).") \ + range(0, max_jint) \ + \ experimental(intx, MonitorUsedDeflationThreshold, 90, \ - "Percentage of used monitors before triggering cleanup " \ - "safepoint which deflates monitors (0 is off). " \ - "The check is performed on GuaranteedSafepointInterval.") \ - range(0, 100) \ + "Percentage of used monitors before triggering deflation (0 is " \ + "off). The check is performed on GuaranteedSafepointInterval " \ + "or AsyncDeflationInterval.") \ + range(0, 100) \ \ experimental(intx, hashCode, 5, \ "(Unstable) select hashCode generation algorithm") \ --- old/src/hotspot/share/runtime/objectMonitor.cpp 2019-05-25 10:46:26.758883291 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.cpp 2019-05-25 10:46:25.718883236 -0400 @@ -239,6 +239,8 @@ // Enter support void ObjectMonitor::enter(TRAPS) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; @@ -293,12 +295,14 @@ JavaThread * jt = (JavaThread *) Self; assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(jt->thread_state() != _thread_blocked, "invariant"); - assert(this->object() != NULL, "invariant"); - assert(_contentions >= 0, "invariant"); + assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant"); + assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); - // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). - // Ensure the object-monitor relationship remains stable while there's contention. - Atomic::inc(&_contentions); + // Prevent deflation. See ObjectSynchronizer::deflate_monitor(), + // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy(). + // Ensure the object <-> monitor relationship remains stable while + // there's contention. + Atomic::add(1, &_contentions); JFR_ONLY(JfrConditionalFlushWithStacktrace flush(jt);) EventJavaMonitorEnter event; @@ -360,7 +364,7 @@ } Atomic::dec(&_contentions); - assert(_contentions >= 0, "invariant"); + assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions); Self->_Stalled = 0; // Must either set _recursions = 0 or ASSERT _recursions == 0. @@ -417,9 +421,90 @@ return -1; } +// Install the displaced mark word (dmw) of a deflating ObjectMonitor +// into the header of the object associated with the monitor. This +// idempotent method is called by a thread that is deflating a +// monitor and by other threads that have detected a race with the +// deflation process. +void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { + // This function must only be called when (owner == DEFLATER_MARKER + // && ref_count <= 0), but we can't guarantee that here because + // those values could change when the ObjectMonitor gets moved from + // the global free list to a per-thread free list. + + guarantee(obj != NULL, "must be non-NULL"); + if (object() != obj) { + // ObjectMonitor's object ref no longer refers to the target object + // so the object's header has already been restored. + return; + } + + markOop dmw = header(); + if (dmw == NULL) { + // ObjectMonitor's header/dmw has been cleared by the deflating + // thread so the object's header has already been restored. + return; + } + + // A non-NULL dmw has to be either neutral (not locked and not marked) + // or is already participating in this restoration protocol. + assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0), + "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw)); + + markOop marked_dmw = NULL; + if (!dmw->is_marked() && dmw->hash() == 0) { + // This dmw has not yet started the restoration protocol so we + // mark a copy of the dmw to begin the protocol. + // Note: A dmw with a hashcode does not take this code path.
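+ // (With the markOopDesc encoding, a neutral dmw has lock bits 01; set_marked() flips only those two bits to 11 and set_unmarked() restores 01, so a losing racer below recovers an exact copy of the original dmw from its marked copy.)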
+ marked_dmw = dmw->set_marked(); + + // All of the callers to this function can be racing with each + // other trying to update the _header field. + dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw); + if (dmw == NULL) { + // ObjectMonitor's header/dmw has been cleared by the deflating + // thread so the object's header has already been restored. + return; + } + // The _header field is now marked. The winner's 'dmw' variable + // contains the original, unmarked header/dmw value and any + // losers have a marked header/dmw value that will be cleaned + // up below. + } + + if (dmw->is_marked()) { + // Clear the mark from the header/dmw copy in preparation for + // possible restoration from this thread. + assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT, + p2i(dmw)); + dmw = dmw->set_unmarked(); + } + assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw)); + + // Install displaced mark word if the object's header still points + // to this ObjectMonitor. All racing callers to this function will + // reach this point, but only one can win. + obj->cas_set_mark(dmw, markOopDesc::encode(this)); + + // Note: It does not matter which thread restored the header/dmw + // into the object's header. The thread deflating the monitor just + // wanted the object's header restored and it is. The threads that + // detected a race with the deflation process also wanted the + // object's header restored before they retry their operation and + // because it is restored they will only retry once. + + if (marked_dmw != NULL) { + // Clear _header to NULL if it is still marked_dmw so a racing + // install_displaced_markword_in_object() can bail out sooner. + Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw); + } +} + #define MAX_RECHECK_INTERVAL 1000 void ObjectMonitor::EnterI(TRAPS) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + Thread * const Self = THREAD; assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant"); @@ -432,6 +517,17 @@ return; } + if (_owner == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. + if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { + // Acquired the monitor. + assert(_succ != Self, "invariant"); + assert(_Responsible != Self, "invariant"); + return; + } + } + assert(InitDone, "Unexpectedly not initialized"); // We try one round of spinning *before* enqueueing Self. @@ -548,6 +644,15 @@ if (TryLock(Self) > 0) break; + if (_owner == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. + if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { + // Acquired the monitor. + break; + } + } + // The lock is still contested. // Keep a tally of the # of futile wakeups. // Note that the counter is not protected by a lock or updated by atomics. @@ -652,6 +757,8 @@ // In the future we should reconcile EnterI() and ReenterI(). 
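The DEFLATER_MARKER rescue that EnterI() performs above (and ReenterI() repeats below) can be modeled in isolation. A minimal sketch in portable C++, with std::atomic standing in for HotSpot's Atomic:: wrappers; all names are illustrative, not VM code:

  #include <atomic>

  static void* const DEFLATER_MARKER = reinterpret_cast<void*>(-1);
  std::atomic<void*> owner{nullptr};

  // A contender that sees owner == DEFLATER_MARKER knows the deflater
  // finished part one of its protocol (claiming owner) but bailed out
  // before part two (flipping ref_count negative), so a single CAS can
  // take ownership away from the abandoned deflation attempt.
  bool try_take_over_from_deflater(void* self) {
    void* expected = DEFLATER_MARKER;
    return owner.compare_exchange_strong(expected, self);
  }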
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) { + ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count); + assert(Self != NULL, "invariant"); assert(SelfNode != NULL, "invariant"); assert(SelfNode->_thread == Self, "invariant"); @@ -669,6 +776,15 @@ if (TryLock(Self) > 0) break; if (TrySpin(Self) > 0) break; + if (_owner == DEFLATER_MARKER) { + // The deflation protocol finished the first part (setting owner), but + // it failed the second part (making ref_count negative) and bailed. + if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { + // Acquired the monitor. + break; + } + } + // State transition wrappers around park() ... // ReenterI() wisely defers state transitions until // it's clear we must park the thread. @@ -876,7 +992,8 @@ // way we should encounter this situation is in the presence of // unbalanced JNI locking. TODO: CheckJNICalls. // See also: CR4414101 - assert(false, "Non-balanced monitor enter/exit! Likely JNI locking"); + assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: " + "owner=" INTPTR_FORMAT, p2i(_owner)); return; } } @@ -1132,10 +1249,10 @@ JavaThread *jt = (JavaThread *)THREAD; guarantee(_owner != Self, "reenter already owner"); - enter(THREAD); // enter the monitor + enter(THREAD); + // Entered the monitor. guarantee(_recursions == 0, "reenter recursion"); _recursions = recursions; - return; } @@ -1927,6 +2044,80 @@ DEBUG_ONLY(InitDone = true;) } +// For internal use by ObjectSynchronizer::monitors_iterate(). +ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) { + om_ptr->inc_ref_count(); + _om_ptr = om_ptr; +} + +ObjectMonitorHandle::~ObjectMonitorHandle() { + if (_om_ptr != NULL) { + _om_ptr->dec_ref_count(); + _om_ptr = NULL; + } +} + +// Save the ObjectMonitor* associated with the specified markOop and +// increment the ref_count. This function should only be called if +// the caller has verified mark->has_monitor() == true. The object +// parameter is needed to verify that ObjectMonitor* has not been +// deflated and reused for another object. +// +// This function returns true if the ObjectMonitor* has been safely +// saved. This function returns false if we have lost a race with +// async deflation; the caller should retry as appropriate. +// +bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) { + guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT, + p2i(mark)); + + ObjectMonitor * om_ptr = mark->monitor(); + om_ptr->inc_ref_count(); + + if (AsyncDeflateIdleMonitors) { + // Race here if monitor is not owned! The above ref_count bump + // will cause subsequent async deflation to skip it. However, + // previous or concurrent async deflation is a race. + if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) { + // Async deflation is in progress and our ref_count increment + // above lost the race to async deflation. Attempt to restore + // the header/dmw to the object's header so that we only retry + // once if the deflater thread happens to be slow. + om_ptr->install_displaced_markword_in_object(object); + om_ptr->dec_ref_count(); + return false; + } + // The ObjectMonitor could have been deflated and reused for + // another object before we bumped the ref_count so make sure + // our object still refers to this ObjectMonitor. + const markOop tmp = object->mark(); + if (!tmp->has_monitor() || tmp->monitor() != om_ptr) { + // Async deflation and reuse won the race so we have to retry. 
+ // Skip object header restoration since that's already done. + om_ptr->dec_ref_count(); + return false; + } + } + + ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT, + p2i(_om_ptr)); + _om_ptr = om_ptr; + return true; +} + +// For internal use by ObjectSynchronizer::inflate(). +void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) { + if (_om_ptr == NULL) { + ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr"); + om_ptr->inc_ref_count(); + _om_ptr = om_ptr; + } else { + ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr"); + _om_ptr->dec_ref_count(); + _om_ptr = NULL; + } +} + void ObjectMonitor::print_on(outputStream* st) const { // The minimal things to print for markOop printing, more can be added for debugging and logging. st->print("{contentions=0x%08x,waiters=0x%08x" --- old/src/hotspot/share/runtime/objectMonitor.hpp 2019-05-25 10:46:28.558883385 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.hpp 2019-05-25 10:46:27.766883343 -0400 @@ -136,6 +136,7 @@ }; private: + friend class ObjectMonitorHandle; friend class ObjectSynchronizer; friend class ObjectWaiter; friend class VMStructs; @@ -150,6 +151,8 @@ sizeof(volatile markOop) + sizeof(void * volatile) + sizeof(ObjectMonitor *)); protected: // protected for JvmtiRawMonitor + // Used by async deflation as a marker in the _owner field: + #define DEFLATER_MARKER reinterpret_cast<void*>(-1) void * volatile _owner; // pointer to owning thread OR BasicLock volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor volatile intptr_t _recursions; // recursion count, 0 for first entry @@ -166,12 +169,21 @@ volatile jint _contentions; // Number of active contentions in enter(). It is used by is_busy() // along with other fields to determine if an ObjectMonitor can be - // deflated. See ObjectSynchronizer::deflate_monitor(). + // deflated. See ObjectSynchronizer::deflate_monitor() and + // ObjectSynchronizer::deflate_monitor_using_JT(). protected: ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor volatile jint _waiters; // number of waiting threads private: volatile int _WaitSetLock; // protects Wait Queue - simple spinlock + volatile jint _ref_count; // ref count for ObjectMonitor* and used by the async deflation // protocol. See ObjectSynchronizer::deflate_monitor_using_JT(). + typedef enum { + Free = 0, // Free must be 0 for monitor to be free after memset(..,0,..). + New, + Old + } AllocationState; + AllocationState _allocation_state; public: static void Initialize(); @@ -233,12 +245,30 @@ intptr_t is_busy() const { // TODO-FIXME: assert _owner == null implies _recursions = 0 + // We do not include _ref_count in the is_busy() check because + // _ref_count is for indicating that the ObjectMonitor* is in + // use which is orthogonal to whether the ObjectMonitor itself + // is in use for a locking operation. return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList); } + // Version of is_busy() that accounts for the special value in + // _owner when AsyncDeflateIdleMonitors is enabled. + intptr_t is_busy_async() const { + intptr_t ret_code = _contentions | _waiters | intptr_t(_cxq) | intptr_t(_EntryList); + if (!AsyncDeflateIdleMonitors) { + ret_code |= intptr_t(_owner); + } else { + if (_owner != DEFLATER_MARKER) { + ret_code |= intptr_t(_owner); + } + } + return ret_code; + } + intptr_t is_entered(Thread* current) const; - void* owner() const; + void* owner() const; // Returns NULL if DEFLATER_MARKER is observed.
void set_owner(void* owner); jint waiters() const; @@ -281,10 +311,20 @@ void* object() const; void* object_addr(); void set_object(void* obj); + void set_allocation_state(AllocationState s); + AllocationState allocation_state() const; + bool is_free() const; + bool is_active() const; + bool is_old() const; + bool is_new() const; + void dec_ref_count(); + void inc_ref_count(); + jint ref_count() const; bool check(TRAPS); // true if the thread owns the monitor. void check_slow(TRAPS); void clear(); + void clear_using_JT(); void enter(TRAPS); void exit(bool not_suspended, TRAPS); @@ -312,6 +352,38 @@ int TrySpin(Thread * Self); void ExitEpilog(Thread * Self, ObjectWaiter * Wakee); bool ExitSuspendEquivalent(JavaThread * Self); + void install_displaced_markword_in_object(const oop obj); +}; + +// A helper object for managing an ObjectMonitor*'s ref_count. There +// are special safety considerations when async deflation is used. +class ObjectMonitorHandle : public StackObj { + private: + ObjectMonitor * _om_ptr; + public: + ObjectMonitorHandle() { _om_ptr = NULL; } + ~ObjectMonitorHandle(); + + ObjectMonitor * om_ptr() const { return _om_ptr; } + // Save the ObjectMonitor* associated with the specified markOop and + // increment the ref_count. + bool save_om_ptr(oop object, markOop mark); + + // For internal use by ObjectSynchronizer::monitors_iterate(). + ObjectMonitorHandle(ObjectMonitor * _om_ptr); + // For internal use by ObjectSynchronizer::inflate(). + void set_om_ptr(ObjectMonitor * om_ptr); }; +// Macro to use guarantee() for more strict AsyncDeflateIdleMonitors +// checks and assert() otherwise. +#define ADIM_guarantee(p, ...) \ + do { \ + if (AsyncDeflateIdleMonitors) { \ + guarantee(p, __VA_ARGS__); \ + } else { \ + assert(p, __VA_ARGS__); \ + } \ + } while (0) + #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP --- old/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-05-25 10:46:30.398883481 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-05-25 10:46:29.598883439 -0400 @@ -49,19 +49,46 @@ return _waiters; } +// Returns NULL if DEFLATER_MARKER is observed. inline void* ObjectMonitor::owner() const { - return _owner; + void* owner = _owner; + return owner != DEFLATER_MARKER ? owner : NULL; } inline void ObjectMonitor::clear() { assert(_header != NULL, "must be non-NULL"); + assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner)); + assert(_ref_count == 0, "must be 0: ref_count=%d", _ref_count); + + _header = NULL; + + clear_using_JT(); +} + +inline void ObjectMonitor::clear_using_JT() { + // Unlike other *_using_JT() functions, we cannot assert + // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread() + // because clear() calls this function for the rest of its checks. + + if (AsyncDeflateIdleMonitors) { + // Async deflation protocol uses the header, owner and ref_count + // fields. While the ObjectMonitor being deflated is on the global free + // list, we leave those three fields alone; owner == DEFLATER_MARKER + // and ref_count < 0 will force any racing threads to retry. The + // header field is used by install_displaced_markword_in_object() + // in the last part of the deflation protocol so we cannot check + // its value here.
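The ref_count convention just described (a negative count means a deflation is in flight) can be sketched with std::atomic; jint is modeled as int32_t, and the owner check that the real save_om_ptr() also performs is omitted here:

  #include <atomic>
  #include <cstdint>

  std::atomic<int32_t> ref_count{0};

  // Deflater: only an ObjectMonitor* with no users can be claimed.
  bool deflater_flips_count() {
    int32_t expected = 0;
    return ref_count.compare_exchange_strong(expected, -INT32_MAX);
  }

  // User: pin the monitor, but back off if deflation got there first.
  bool user_pins_monitor() {
    if (ref_count.fetch_add(1) < 0) {  // still negative: deflation in flight
      ref_count.fetch_sub(1);          // undo the pin; caller must retry
      return false;
    }
    return true;
  }

Note that the deflater's CAS expects exactly zero, so any pin taken before the flip keeps the flip from succeeding, and any pin taken after the flip observes the negative value and backs off.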
+ guarantee(_owner == NULL || _owner == DEFLATER_MARKER, + "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT, + p2i(_owner)); + guarantee(_ref_count <= 0, "must be <= 0: ref_count=%d", _ref_count); + } assert(_contentions == 0, "must be 0: contentions=%d", _contentions); assert(_waiters == 0, "must be 0: waiters=%d", _waiters); assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions); assert(_object != NULL, "must be non-NULL"); - assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner)); - _header = NULL; + set_allocation_state(Free); _object = NULL; } @@ -103,4 +130,51 @@ _recursions = 0; } +inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) { + _allocation_state = s; +} + +inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const { + return _allocation_state; +} + +inline bool ObjectMonitor::is_free() const { + return _allocation_state == Free; +} + +inline bool ObjectMonitor::is_active() const { + return !is_free(); +} + +inline bool ObjectMonitor::is_old() const { + return _allocation_state == Old; +} + +inline bool ObjectMonitor::is_new() const { + return _allocation_state == New; +} + +inline void ObjectMonitor::dec_ref_count() { + // The decrement only needs to be MO_ACQ_REL since the reference + // counter is volatile. + Atomic::dec(&_ref_count); + // Can be negative as part of async deflation protocol. + guarantee(AsyncDeflateIdleMonitors || _ref_count >= 0, + "sanity check: ref_count=%d", _ref_count); +} + +inline void ObjectMonitor::inc_ref_count() { + // The increment needs to be MO_SEQ_CST so that the reference + // counter update is seen as soon as possible in a race with the + // async deflation protocol. + Atomic::inc(&_ref_count); + // Can be negative as part of async deflation protocol. + guarantee(AsyncDeflateIdleMonitors || _ref_count > 0, + "sanity check: ref_count=%d", _ref_count); +} + +inline jint ObjectMonitor::ref_count() const { + return OrderAccess::load_acquire(&_ref_count); +} + #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP --- old/src/hotspot/share/runtime/safepoint.cpp 2019-05-25 10:46:32.122883571 -0400 +++ new/src/hotspot/share/runtime/safepoint.cpp 2019-05-25 10:46:31.390883532 -0400 @@ -510,8 +510,9 @@ } bool SafepointSynchronize::is_cleanup_needed() { - // Need a safepoint if there are many monitors to deflate. - if (ObjectSynchronizer::is_cleanup_needed()) return true; + // Need a cleanup safepoint if there are too many monitors in use + // and the monitor deflation needs to be done at a safepoint. + if (ObjectSynchronizer::is_safepoint_deflation_needed()) return true; // Need a safepoint if some inline cache buffers is non-empty if (!InlineCacheBuffer::is_empty()) return true; if (StringTable::needs_rehashing()) return true; @@ -530,6 +531,10 @@ _counters(counters) {} void do_thread(Thread* thread) { + // deflate_thread_local_monitors() handles or requests deflation of + // this thread's idle monitors. If !AsyncDeflateIdleMonitors or if + // there is a special cleanup request, deflation is handled now. + // Otherwise, async deflation is requested via a flag. ObjectSynchronizer::deflate_thread_local_monitors(thread, _counters); if (_nmethod_cl != NULL && thread->is_Java_thread() && ! 
thread->is_Code_cache_sweeper_thread()) { @@ -562,7 +567,11 @@ const char* name = "deflating global idle monitors"; EventSafepointCleanupTask event; TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); - ObjectSynchronizer::deflate_idle_monitors(_counters); + // AsyncDeflateIdleMonitors only uses DeflateMonitorCounters + // when a special cleanup has been requested. + // Note: This logging output will include global idle monitor + // elapsed times, but not global idle monitor deflation count. + ObjectSynchronizer::do_safepoint_work(_counters); post_safepoint_cleanup_task_event(event, safepoint_id, name); } --- old/src/hotspot/share/runtime/serviceThread.cpp 2019-05-25 10:46:33.902883664 -0400 +++ new/src/hotspot/share/runtime/serviceThread.cpp 2019-05-25 10:46:33.186883626 -0400 @@ -127,6 +127,7 @@ bool protection_domain_table_work = false; bool oopstorage_work = false; bool oopstorages_cleanup[oopstorage_count] = {}; // Zero (false) initialize. + bool deflate_idle_monitors = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -154,11 +155,14 @@ (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) | (oopstorage_work = needs_oopstorage_cleanup(oopstorages, oopstorages_cleanup, - oopstorage_count))) - - == 0) { + oopstorage_count)) | + (deflate_idle_monitors = ObjectSynchronizer::is_async_deflation_needed()) + ) == 0) { // Wait until notified that there is some work to do. - ml.wait(); + // If AsyncDeflateIdleMonitors, then we wait for + // GuaranteedSafepointInterval so that is_async_deflation_needed() + // is checked at the same interval. + ml.wait(AsyncDeflateIdleMonitors ? GuaranteedSafepointInterval : 0); } if (has_jvmti_events) { @@ -201,6 +205,26 @@ if (oopstorage_work) { cleanup_oopstorages(oopstorages, oopstorages_cleanup, oopstorage_count); } + + if (deflate_idle_monitors) { + // Deflate any global idle monitors. + ObjectSynchronizer::deflate_global_idle_monitors_using_JT(); + + // deflate_per_thread_idle_monitors_using_JT() is called by + // each JavaThread from ObjectSynchronizer::omAlloc() as needed. + int count = 0; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { + if (jt->omInUseCount > 0) { + // This JavaThread is using monitors so mark it. + jt->omShouldDeflateIdleMonitors = true; + count++; + } + } + if (count > 0) { + log_debug(monitorinflation)("requesting async deflation of idle monitors for %d thread(s).", count); + } + ObjectSynchronizer::set_is_async_deflation_requested(false); // async deflation has been requested + } } } --- old/src/hotspot/share/runtime/sharedRuntime.cpp 2019-05-25 10:46:35.882883767 -0400 +++ new/src/hotspot/share/runtime/sharedRuntime.cpp 2019-05-25 10:46:34.930883717 -0400 @@ -64,8 +64,10 @@ #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/objectMonitor.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" #include "utilities/copy.hpp" @@ -3104,9 +3106,13 @@ kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) { if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array BasicLock *lock = kptr2->lock(); + // Disallow async deflation of the inflated monitor so the + // displaced header stays stable until we've copied it. 
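The ServiceThread change above swaps an unbounded wait for a bounded one so the deflation predicate gets re-polled. The same shape in portable C++ (std::condition_variable standing in for the VM's Monitor, and the 1000 ms bound standing in for GuaranteedSafepointInterval; both are illustrative):

  #include <chrono>
  #include <condition_variable>
  #include <mutex>

  std::mutex lock;
  std::condition_variable cv;

  template <typename Pred>
  void wait_for_work(bool async_deflation, Pred has_work) {
    std::unique_lock<std::mutex> ml(lock);
    if (async_deflation) {
      // Bounded wait: even with no notification, wake up periodically
      // so is_async_deflation_needed() gets re-evaluated.
      cv.wait_for(ml, std::chrono::milliseconds(1000), has_work);
    } else {
      cv.wait(ml, has_work);  // original behavior: wait until notified
    }
  }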
+ ObjectMonitorHandle omh; // Inflate so the displaced header becomes position-independent - if (lock->displaced_header()->is_unlocked()) - ObjectSynchronizer::inflate_helper(kptr2->obj()); + if (lock->displaced_header()->is_unlocked()) { + ObjectSynchronizer::inflate_helper(&omh, kptr2->obj()); + } // Now the displaced header is free to move buf[i++] = (intptr_t)lock->displaced_header(); buf[i++] = cast_from_oop<intptr_t>(kptr2->obj()); --- old/src/hotspot/share/runtime/synchronizer.cpp 2019-05-25 10:46:37.354883844 -0400 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2019-05-25 10:46:36.830883816 -0400 @@ -125,6 +125,9 @@ ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL; // count of entries in gOmInUseList int ObjectSynchronizer::gOmInUseCount = 0; +bool volatile ObjectSynchronizer::_is_async_deflation_requested = false; +bool volatile ObjectSynchronizer::_is_special_deflation_requested = false; +jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0; static volatile intptr_t gListLock = 0; // protects global monitor lists static volatile int gMonitorFreeCount = 0; // # on gFreeList @@ -211,40 +214,50 @@ assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); NoSafepointVerifier nsv; if (obj == NULL) return false; // Need to throw NPE - const markOop mark = obj->mark(); - if (mark->has_monitor()) { - ObjectMonitor * const m = mark->monitor(); - assert(oopDesc::equals((oop) m->object(), obj), "invariant"); - Thread * const owner = (Thread *) m->_owner; - - // Lock contention and Transactional Lock Elision (TLE) diagnostics - // and observability - // Case: light contention possibly amenable to TLE - // Case: TLE inimical operations such as nested/recursive synchronization + while (true) { + const markOop mark = obj->mark(); - if (owner == Self) { - m->_recursions++; - return true; - } + if (mark->has_monitor()) { + ObjectMonitorHandle omh; + if (!omh.save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + ObjectMonitor * const m = omh.om_ptr(); + assert(oopDesc::equals((oop) m->object(), obj), "invariant"); + Thread * const owner = (Thread *) m->_owner; + + // Lock contention and Transactional Lock Elision (TLE) diagnostics + // and observability + // Case: light contention possibly amenable to TLE + // Case: TLE inimical operations such as nested/recursive synchronization + + if (owner == Self) { + m->_recursions++; + return true; + } - // This Java Monitor is inflated so obj's header will never be - // displaced to this thread's BasicLock.
Make the displaced header + // non-NULL so this BasicLock is not seen as recursive nor as + // being locked. We do this unconditionally so that this thread's + // BasicLock cannot be mis-interpreted by any stack walkers. For + // performance reasons, stack walkers generally first check for + // Biased Locking in the object's header, the second check is for + // stack-locking in the object's header, the third check is for + // recursive stack-locking in the displaced header in the BasicLock, + // and last are the inflated Java Monitor (ObjectMonitor) checks. + lock->set_displaced_header(markOopDesc::unused_mark()); + + if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) { + assert(m->_recursions == 0, "invariant"); + assert(m->_owner == Self, "invariant"); + return true; + } } + break; } // Note that we could inflate in quick_enter. @@ -328,7 +341,9 @@ } // We have to take the slow-path of possible inflation and then exit. - inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, object, inflate_cause_vm_internal); + omh.om_ptr()->exit(true, THREAD); } // ----------------------------------------------------------------------------- @@ -361,7 +376,9 @@ // must be non-zero to avoid looking like a re-entrant lock, // and must not look locked either. lock->set_displaced_header(markOopDesc::unused_mark()); - inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter); + omh.om_ptr()->enter(THREAD); } // This routine is used to handle interpreter/compiler slow case @@ -390,9 +407,10 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } - ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal); - - return monitor->complete_exit(THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_vm_internal); + intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD); + return ret_code; } // NOTE: must use heavy weight monitor to handle complete_exit/reenter() @@ -402,9 +420,9 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } - ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal); - - monitor->reenter(recursion, THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_vm_internal); + omh.om_ptr()->reenter(recursion, THREAD); } // ----------------------------------------------------------------------------- // JNI locks on java objects @@ -416,7 +434,9 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } THREAD->set_current_pending_monitor_is_from_java(false); - inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_jni_enter); + omh.om_ptr()->enter(THREAD); THREAD->set_current_pending_monitor_is_from_java(true); } @@ -429,7 +449,9 @@ } assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); - ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj, inflate_cause_jni_exit); + ObjectMonitor * monitor = omh.om_ptr(); // If this thread has locked the object, exit the monitor. Note: can't use // monitor->check(CHECK); must exit even if an exception is pending. 
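Every rewritten call site above follows the same shape: declare an ObjectMonitorHandle, inflate into it, then use the pinned monitor. The scoped-pin idea in miniature (plain C++; the types are stand-ins for the VM's, and a non-NULL monitor is assumed):

  #include <atomic>

  struct Mon { std::atomic<int> ref_count{0}; };

  class ScopedPin {
    Mon* _m;
   public:
    explicit ScopedPin(Mon* m) : _m(m) { _m->ref_count.fetch_add(1); }
    ~ScopedPin() { _m->ref_count.fetch_sub(1); }
    Mon* ptr() const { return _m; }
  };

  void locked_op(Mon* m) {
    ScopedPin pin(m);  // async deflation skips a monitor with users
    // ... operate on pin.ptr(); the pin drops when the scope ends
  }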
if (monitor->check(THREAD)) { @@ -469,7 +491,9 @@ if (millis < 0) { THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); } - ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_wait); + ObjectMonitor * monitor = omh.om_ptr(); DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); monitor->wait(millis, true, THREAD); @@ -478,7 +502,8 @@ // that's fixed we can uncomment the following line, remove the call // and change this function back into a "void" func. // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); - return dtrace_waited_probe(monitor, obj, THREAD); + int ret_code = dtrace_waited_probe(monitor, obj, THREAD); + return ret_code; } void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { @@ -489,7 +514,9 @@ if (millis < 0) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); } - inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_wait); + omh.om_ptr()->wait(millis, false, THREAD); } void ObjectSynchronizer::notify(Handle obj, TRAPS) { @@ -502,7 +529,9 @@ if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { return; } - inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_notify); + omh.om_ptr()->notify(THREAD); } // NOTE: see comment of notify() @@ -516,7 +545,9 @@ if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { return; } - inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD); + ObjectMonitorHandle omh; + inflate(&omh, THREAD, obj(), inflate_cause_notify); + omh.om_ptr()->notifyAll(THREAD); } // ----------------------------------------------------------------------------- @@ -710,77 +741,92 @@ assert(Universe::verify_in_progress() || DumpSharedSpaces || ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant"); - ObjectMonitor* monitor = NULL; - markOop temp, test; - intptr_t hash; - markOop mark = ReadStableMark(obj); + while (true) { + ObjectMonitor* monitor = NULL; + markOop temp, test; + intptr_t hash; + markOop mark = ReadStableMark(obj); - // object should remain ineligible for biased locking - assert(!mark->has_bias_pattern(), "invariant"); + // object should remain ineligible for biased locking + assert(!mark->has_bias_pattern(), "invariant"); - if (mark->is_neutral()) { - hash = mark->hash(); // this is a normal header - if (hash != 0) { // if it has hash, just return it - return hash; - } - hash = get_next_hash(Self, obj); // allocate a new hash code - temp = mark->copy_set_hash(hash); // merge the hash code into header - // use (machine word version) atomic operation to install the hash - test = obj->cas_set_mark(temp, mark); - if (test == mark) { - return hash; - } - // If atomic operation failed, we must inflate the header - // into heavy weight monitor. We could add more code here - // for fast path, but it does not worth the complexity. 
- } else if (mark->has_monitor()) { - monitor = mark->monitor(); - temp = monitor->header(); - assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); - hash = temp->hash(); - if (hash != 0) { - return hash; - } - // Skip to the following code to reduce code size - } else if (Self->is_lock_owned((address)mark->locker())) { - temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned - assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); - hash = temp->hash(); // by current thread, check if the displaced - if (hash != 0) { // header contains hash code - return hash; - } - // WARNING: - // The displaced header in the BasicLock on a thread's stack - // is strictly immutable. It CANNOT be changed in ANY cases. - // So we have to inflate the stack lock into an ObjectMonitor - // even if the current thread owns the lock. The BasicLock on - // a thread's stack can be asynchronously read by other threads - // during an inflate() call so any change to that stack memory - // may not propagate to other threads correctly. - } - - // Inflate the monitor to set hash code - monitor = inflate(Self, obj, inflate_cause_hash_code); - // Load displaced header and check it has hash code - mark = monitor->header(); - assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark)); - hash = mark->hash(); - if (hash == 0) { - hash = get_next_hash(Self, obj); - temp = mark->copy_set_hash(hash); // merge hash code into header - assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); - test = Atomic::cmpxchg(temp, monitor->header_addr(), mark); - if (test != mark) { - // The only update to the ObjectMonitor's header/dmw field - // is to merge in the hash code. If someone adds a new usage - // of the header/dmw field, please update this code. - hash = test->hash(); - assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test)); - assert(hash != 0, "Trivial unexpected object/monitor header usage."); + if (mark->is_neutral()) { + hash = mark->hash(); // this is a normal header + if (hash != 0) { // if it has hash, just return it + return hash; + } + hash = get_next_hash(Self, obj); // allocate a new hash code + temp = mark->copy_set_hash(hash); // merge the hash code into header + // use (machine word version) atomic operation to install the hash + test = obj->cas_set_mark(temp, mark); + if (test == mark) { + return hash; + } + // If atomic operation failed, we must inflate the header + // into heavy weight monitor. We could add more code here + // for fast path, but it does not worth the complexity. + } else if (mark->has_monitor()) { + ObjectMonitorHandle omh; + if (!omh.save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + monitor = omh.om_ptr(); + temp = monitor->header(); + assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); + hash = temp->hash(); + if (hash != 0) { + return hash; + } + // Skip to the following code to reduce code size + } else if (Self->is_lock_owned((address)mark->locker())) { + temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned + assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); + hash = temp->hash(); // by current thread, check if the displaced + if (hash != 0) { // header contains hash code + return hash; + } + // WARNING: + // The displaced header in the BasicLock on a thread's stack + // is strictly immutable. 
It CANNOT be changed in ANY cases. + // So we have to inflate the stack lock into an ObjectMonitor + // even if the current thread owns the lock. The BasicLock on + // a thread's stack can be asynchronously read by other threads + // during an inflate() call so any change to that stack memory + // may not propagate to other threads correctly. + } + + // Inflate the monitor to set hash code + ObjectMonitorHandle omh; + inflate(&omh, Self, obj, inflate_cause_hash_code); + monitor = omh.om_ptr(); + // Load displaced header and check it has hash code + mark = monitor->header(); + assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark)); + hash = mark->hash(); + if (hash == 0) { + hash = get_next_hash(Self, obj); + temp = mark->copy_set_hash(hash); // merge hash code into header + assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp)); + test = Atomic::cmpxchg(temp, monitor->header_addr(), mark); + if (test != mark) { + // The only non-deflation update to the ObjectMonitor's + // header/dmw field is to merge in the hash code. If someone + // adds a new usage of the header/dmw field, please update + // this code. + // ObjectMonitor::install_displaced_markword_in_object() + // does mark the header/dmw field as part of async deflation, + // but that protocol cannot happen now due to the + // ObjectMonitorHandle above. + hash = test->hash(); + assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test)); + assert(hash != 0, "Trivial unexpected object/monitor header usage."); + } } + // We finally get the hash + return hash; } - // We finally get the hash - return hash; } // Deprecated -- use FastHashCode() instead. @@ -800,20 +846,28 @@ assert(thread == JavaThread::current(), "Can only be called on current thread"); oop obj = h_obj(); - markOop mark = ReadStableMark(obj); + while (true) { + markOop mark = ReadStableMark(obj); - // Uncontended case, header points to stack - if (mark->has_locker()) { - return thread->is_lock_owned((address)mark->locker()); - } - // Contended case, header points to ObjectMonitor (tagged pointer) - if (mark->has_monitor()) { - ObjectMonitor* monitor = mark->monitor(); - return monitor->is_entered(thread) != 0; + // Uncontended case, header points to stack + if (mark->has_locker()) { + return thread->is_lock_owned((address)mark->locker()); + } + // Contended case, header points to ObjectMonitor (tagged pointer) + if (mark->has_monitor()) { + ObjectMonitorHandle omh; + if (!omh.save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + bool ret_code = omh.om_ptr()->is_entered(thread) != 0; + return ret_code; + } + // Unlocked case, header in place + assert(mark->is_neutral(), "sanity check"); + return false; } - // Unlocked case, header in place - assert(mark->is_neutral(), "sanity check"); - return false; } // Be aware of this method could revoke bias of the lock object. @@ -839,27 +893,37 @@ assert(self == JavaThread::current(), "Can only be called on current thread"); oop obj = h_obj(); - markOop mark = ReadStableMark(obj); - // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. - if (mark->has_locker()) { - return self->is_lock_owned((address)mark->locker()) ? - owner_self : owner_other; - } + while (true) { + markOop mark = ReadStableMark(obj); - // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor. - // The Object:ObjectMonitor relationship is stable as long as we're - // not at a safepoint. 
- if (mark->has_monitor()) { - void * owner = mark->monitor()->_owner; - if (owner == NULL) return owner_none; - return (owner == self || - self->is_lock_owned((address)owner)) ? owner_self : owner_other; - } + // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. + if (mark->has_locker()) { + return self->is_lock_owned((address)mark->locker()) ? + owner_self : owner_other; + } + + // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor. + // The Object:ObjectMonitor relationship is stable as long as we're + // not at a safepoint and AsyncDeflateIdleMonitors is false. + if (mark->has_monitor()) { + ObjectMonitorHandle omh; + if (!omh.save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + ObjectMonitor * monitor = omh.om_ptr(); + void * owner = monitor->_owner; + if (owner == NULL) return owner_none; + return (owner == self || + self->is_lock_owned((address)owner)) ? owner_self : owner_other; + } - // CASE: neutral - assert(mark->is_neutral(), "sanity check"); - return owner_none; // it's unlocked + // CASE: neutral + assert(mark->is_neutral(), "sanity check"); + return owner_none; // it's unlocked + } } // FIXME: jvmti should call this @@ -874,33 +938,41 @@ } oop obj = h_obj(); - address owner = NULL; - markOop mark = ReadStableMark(obj); + while (true) { + address owner = NULL; + markOop mark = ReadStableMark(obj); - // Uncontended case, header points to stack - if (mark->has_locker()) { - owner = (address) mark->locker(); - } + // Uncontended case, header points to stack + if (mark->has_locker()) { + owner = (address) mark->locker(); + } - // Contended case, header points to ObjectMonitor (tagged pointer) - else if (mark->has_monitor()) { - ObjectMonitor* monitor = mark->monitor(); - assert(monitor != NULL, "monitor should be non-null"); - owner = (address) monitor->owner(); - } + // Contended case, header points to ObjectMonitor (tagged pointer) + else if (mark->has_monitor()) { + ObjectMonitorHandle omh; + if (!omh.save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + ObjectMonitor* monitor = omh.om_ptr(); + assert(monitor != NULL, "monitor should be non-null"); + owner = (address) monitor->owner(); + } - if (owner != NULL) { - // owning_thread_from_monitor_owner() may also return NULL here - return Threads::owning_thread_from_monitor_owner(t_list, owner); - } + if (owner != NULL) { + // owning_thread_from_monitor_owner() may also return NULL here + return Threads::owning_thread_from_monitor_owner(t_list, owner); + } - // Unlocked case, header in place - // Cannot have assertion since this object may have been - // locked by another thread when reaching here. - // assert(mark->is_neutral(), "sanity check"); + // Unlocked case, header in place + // Cannot have assertion since this object may have been + // locked by another thread when reaching here. + // assert(mark->is_neutral(), "sanity check"); - return NULL; + return NULL; + } } // Visitors ... 
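The rewritten queries above all share one idiom: loop, re-read the mark, and retry whenever save_om_ptr() loses to async deflation. Schematically, with stub functions standing in for obj->mark() and ObjectMonitorHandle::save_om_ptr():

  struct Mark { bool has_monitor; void* monitor; };

  Mark read_mark() { return Mark{ false, nullptr }; }  // stands in for obj->mark()
  bool try_pin(void*) { return true; }                 // stands in for save_om_ptr()

  void* pin_monitor_or_null() {
    while (true) {
      Mark mark = read_mark();
      if (!mark.has_monitor) return nullptr;   // nothing inflated to pin
      if (try_pin(mark.monitor)) return mark.monitor;
      // Lost to async deflation: loop and re-read the header, which by
      // now no longer points at the deflated ObjectMonitor.
    }
  }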
@@ -911,8 +983,18 @@ assert(block->object() == CHAINMARKER, "must be a block header"); for (int i = _BLOCKSIZE - 1; i > 0; i--) { ObjectMonitor* mid = (ObjectMonitor *)(block + i); - oop object = (oop)mid->object(); - if (object != NULL) { + if (mid->is_active()) { + ObjectMonitorHandle omh(mid); + + if (mid->object() == NULL || + (AsyncDeflateIdleMonitors && mid->_owner == DEFLATER_MARKER)) { + // Only process with closure if the object is set. + // For async deflation, race here if monitor is not owned! + // The above ref_count bump (in ObjectMonitorHandle ctr) + // will cause subsequent async deflation to skip it. + // However, previous or concurrent async deflation is a race. + continue; + } closure->do_monitor(mid); } } @@ -932,18 +1014,55 @@ if (gMonitorPopulation == 0) { return false; } - int monitors_used = gMonitorPopulation - gMonitorFreeCount; - int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation; - return monitor_usage > MonitorUsedDeflationThreshold; + if (MonitorUsedDeflationThreshold > 0) { + int monitors_used = gMonitorPopulation - gMonitorFreeCount; + int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation; + return monitor_usage > MonitorUsedDeflationThreshold; + } + return false; } -bool ObjectSynchronizer::is_cleanup_needed() { - if (MonitorUsedDeflationThreshold > 0) { - return monitors_used_above_threshold(); +bool ObjectSynchronizer::is_async_deflation_needed() { + if (!AsyncDeflateIdleMonitors) { + return false; + } + if (is_async_deflation_requested()) { + // Async deflation request. + return true; + } + if (AsyncDeflationInterval > 0 && + time_since_last_async_deflation_ms() > AsyncDeflationInterval && + monitors_used_above_threshold()) { + // It's been longer than our specified deflate interval and there + // are too many monitors in use. We don't deflate more frequently + // than AsyncDeflationInterval (unless is_async_deflation_requested) + // in order to not swamp the ServiceThread. + _last_async_deflation_time_ns = os::javaTimeNanos(); + return true; } return false; } +bool ObjectSynchronizer::is_safepoint_deflation_needed() { + if (!AsyncDeflateIdleMonitors) { + if (monitors_used_above_threshold()) { + // Too many monitors in use. + return true; + } + return false; + } + if (is_special_deflation_requested()) { + // For AsyncDeflateIdleMonitors only do a safepoint deflation + // if there is a special deflation request. + return true; + } + return false; +} + +jlong ObjectSynchronizer::time_since_last_async_deflation_ms() { + return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS); +} + void ObjectSynchronizer::oops_do(OopClosure* f) { // We only scan the global used list here (for moribund threads), and // the thread-local monitors in Thread::oops_do(). @@ -1023,13 +1142,30 @@ } } -ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) { +ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self, + const InflateCause cause) { // A large MAXPRIVATE value reduces both list lock contention // and list coherency traffic, but also tends to increase the // number of objectMonitors in circulation as well as the STW // scavenge costs. As usual, we lean toward time in space-time // tradeoffs. 
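Worked numbers for the trigger logic above (values invented): with gMonitorPopulation = 4096 and gMonitorFreeCount = 256, usage is (4096 - 256) * 100 / 4096 = 93%, which exceeds the default MonitorUsedDeflationThreshold of 90, so an async deflation is requested, throttled to at most one per AsyncDeflationInterval (250 ms) unless explicitly requested:

  #include <cstdint>

  bool used_above_threshold(int64_t population, int64_t free_count,
                            int64_t threshold_pct) {
    if (population == 0 || threshold_pct == 0) return false;
    int64_t usage_pct = (population - free_count) * 100 / population;
    return usage_pct > threshold_pct;  // 93 > 90 for the numbers above
  }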
const int MAXPRIVATE = 1024; + + if (AsyncDeflateIdleMonitors) { + JavaThread * jt = (JavaThread *)Self; + if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 && + cause != inflate_cause_vm_internal) { + // Deflate any per-thread idle monitors for this JavaThread if + // this is not an internal inflation; internal inflations can + // occur in places where it is not safe to pause for a safepoint. + // Clean up your own mess. (Gibbs Rule 45) Otherwise, skip this + // deflation. deflate_global_idle_monitors_using_JT() is called + // by the ServiceThread. + debug_only(jt->check_for_valid_safepoint_state(false);) + ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(); + } + } + for (;;) { ObjectMonitor * m; @@ -1044,6 +1180,7 @@ Self->omFreeList = m->FreeNext; Self->omFreeCount--; guarantee(m->object() == NULL, "invariant"); + m->set_allocation_state(ObjectMonitor::New); m->FreeNext = Self->omInUseList; Self->omInUseList = m; Self->omInUseCount++; @@ -1065,8 +1202,23 @@ ObjectMonitor * take = gFreeList; gFreeList = take->FreeNext; guarantee(take->object() == NULL, "invariant"); + if (AsyncDeflateIdleMonitors) { + // Clear any values we allowed to linger during async deflation. + take->_header = NULL; + take->set_owner(NULL); + + if (take->ref_count() < 0) { + // Add back max_jint to restore the ref_count field to its + // proper value. + Atomic::add(max_jint, &take->_ref_count); + + assert(take->ref_count() >= 0, "must not be negative: ref_count=%d", + take->ref_count()); + } + } guarantee(!take->is_busy(), "invariant"); take->Recycle(); + assert(take->is_free(), "invariant"); omRelease(Self, take, false); } Thread::muxRelease(&gListLock); @@ -1119,6 +1271,7 @@ for (int i = 1; i < _BLOCKSIZE; i++) { temp[i].FreeNext = (ObjectMonitor *)&temp[i+1]; + assert(temp[i].is_free(), "invariant"); } // terminate the last monitor as the end of list @@ -1161,13 +1314,15 @@ // // Key constraint: all ObjectMonitors on a thread's free list and the global // free list must have their object field set to null. This prevents the -// scavenger -- deflate_monitor_list() -- from reclaiming them. +// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT() +// -- from reclaiming them while we are trying to release them. void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) { guarantee(m->header() == NULL, "invariant"); guarantee(m->object() == NULL, "invariant"); guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor"); + m->set_allocation_state(ObjectMonitor::Free); // Remove from omInUseList if (fromPerThreadAlloc) { ObjectMonitor* cur_mid_in_use = NULL; @@ -1190,6 +1345,7 @@ // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new m->FreeNext = Self->omFreeList; + guarantee(m->is_free(), "invariant"); Self->omFreeList = m; Self->omFreeCount++; } @@ -1213,6 +1369,10 @@ // either via Thread::oops_do() (if safepoint happens before omFlush()) or via // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's // monitors have been transferred to the global in-use list). +// +// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT() +// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can +// run at the same time as omFlush() so we have to be careful. 
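The Atomic::add(max_jint, ...) fix-up shown above undoes the deflater's 0 -> -max_jint flip while preserving any pins that raced in afterwards. A toy check of that arithmetic (the interleaving is invented):

  #include <atomic>
  #include <cassert>
  #include <cstdint>

  int main() {
    std::atomic<int32_t> ref_count{0};
    int32_t expected = 0;
    ref_count.compare_exchange_strong(expected, -INT32_MAX); // deflater flips
    ref_count.fetch_add(1);         // racing pin; its owner will back off later
    ref_count.fetch_add(INT32_MAX); // re-allocation restore: -INT32_MAX+1 -> 1
    assert(ref_count.load() == 1);  // the surviving pin count is intact
    return 0;
  }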
void ObjectSynchronizer::omFlush(Thread * Self) { ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL @@ -1232,7 +1392,7 @@ s->set_owner(NULL); // redundant but good hygiene } guarantee(tail != NULL, "invariant"); - assert(Self->omFreeCount == tally, "free-count off"); + ADIM_guarantee(Self->omFreeCount == tally, "free-count off"); Self->omFreeList = NULL; Self->omFreeCount = 0; } @@ -1249,9 +1409,10 @@ for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) { inUseTail = cur_om; inUseTally++; + ADIM_guarantee(cur_om->is_active(), "invariant"); } guarantee(inUseTail != NULL, "invariant"); - assert(Self->omInUseCount == inUseTally, "in-use count off"); + ADIM_guarantee(Self->omInUseCount == inUseTally, "in-use count off"); Self->omInUseList = NULL; Self->omInUseCount = 0; } @@ -1299,19 +1460,28 @@ } // Fast path code shared by multiple functions -void ObjectSynchronizer::inflate_helper(oop obj) { - markOop mark = obj->mark(); - if (mark->has_monitor()) { - assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); - assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); +void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle * omh_p, oop obj) { + while (true) { + markOop mark = obj->mark(); + if (mark->has_monitor()) { + if (!omh_p->save_om_ptr(obj, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + ObjectMonitor * monitor = omh_p->om_ptr(); + assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid"); + markOop dmw = monitor->header(); + assert(dmw->is_neutral(), "sanity check: header=" INTPTR_FORMAT, p2i(dmw)); + return; + } + inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal); return; } - inflate(Thread::current(), obj, inflate_cause_vm_internal); } -ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self, - oop object, - const InflateCause cause) { +void ObjectSynchronizer::inflate(ObjectMonitorHandle * omh_p, Thread * Self, + oop object, const InflateCause cause) { // Inflate mutates the heap ... // Relaxing assertion for bug 6320749. assert(Universe::verify_in_progress() || @@ -1332,12 +1502,17 @@ // CASE: inflated if (mark->has_monitor()) { - ObjectMonitor * inf = mark->monitor(); + if (!omh_p->save_om_ptr(object, mark)) { + // Lost a race with async deflation so try again. + assert(AsyncDeflateIdleMonitors, "sanity check"); + continue; + } + ObjectMonitor * inf = omh_p->om_ptr(); markOop dmw = inf->header(); assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw)); assert(oopDesc::equals((oop) inf->object(), object), "invariant"); assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); - return inf; + return; } // CASE: inflation in progress - inflating over a stack-lock. @@ -1373,7 +1548,18 @@ LogStreamHandle(Trace, monitorinflation) lsh; if (mark->has_locker()) { - ObjectMonitor * m = omAlloc(Self); + ObjectMonitor * m; + if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) { + // If !AsyncDeflateIdleMonitors or if an internal inflation, then + // we won't stop for a potential safepoint in omAlloc. + m = omAlloc(Self, cause); + } else { + // If AsyncDeflateIdleMonitors and not an internal inflation, then + // we may stop for a safepoint in omAlloc() so protect object. + Handle h_obj(Self, object); + m = omAlloc(Self, cause); + object = h_obj(); // Refresh object. 
+ } // Optimistically prepare the objectmonitor - anticipate successful CAS // We do this before the CAS in order to minimize the length of time // in which INFLATING appears in the mark. @@ -1421,7 +1607,7 @@ markOop dmw = mark->displaced_mark_helper(); // Catch if the object's header is not neutral (not locked and // not marked is what we care about here). - assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw)); + ADIM_guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw)); // Setup monitor fields to proper values -- prepare the monitor m->set_header(dmw); @@ -1435,6 +1621,10 @@ m->set_object(object); // TODO-FIXME: assert BasicLock->dhw != 0. + omh_p->set_om_ptr(m); + assert(m->is_new(), "freshly allocated monitor must be new"); + m->set_allocation_state(ObjectMonitor::Old); + // Must preserve store ordering. The monitor state must // be stable at the time of publishing the monitor address. guarantee(object->mark() == markOopDesc::INFLATING(), "invariant"); @@ -1452,7 +1642,8 @@ if (event.should_commit()) { post_monitor_inflate_event(&event, object, cause); } - return m; + ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free"); + return; } // CASE: neutral @@ -1467,8 +1658,19 @@ // Catch if the object's header is not neutral (not locked and // not marked is what we care about here). - assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark)); - ObjectMonitor * m = omAlloc(Self); + ADIM_guarantee(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark)); + ObjectMonitor * m; + if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) { + // If !AsyncDeflateIdleMonitors or if an internal inflation, then + // we won't stop for a potential safepoint in omAlloc. + m = omAlloc(Self, cause); + } else { + // If AsyncDeflateIdleMonitors and not an internal inflation, then + // we may stop for a safepoint in omAlloc() so protect object. + Handle h_obj(Self, object); + m = omAlloc(Self, cause); + object = h_obj(); // Refresh object. + } // prepare m for installation - set monitor to initial state m->Recycle(); m->set_header(mark); @@ -1478,10 +1680,16 @@ m->_Responsible = NULL; m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class + omh_p->set_om_ptr(m); + assert(m->is_new(), "freshly allocated monitor must be new"); + m->set_allocation_state(ObjectMonitor::Old); + if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) { m->set_header(NULL); m->set_object(NULL); m->Recycle(); + omh_p->set_om_ptr(NULL); + // omRelease() will reset the allocation state omRelease(Self, m, true); m = NULL; continue; @@ -1502,7 +1710,8 @@ if (event.should_commit()) { post_monitor_inflate_event(&event, object, cause); } - return m; + ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free"); + return; } } @@ -1528,6 +1737,26 @@ // which in turn can mean large(r) numbers of ObjectMonitors in circulation. // This is an unfortunate aspect of this design. +void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + + // The per-thread in-use lists are handled in + // ParallelSPCleanupThreadClosure::do_thread(). + + if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) { + // Use the older mechanism for the global in-use list or if a + // special deflation has been requested before the safepoint. 
+ ObjectSynchronizer::deflate_idle_monitors(_counters); + return; + } + + log_debug(monitorinflation)("requesting async deflation of idle monitors."); + // Request deflation of idle monitors by the ServiceThread: + set_is_async_deflation_requested(true); + MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + ml.notify_all(); +} + // Deflate a single monitor if not in-use // Return true if deflated, false if in-use bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, @@ -1565,6 +1794,7 @@ assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT, p2i(mid->object())); + assert(mid->is_free(), "invariant"); // Move the object to the working free list defined by freeHeadp, freeTailp if (*freeHeadp == NULL) *freeHeadp = mid; @@ -1579,6 +1809,146 @@ return deflated; } +// Deflate the specified ObjectMonitor if not in-use using a JavaThread. +// Returns true if it was deflated and false otherwise. +// +// The async deflation protocol sets owner to DEFLATER_MARKER and +// makes ref_count negative as signals to contending threads that +// an async deflation is in progress. There are a number of checks +// as part of the protocol to make sure that the calling thread has +// not lost the race to a contending thread or to a thread that just +// wants to use the ObjectMonitor*. +// +// The ObjectMonitor has been successfully async deflated when: +// (owner == DEFLATER_MARKER && ref_count < 0) +// Contending threads or ObjectMonitor* using threads that see those +// values know to retry their operation. +// +bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid, + ObjectMonitor** freeHeadp, + ObjectMonitor** freeTailp) { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + // A newly allocated ObjectMonitor should not be seen here so we + // avoid an endless inflate/deflate cycle. + assert(mid->is_old(), "must be old: allocation_state=%d", + (int) mid->allocation_state()); + + if (mid->is_busy() || mid->ref_count() != 0) { + // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor* + // is in use so no deflation. + return false; + } + + if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) { + // ObjectMonitor is not owned by another thread. Our setting + // owner to DEFLATER_MARKER forces any contending thread through + // the slow path. This is just the first part of the async + // deflation dance. + + if (mid->_contentions != 0 || mid->_waiters != 0) { + // Another thread has raced to enter the ObjectMonitor after + // mid->is_busy() above or has already entered and waited on + // it which makes it busy so no deflation. Restore owner to + // NULL if it is still DEFLATER_MARKER. + Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); + return false; + } + + if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) { + // Make ref_count negative to force any contending threads or + // ObjectMonitor* using threads to retry. This is the second + // part of the async deflation dance. + + if (mid->_owner == DEFLATER_MARKER) { + // If owner is still DEFLATER_MARKER, then we have successfully + // signaled any contending threads to retry. If it is not, then we + // have lost the race to an entering thread and the ObjectMonitor + // is now busy. This is the third and final part of the async + // deflation dance. 
+ // Note: This owner check solves the ABA problem with ref_count + // where another thread acquired the ObjectMonitor, finished + // using it and restored the ref_count to zero. + + // Sanity checks for the races: + guarantee(mid->_contentions == 0, "must be 0: contentions=%d", + mid->_contentions); + guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters); + guarantee(mid->_cxq == NULL, "must be no contending threads: cxq=" + INTPTR_FORMAT, p2i(mid->_cxq)); + guarantee(mid->_EntryList == NULL, + "must be no entering threads: EntryList=" INTPTR_FORMAT, + p2i(mid->_EntryList)); + + const oop obj = (oop) mid->object(); + if (log_is_enabled(Trace, monitorinflation)) { + ResourceMark rm; + log_trace(monitorinflation)("deflate_monitor_using_JT: " + "object=" INTPTR_FORMAT ", mark=" + INTPTR_FORMAT ", type='%s'", + p2i(obj), p2i(obj->mark()), + obj->klass()->external_name()); + } + + // Install the old mark word if nobody else has already done it. + mid->install_displaced_markword_in_object(obj); + mid->clear_using_JT(); + + assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT, + p2i(mid->object())); + assert(mid->is_free(), "must be free: allocation_state=%d", + (int) mid->allocation_state()); + + // Move the deflated ObjectMonitor to the working free list + // defined by freeHeadp and freeTailp. + if (*freeHeadp == NULL) { + // First one on the list. + *freeHeadp = mid; + } + if (*freeTailp != NULL) { + // We append to the list so the caller can use mid->FreeNext + // to fix the linkages in its context. + ObjectMonitor * prevtail = *freeTailp; + // Should have been cleaned up by the caller: + assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext=" + INTPTR_FORMAT, p2i(prevtail->FreeNext)); + prevtail->FreeNext = mid; + } + *freeTailp = mid; + + // At this point, mid->FreeNext still refers to its current + // value and another ObjectMonitor's FreeNext field still + // refers to this ObjectMonitor. Those linkages have to be + // cleaned up by the caller who has the complete context. + + // We leave owner == DEFLATER_MARKER and ref_count < 0 + // to force any racing threads to retry. + return true; // Success, ObjectMonitor has been deflated. + } + + // The owner was changed from DEFLATER_MARKER so we lost the + // race since the ObjectMonitor is now busy. + + // Add back max_jint to restore the ref_count field to its + // proper value (which may not be what we saw above): + Atomic::add(max_jint, &mid->_ref_count); + + assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d", + mid->ref_count()); + return false; + } + + // The ref_count was no longer 0 so we lost the race since the + // ObjectMonitor is now busy or the ObjectMonitor* is now in use. + // Restore owner to NULL if it is still DEFLATER_MARKER: + Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); + } + + // The owner field is no longer NULL so we lost the race since the + // ObjectMonitor is now busy. + return false; +} + // Walk a given monitor list, and deflate idle monitors // The given list could be a per-thread list or a global list // Caller acquires gListLock as needed. @@ -1622,6 +1992,82 @@ return deflated_count; } +// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using +// a JavaThread. Returns the number of deflated ObjectMonitors. The given +// list could be a per-thread in-use list or the global in-use list. +// Caller acquires gListLock as appropriate.
If a safepoint has started, +// then we save state via savedMidInUsep and return to the caller to +// honor the safepoint. +// +int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** listHeadp, + ObjectMonitor** freeHeadp, + ObjectMonitor** freeTailp, + ObjectMonitor** savedMidInUsep) { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + + ObjectMonitor* mid; + ObjectMonitor* next; + ObjectMonitor* cur_mid_in_use = NULL; + int deflated_count = 0; + + if (*savedMidInUsep == NULL) { + // No saved state so start at the beginning. + mid = *listHeadp; + } else { + // We're restarting after a safepoint so restore the necessary state + // before we resume. + cur_mid_in_use = *savedMidInUsep; + mid = cur_mid_in_use->FreeNext; + } + while (mid != NULL) { + // Only try to deflate if there is an associated Java object and if + // mid is old (is not newly allocated and is not newly freed). + if (mid->object() != NULL && mid->is_old() && + deflate_monitor_using_JT(mid, freeHeadp, freeTailp)) { + // Deflation succeeded so update the in-use list. + if (mid == *listHeadp) { + *listHeadp = mid->FreeNext; + } else if (cur_mid_in_use != NULL) { + // Maintain the current in-use list. + cur_mid_in_use->FreeNext = mid->FreeNext; + } + next = mid->FreeNext; + mid->FreeNext = NULL; + // At this point mid is disconnected from the in-use list + // and is the current tail in the freeHeadp list. + mid = next; + deflated_count++; + } else { + // mid is considered in-use if it does not have an associated + // Java object or mid is not old or deflation did not succeed. + // A mid->is_new() node can be seen here when it is freshly + // returned by omAlloc() (and skips the deflation code path). + // A mid->is_old() node can be seen here when deflation failed. + // A mid->is_free() node can be seen here when a fresh node from + // omAlloc() is released by omRelease() due to losing the race + // in inflate(). + + cur_mid_in_use = mid; + mid = mid->FreeNext; + + if (SafepointSynchronize::is_synchronizing() && + cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) { + // If a safepoint has started and cur_mid_in_use is not the list + // head and is old, then it is safe to use as saved state. Return + // to the caller so gListLock can be dropped as appropriate + // before blocking. + *savedMidInUsep = cur_mid_in_use; + return deflated_count; + } + } + } + // We finished the list without a safepoint starting so there's + // no need to save state. + *savedMidInUsep = NULL; + return deflated_count; +} + void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { counters->nInuse = 0; // currently associated with objects counters->nInCirculation = 0; // extant @@ -1632,6 +2078,15 @@ void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + + if (AsyncDeflateIdleMonitors) { + // Nothing to do when global idle ObjectMonitors are deflated using + // a JavaThread unless a special deflation has been requested. + if (!is_special_deflation_requested()) { + return; + } + } + bool deflated = false; ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors @@ -1684,14 +2139,145 @@ } } +// Deflate global idle ObjectMonitors using a JavaThread. 
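deflate_monitor_list_using_JT() above unlinks deflated monitors from a singly-linked in-use list and, when a safepoint starts, parks a cursor in *savedMidInUsep so a later call can resume the walk. A self-contained model of that save/resume pattern (hypothetical Node type; illustrative only, not part of this patch):

#include <cassert>
#include <cstddef>

struct Node {    // hypothetical stand-in for ObjectMonitor
  bool idle;     // true means "deflation would succeed"
  Node* next;    // stand-in for FreeNext
};

// Unlink idle nodes from the list at *head. When stop_now() fires
// (modeling SafepointSynchronize::is_synchronizing()), park the
// cursor in *saved and return early; the next call resumes there.
// Returns the number of nodes unlinked by this call.
int walk(Node** head, Node** saved, bool (*stop_now)()) {
  Node* cur_in_use = *saved;  // restore parked state, if any
  Node* mid = (cur_in_use == NULL) ? *head : cur_in_use->next;
  int unlinked = 0;
  while (mid != NULL) {
    Node* next = mid->next;
    if (mid->idle) {          // models deflate_monitor_using_JT() success
      if (mid == *head) {
        *head = next;         // unlinked the list head
      } else {
        cur_in_use->next = next;
      }
      mid->next = NULL;       // caller splices mid onto its free list
      unlinked++;
    } else {
      cur_in_use = mid;       // node stays; it becomes the cursor
      if (stop_now() && cur_in_use != *head) {
        *saved = cur_in_use;  // park state and honor the "safepoint"
        return unlinked;
      }
    }
    mid = next;
  }
  *saved = NULL;              // reached the end; nothing parked
  return unlinked;
}

static bool no_stop() { return false; }

int main() {
  Node c = { true, NULL };
  Node b = { false, &c };
  Node a = { true, &b };
  Node* head = &a;
  Node* saved = NULL;
  int n = walk(&head, &saved, no_stop);
  // a and c were idle: both unlinked, b is the new head.
  assert(n == 2 && head == &b && saved == NULL);
  return 0;
}

As in the real code, the cursor is only parked on a node that stays on the list (the hunk above additionally requires cur_mid_in_use->is_old()), since a node that could be freed or recycled across the safepoint would be an unsafe resume point. The global and per-thread driver functions follow.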
+// +void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + JavaThread * self = JavaThread::current(); + + deflate_common_idle_monitors_using_JT(true /* is_global */, self); +} + +// Deflate per-thread idle ObjectMonitors using a JavaThread. +// +void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + JavaThread * self = JavaThread::current(); + + self->omShouldDeflateIdleMonitors = false; + + deflate_common_idle_monitors_using_JT(false /* !is_global */, self); +} + +// Deflate global or per-thread idle ObjectMonitors using a JavaThread. +// +void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) { + int deflated_count = 0; + ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged ObjectMonitors + ObjectMonitor * freeTailp = NULL; + ObjectMonitor * savedMidInUsep = NULL; + elapsedTimer timer; + + if (log_is_enabled(Info, monitorinflation)) { + timer.start(); + } + + if (is_global) { + Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)"); + OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount)); + } else { + OM_PERFDATA_OP(MonExtant, inc(self->omInUseCount)); + } + + do { + int local_deflated_count; + if (is_global) { + local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep); + gOmInUseCount -= local_deflated_count; + } else { + local_deflated_count = deflate_monitor_list_using_JT(self->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep); + self->omInUseCount -= local_deflated_count; + } + deflated_count += local_deflated_count; + + if (freeHeadp != NULL) { + // Move the scavenged ObjectMonitors to the global free list. + guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count); + assert(freeTailp->FreeNext == NULL, "invariant"); + + if (!is_global) { + Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)"); + } + // Constant-time list splice - prepend scavenged segment to gFreeList. + freeTailp->FreeNext = gFreeList; + gFreeList = freeHeadp; + + gMonitorFreeCount += local_deflated_count; + OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); + if (!is_global) { + Thread::muxRelease(&gListLock); + } + } + + if (savedMidInUsep != NULL) { + // deflate_monitor_list_using_JT() detected a safepoint starting. + if (is_global) { + Thread::muxRelease(&gListLock); + } + timer.stop(); + { + if (is_global) { + log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint."); + } else { + log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self)); + } + assert(SafepointSynchronize::is_synchronizing(), "sanity check"); + ThreadBlockInVM blocker(self); + } + // Prepare for another loop after the safepoint. 
+ freeHeadp = NULL; + freeTailp = NULL; + if (log_is_enabled(Info, monitorinflation)) { + timer.start(); + } + if (is_global) { + Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)"); + } + } + } while (savedMidInUsep != NULL); + if (is_global) { + Thread::muxRelease(&gListLock); + } + timer.stop(); + + LogStreamHandle(Debug, monitorinflation) lsh_debug; + LogStreamHandle(Info, monitorinflation) lsh_info; + LogStream * ls = NULL; + if (log_is_enabled(Debug, monitorinflation)) { + ls = &lsh_debug; + } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { + ls = &lsh_info; + } + if (ls != NULL) { + if (is_global) { + ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); + } else { + ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count); + } + } +} + void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { // Report the cumulative time for deflating each thread's idle // monitors. Note: if the work is split among more than one // worker thread, then the reported time will likely be more // than a beginning to end measurement of the phase. + // Note: AsyncDeflateIdleMonitors only deflates per-thread idle + // monitors at a safepoint when a special deflation has been requested. log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged); - gMonitorFreeCount += counters->nScavenged; + bool needs_special_deflation = is_special_deflation_requested(); + if (!AsyncDeflateIdleMonitors || needs_special_deflation) { + // AsyncDeflateIdleMonitors does not use these counters unless + // there is a special deflation request. + + gMonitorFreeCount += counters->nScavenged; + + OM_PERFDATA_OP(Deflations, inc(counters->nScavenged)); + OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation)); + } if (log_is_enabled(Debug, monitorinflation)) { // exit_globals()'s call to audit_and_print_stats() is done @@ -1706,17 +2292,28 @@ } ForceMonitorScavenge = 0; // Reset - - OM_PERFDATA_OP(Deflations, inc(counters->nScavenged)); - OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation)); - GVars.stwRandom = os::random(); GVars.stwCycle++; + if (needs_special_deflation) { + set_is_special_deflation_requested(false); // special deflation is done + } } void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + if (AsyncDeflateIdleMonitors) { + if (!is_special_deflation_requested()) { + // Mark the JavaThread for idle monitor deflation if a special + // deflation has NOT been requested. + if (thread->omInUseCount > 0) { + // This JavaThread is using monitors so mark it. + thread->omShouldDeflateIdleMonitors = true; + } + return; + } + } + ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors ObjectMonitor * freeTailp = NULL; elapsedTimer timer; @@ -1925,7 +2522,8 @@ // Check a free monitor entry; log any errors. 
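finish_deflate_idle_monitors() and deflate_thread_local_monitors() above give the special-deflation flag its request/consume semantics: System.gc() handling and the VM-exit paths set it, the next safepoint cleanup honors it once with the older STW deflation, then clears it. A toy model of that handshake (plain C++; illustrative only, not part of this patch):

#include <atomic>
#include <cassert>

// Models ObjectSynchronizer::_is_special_deflation_requested (a volatile
// bool in the patch; std::atomic here to keep the sketch self-contained).
static std::atomic<bool> special_deflation_requested(false);

// Models set_is_special_deflation_requested(true) as called from the
// System.gc() and VM-exit hunks elsewhere in this patch.
void request_special_deflation() {
  special_deflation_requested.store(true);
}

// Models the safepoint cleanup decision: with async deflation enabled,
// STW deflation runs only when a special deflation was requested, and
// the request is consumed once honored. Returns true if STW deflation ran.
bool safepoint_cleanup(bool async_deflation_enabled) {
  if (async_deflation_enabled && !special_deflation_requested.load()) {
    return false;  // leave the work to the ServiceThread
  }
  // ... deflate_idle_monitors() would run here ...
  special_deflation_requested.store(false);  // request consumed
  return true;
}

int main() {
  assert(!safepoint_cleanup(true));  // async mode: skipped by default
  request_special_deflation();       // e.g. an explicit GC
  assert(safepoint_cleanup(true));   // honored exactly once...
  assert(!safepoint_cleanup(true));  // ...then skipping resumes
  assert(safepoint_cleanup(false));  // legacy mode always deflates
  return 0;
}

The chk_free_entry() audit that follows checks the per-monitor invariants this machinery relies on.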
void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n, outputStream * out, int *error_cnt_p) { - if (n->is_busy()) { + if ((!AsyncDeflateIdleMonitors && n->is_busy()) || + (AsyncDeflateIdleMonitors && n->is_busy_async())) { if (jt != NULL) { out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT ": free per-thread monitor must not be busy.", p2i(jt), @@ -1942,12 +2540,13 @@ ": free per-thread monitor must have NULL _header " "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n), p2i(n->header())); - } else { + *error_cnt_p = *error_cnt_p + 1; + } else if (!AsyncDeflateIdleMonitors) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " "must have NULL _header field: _header=" INTPTR_FORMAT, p2i(n), p2i(n->header())); + *error_cnt_p = *error_cnt_p + 1; } - *error_cnt_p = *error_cnt_p + 1; } if (n->object() != NULL) { if (jt != NULL) { @@ -2114,16 +2713,17 @@ if (gOmInUseCount > 0) { out->print_cr("In-use global monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); - out->print_cr("%18s %s %18s %18s", - "monitor", "BHL", "object", "object type"); - out->print_cr("================== === ================== =================="); + out->print_cr("%18s %s %7s %18s %18s", + "monitor", "BHL", "ref_cnt", "object", "object type"); + out->print_cr("================== === ======= ================== =================="); for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) { const oop obj = (oop) n->object(); const markOop mark = n->header(); ResourceMark rm; - out->print_cr(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n), - n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL, - p2i(obj), obj->klass()->external_name()); + out->print_cr(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s", + p2i(n), n->is_busy() != 0, mark->hash() != 0, + n->owner() != NULL, (int)n->ref_count(), p2i(obj), + obj->klass()->external_name()); } } @@ -2133,18 +2733,18 @@ out->print_cr("In-use per-thread monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); - out->print_cr("%18s %18s %s %18s %18s", - "jt", "monitor", "BHL", "object", "object type"); - out->print_cr("================== ================== === ================== =================="); + out->print_cr("%18s %18s %s %7s %18s %18s", + "jt", "monitor", "BHL", "ref_cnt", "object", "object type"); + out->print_cr("================== ================== === ======= ================== =================="); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) { const oop obj = (oop) n->object(); const markOop mark = n->header(); ResourceMark rm; - out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT - " %s", p2i(jt), p2i(n), n->is_busy() != 0, - mark->hash() != 0, n->owner() != NULL, p2i(obj), - obj->klass()->external_name()); + out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d " + INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0, + mark->hash() != 0, n->owner() != NULL, (int)n->ref_count(), + p2i(obj), obj->klass()->external_name()); } } --- old/src/hotspot/share/runtime/synchronizer.hpp 2019-05-25 10:46:39.182883939 -0400 +++ new/src/hotspot/share/runtime/synchronizer.hpp 2019-05-25 10:46:38.318883894 -0400 @@ -32,6 +32,7 @@ #include "runtime/perfData.hpp" class ObjectMonitor; +class ObjectMonitorHandle; class ThreadsList; struct DeflateMonitorCounters { @@ -107,15 +108,16 @@ static void reenter (Handle obj, 
intptr_t recursion, TRAPS); // thread-specific and global objectMonitor free list accessors - static ObjectMonitor * omAlloc(Thread * Self); + static ObjectMonitor * omAlloc(Thread * Self, const InflateCause cause); static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc); static void omFlush(Thread * Self); // Inflate light weight monitor to heavy weight monitor - static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause); + static void inflate(ObjectMonitorHandle * omh_p, Thread * Self, oop obj, + const InflateCause cause); // This version is only for internal use - static void inflate_helper(oop obj); + static void inflate_helper(ObjectMonitorHandle * omh_p, oop obj); static const char* inflate_cause_name(const InflateCause cause); // Returns the identity hash value for an oop @@ -137,6 +139,9 @@ // Basically we deflate all monitors that are not busy. // An adaptive profile-based deflation policy could be used if needed static void deflate_idle_monitors(DeflateMonitorCounters* counters); + static void deflate_global_idle_monitors_using_JT(); + static void deflate_per_thread_idle_monitors_using_JT(); + static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self); static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters); static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters); static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters); @@ -145,10 +150,25 @@ static int deflate_monitor_list(ObjectMonitor** listheadp, ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp); + // For a given in-use monitor list: global or per-thread, deflate idle + // monitors using a JavaThread. + static int deflate_monitor_list_using_JT(ObjectMonitor** listHeadp, + ObjectMonitor** freeHeadp, + ObjectMonitor** freeTailp, + ObjectMonitor** savedMidInUsep); static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp); - static bool is_cleanup_needed(); + static bool deflate_monitor_using_JT(ObjectMonitor* mid, + ObjectMonitor** freeHeadp, + ObjectMonitor** freeTailp); + static bool is_async_deflation_needed(); + static bool is_safepoint_deflation_needed(); + static bool is_async_deflation_requested() { return _is_async_deflation_requested; } + static bool is_special_deflation_requested() { return _is_special_deflation_requested; } + static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; } + static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; } + static jlong time_since_last_async_deflation_ms(); static void oops_do(OopClosure* f); // Process oops in thread local used monitors static void thread_local_used_oops_do(Thread* thread, OopClosure* f); @@ -173,6 +193,8 @@ static int log_monitor_list_counts(outputStream * out); static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0; + static void do_safepoint_work(DeflateMonitorCounters* _counters); + private: friend class SynchronizerTest; @@ -186,6 +208,9 @@ static ObjectMonitor * volatile gOmInUseList; // count of entries in gOmInUseList static int gOmInUseCount; + static volatile bool _is_async_deflation_requested; + static volatile bool _is_special_deflation_requested; + static jlong _last_async_deflation_time_ns; // Process oops in all global used monitors (i.e. 
moribund thread's monitors) static void global_used_oops_do(OopClosure* f); --- old/src/hotspot/share/runtime/thread.cpp 2019-05-25 10:46:40.986884033 -0400 +++ new/src/hotspot/share/runtime/thread.cpp 2019-05-25 10:46:40.186883992 -0400 @@ -265,6 +265,7 @@ omFreeProvision = 32; omInUseList = NULL; omInUseCount = 0; + omShouldDeflateIdleMonitors = false; #ifdef ASSERT _visited_for_critical_count = false; --- old/src/hotspot/share/runtime/thread.hpp 2019-05-25 10:46:42.806884128 -0400 +++ new/src/hotspot/share/runtime/thread.hpp 2019-05-25 10:46:42.130884093 -0400 @@ -419,6 +419,7 @@ int omFreeProvision; // reload chunk size ObjectMonitor* omInUseList; // SLL to track monitors in circulation int omInUseCount; // length of omInUseList + volatile bool omShouldDeflateIdleMonitors; // should deflate idle monitors #ifdef ASSERT private: --- old/src/hotspot/share/runtime/vmOperations.cpp 2019-05-25 10:46:44.662884225 -0400 +++ new/src/hotspot/share/runtime/vmOperations.cpp 2019-05-25 10:46:43.786884179 -0400 @@ -41,6 +41,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/sweeper.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.inline.hpp" #include "runtime/vmOperations.hpp" @@ -471,6 +472,17 @@ } } +bool VM_Exit::doit_prologue() { + if (AsyncDeflateIdleMonitors) { + // AsyncDeflateIdleMonitors does a special deflation at the VM_Exit + // safepoint in order to reduce the in-use monitor population that + // is reported by ObjectSynchronizer::log_in_use_monitor_details() at + // VM exit. + ObjectSynchronizer::set_is_special_deflation_requested(true); + } + return true; +} + void VM_Exit::doit() { if (VerifyBeforeExit) { --- old/src/hotspot/share/runtime/vmOperations.hpp 2019-05-25 10:46:46.514884322 -0400 +++ new/src/hotspot/share/runtime/vmOperations.hpp 2019-05-25 10:46:45.746884282 -0400 @@ -498,6 +498,7 @@ } } VMOp_Type type() const { return VMOp_Exit; } + bool doit_prologue(); void doit(); }; --- old/src/hotspot/share/runtime/vmThread.cpp 2019-05-25 10:46:48.018884400 -0400 +++ new/src/hotspot/share/runtime/vmThread.cpp 2019-05-25 10:46:47.446884370 -0400 @@ -40,6 +40,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" #include "runtime/vmOperations.hpp" @@ -316,6 +317,14 @@ assert(should_terminate(), "termination flag must be set"); } + if (AsyncDeflateIdleMonitors) { + // AsyncDeflateIdleMonitors does a special deflation at the final + // safepoint in order to reduce the in-use monitor population that + // is reported by ObjectSynchronizer::log_in_use_monitor_details() at + // VM exit. + ObjectSynchronizer::set_is_special_deflation_requested(true); + } + // 4526887 let VM thread exit at Safepoint _cur_vm_operation = &halt_op; SafepointSynchronize::begin(); --- old/test/hotspot/gtest/oops/test_markOop.cpp 2019-05-25 10:46:49.854884496 -0400 +++ new/test/hotspot/gtest/oops/test_markOop.cpp 2019-05-25 10:46:49.082884456 -0400 @@ -117,6 +117,10 @@ // This is no longer biased, because ObjectLocker revokes the bias. assert_test_pattern(h_obj, "is_neutral no_hash"); + // Hash the object then print it. + intx hash = h_obj->identity_hash(); + assert_test_pattern(h_obj, "is_neutral hash=0x"); + // Wait gets the lock inflated.
{ ObjectLocker ol(h_obj, THREAD); @@ -131,14 +135,18 @@ done.wait_with_safepoint_check(THREAD); // wait till the thread is done. } - // Make the object older. Not all GCs use this field. - Universe::heap()->collect(GCCause::_java_lang_system_gc); - if (UseParallelGC) { - assert_test_pattern(h_obj, "is_neutral no_hash age 1"); - } + if (!AsyncDeflateIdleMonitors) { + // With AsyncDeflateIdleMonitors, the collect() call below + // does not guarantee monitor deflation. + // Make the object older. Not all GCs use this field. + Universe::heap()->collect(GCCause::_java_lang_system_gc); + if (UseParallelGC) { + assert_test_pattern(h_obj, "is_neutral no_hash age 1"); + } - // Hash the object then print it. - intx hash = h_obj->identity_hash(); - assert_test_pattern(h_obj, "is_neutral hash=0x"); + // Hash the object then print it. + intx hash = h_obj->identity_hash(); + assert_test_pattern(h_obj, "is_neutral hash=0x"); + } } #endif // PRODUCT --- old/test/hotspot/jtreg/runtime/logging/SafepointCleanupTest.java 2019-05-25 10:46:51.606884588 -0400 +++ new/test/hotspot/jtreg/runtime/logging/SafepointCleanupTest.java 2019-05-25 10:46:50.926884552 -0400 @@ -29,12 +29,17 @@ * @modules java.base/jdk.internal.misc * java.management * @run driver SafepointCleanupTest + * @run driver SafepointCleanupTest -XX:+AsyncDeflateIdleMonitors */ import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; public class SafepointCleanupTest { + static final String ASYNC_DISABLE_OPTION = "-XX:-AsyncDeflateIdleMonitors"; + static final String ASYNC_ENABLE_OPTION = "-XX:+AsyncDeflateIdleMonitors"; + static final String UNLOCK_DIAG_OPTION = "-XX:+UnlockDiagnosticVMOptions"; + static void analyzeOutputOn(ProcessBuilder pb) throws Exception { OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldContain("[safepoint,cleanup]"); @@ -53,19 +58,40 @@ } public static void main(String[] args) throws Exception { + String async_option; + if (args.length == 0) { + // By default test deflating idle monitors at a safepoint. + async_option = ASYNC_DISABLE_OPTION; + } else { + async_option = args[0]; + } + if (!async_option.equals(ASYNC_DISABLE_OPTION) && + !async_option.equals(ASYNC_ENABLE_OPTION)) { + throw new RuntimeException("Unknown async_option value: '" + + async_option + "'"); + } + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:safepoint+cleanup=info", + UNLOCK_DIAG_OPTION, + async_option, InnerClass.class.getName()); analyzeOutputOn(pb); pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceSafepointCleanupTime", + UNLOCK_DIAG_OPTION, + async_option, InnerClass.class.getName()); analyzeOutputOn(pb); pb = ProcessTools.createJavaProcessBuilder("-Xlog:safepoint+cleanup=off", + UNLOCK_DIAG_OPTION, + async_option, InnerClass.class.getName()); analyzeOutputOff(pb); pb = ProcessTools.createJavaProcessBuilder("-XX:-TraceSafepointCleanupTime", + UNLOCK_DIAG_OPTION, + async_option, InnerClass.class.getName()); analyzeOutputOff(pb); } --- old/test/jdk/java/rmi/server/UnicastRemoteObject/unexportObject/UnexportLeak.java 2019-05-25 10:46:53.486884686 -0400 +++ new/test/jdk/java/rmi/server/UnicastRemoteObject/unexportObject/UnexportLeak.java 2019-05-25 10:46:52.714884645 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ } /** - * Force desparate garbage collection so that all WeakReference instances + * Force desperate garbage collection so that all WeakReference instances * will be cleared. */ private static void flushRefs() { @@ -85,6 +85,9 @@ chain.addElement(hungry); } } catch (OutOfMemoryError e) { + // An inflated Java monitor can keep 'obj' alive so request + // an explicit GC to make sure things are cleaned up. + System.gc(); } } } --- old/test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java 2019-05-25 10:46:55.278884779 -0400 +++ new/test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java 2019-05-25 10:46:54.570884742 -0400 @@ -193,6 +193,15 @@ int version = (int) getVersion.invoke(clazz.getConstructor().newInstance()); Assert.assertEquals(version, JarFile.runtimeVersion().major()); } + // Very rarely this test fails on Windows due to: + // Error. failed to clean up files after test + // and this message shows the problem file (variable jimage): + // Can't delete T:\\testOutput\\test-support\\jtreg_open_test_jdk_core_tools\\scratch\\0\\myimage\\lib\\modules + // The failure happens more often with async monitor deflation, + // which suggests that an inflated monitor is keeping the above + // try-with-resources block from cleaning up in a timely + // fashion. Forcing a GC here appears to solve the problem. + System.gc(); } @Test