--- old/src/hotspot/share/runtime/synchronizer.cpp 2019-12-11 14:56:15.000000000 -0500 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2019-12-11 14:56:15.000000000 -0500 @@ -43,6 +43,7 @@ #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/osThread.hpp" +#include "runtime/safepointMechanism.inline.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" @@ -118,142 +119,149 @@ static volatile intptr_t gInflationLocks[NINFLATIONLOCKS]; // global list of blocks of monitors -PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL; +PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL; bool volatile ObjectSynchronizer::_is_async_deflation_requested = false; bool volatile ObjectSynchronizer::_is_special_deflation_requested = false; jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0; -// Global ObjectMonitor free list. Newly allocated and deflated -// ObjectMonitors are prepended here. -static ObjectMonitor* volatile g_free_list = NULL; -// Global ObjectMonitor in-use list. When a JavaThread is exiting, -// ObjectMonitors on its per-thread in-use list are prepended here. -static ObjectMonitor* volatile g_om_in_use_list = NULL; -// Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors -// is true, deflated ObjectMonitors wait on this list until after a -// handshake or a safepoint for platforms that don't support handshakes. -// After the handshake or safepoint, the deflated ObjectMonitors are -// prepended to g_free_list. -static ObjectMonitor* volatile g_wait_list = NULL; - -static volatile int g_om_free_count = 0; // # on g_free_list -static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list -static volatile int g_om_population = 0; // # Extant -- in circulation -static volatile int g_om_wait_count = 0; // # on g_wait_list +struct ListGlobals { + char _pad_prefix[OM_CACHE_LINE_SIZE]; + // These are highly shared list related variables. + // To avoid false-sharing they need to be the sole occupants of a cache line. + + // Global ObjectMonitor free list. Newly allocated and deflated + // ObjectMonitors are prepended here. + ObjectMonitor* free_list; + DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*)); + + // Global ObjectMonitor in-use list. When a JavaThread is exiting, + // ObjectMonitors on its per-thread in-use list are prepended here. + ObjectMonitor* in_use_list; + DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*)); + + // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors + // is true, deflated ObjectMonitors wait on this list until after a + // handshake or a safepoint for platforms that don't support handshakes. + // After the handshake or safepoint, the deflated ObjectMonitors are + // prepended to free_list. 
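
The padding idiom in this struct keeps each hot field as the sole occupant of a cache line, so CAS traffic on one list head or counter cannot invalidate the line holding its neighbors. A minimal stand-alone sketch of the same layout trick, using plain alignas() and an assumed 64-byte line size instead of HotSpot's OM_CACHE_LINE_SIZE and DEFINE_PAD_MINUS_SIZE macro:

#include <atomic>
#include <cstddef>

static const size_t CACHE_LINE_SIZE = 64;   // assumed; real code should query the platform

struct Node;                                // stand-in for ObjectMonitor

// Over-aligning every member to the line size forces the compiler to pad
// each one out to its own cache line; the static_assert checks the layout.
struct PaddedListGlobals {
  alignas(CACHE_LINE_SIZE) std::atomic<Node*> free_list;
  alignas(CACHE_LINE_SIZE) std::atomic<Node*> in_use_list;
  alignas(CACHE_LINE_SIZE) std::atomic<int>   free_count;
  alignas(CACHE_LINE_SIZE) std::atomic<int>   in_use_count;
};
static_assert(sizeof(PaddedListGlobals) == 4 * CACHE_LINE_SIZE,
              "each member must occupy exactly one cache line");
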
+ ObjectMonitor* wait_list; + DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*)); + + int free_count; // # on free_list + DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int)); + + int in_use_count; // # on in_use_list + DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int)); + + int population; // # Extant -- in circulation + DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int)); + + int wait_count; // # on wait_list + DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int)); +}; +static ListGlobals LVars; #define CHAINMARKER (cast_to_oop(-1)) -// =====================> List Management functions +// =====================> Spinlock functions -// Return true if the ObjectMonitor's next field is marked. +// ObjectMonitors are not lockable outside of this file. We use spinlocks +// implemented using a bit in the _next_om field instead of the heavier +// weight locking mechanisms for faster list management. + +#define OM_LOCK_BIT 0x1 + +// Return true if the ObjectMonitor is locked. // Otherwise returns false. -static bool is_next_marked(ObjectMonitor* om) { - // Use load_acquire() since _next_om fields are updated with a - // release_store(). - return ((intptr_t)Atomic::load_acquire(&om->_next_om) & 0x1) != 0; +static bool is_locked(ObjectMonitor* om) { + return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT; } -// Mark an ObjectMonitor* and return it. Note: the om parameter -// may or may not have been marked originally. +// Mark an ObjectMonitor* with OM_LOCK_BIT and return it. +// Note: the om parameter may or may not have been marked originally. static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) { - return (ObjectMonitor*)((intptr_t)om | 0x1); + return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT); } -// Mark the next field in an ObjectMonitor. If marking was successful, -// then the unmarked next field is returned via parameter and true is -// returned. Otherwise false is returned. -static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) { - // Get current next field without any marking value. - ObjectMonitor* next = (ObjectMonitor*)((intptr_t)om->_next_om & ~0x1); +// Try to lock an ObjectMonitor. Returns true if locking was successful. +// Otherwise returns false. +static bool try_om_lock(ObjectMonitor* om) { + // Get current next field without any OM_LOCK_BIT value. + ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT); if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) { - return false; // Could not mark the next field or it was already marked. + return false; // Cannot lock the ObjectMonitor. } - *next_p = next; return true; } -// Loop until we mark the next field in an ObjectMonitor. The unmarked -// next field is returned. -static ObjectMonitor* mark_next_loop(ObjectMonitor* om) { - ObjectMonitor* next; +// Lock an ObjectMonitor. +static void om_lock(ObjectMonitor* om) { while (true) { - if (mark_next(om, &next)) { - // Marked om's next field so return the unmarked value. - return next; + if (try_om_lock(om)) { + return; } } } -// Set the next field in an ObjectMonitor to the specified value. -// The caller of set_next() must be the same thread that marked the -// ObjectMonitor. -static void set_next(ObjectMonitor* om, ObjectMonitor* value) { - Atomic::release_store(&om->_next_om, value); +// Unlock an ObjectMonitor. 
+static void om_unlock(ObjectMonitor* om) { + ObjectMonitor* next = Atomic::load(&om->_next_om); + guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT + " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT); + + next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT. + Atomic::store(&om->_next_om, next); } -// Mark the next field in the list head ObjectMonitor. If marking was -// successful, then the mid and the unmarked next field are returned -// via parameter and true is returned. Otherwise false is returned. -static bool mark_list_head(ObjectMonitor* volatile * list_p, - ObjectMonitor** mid_p, ObjectMonitor** next_p) { +// Get the list head after locking it. Returns the list head or NULL +// if the list is empty. +static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) { while (true) { - ObjectMonitor* mid = *list_p; + ObjectMonitor* mid = Atomic::load(list_p); if (mid == NULL) { - return false; // The list is empty so nothing to mark. + return NULL; // The list is empty. } - if (mark_next(mid, next_p)) { - if (*list_p != mid) { + if (try_om_lock(mid)) { + if (Atomic::load(list_p) != mid) { // The list head changed so we have to retry. - set_next(mid, *next_p); // unmark mid + om_unlock(mid); continue; } - // We marked next field to guard against races. - *mid_p = mid; - return true; + return mid; } } } // Return the unmarked next field in an ObjectMonitor. Note: the next -// field may or may not have been marked originally. +// field may or may not have been marked with OM_LOCK_BIT originally. static ObjectMonitor* unmarked_next(ObjectMonitor* om) { - // Use load_acquire() since _next_om fields are updated with a - // release_store(). - return (ObjectMonitor*)((intptr_t)Atomic::load_acquire(&om->_next_om) & ~0x1); + return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT); } -// Mark the next ObjectMonitor for traversal. The current ObjectMonitor -// is unmarked after the next ObjectMonitor is marked. *cur_p and *next_p -// are updated to their next values in the list traversal. *cur_p is set -// to NULL when the end of the list is reached. -static void mark_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) { - ObjectMonitor* prev = *cur_p; // Save current for unmarking. - if (*next_p == NULL) { // Reached the end of the list. - set_next(prev, NULL); // Unmark previous. - *cur_p = NULL; // Tell the caller we are done. - return; - } - (void)mark_next_loop(*next_p); // Mark next. - set_next(prev, *next_p); // Unmark previous. - *cur_p = *next_p; // Update current. - *next_p = unmarked_next(*cur_p); // Update next. +#undef OM_LOCK_BIT + + +// =====================> List Management functions + +// Set the next field in an ObjectMonitor to the specified value. +static void set_next(ObjectMonitor* om, ObjectMonitor* value) { + Atomic::store(&om->_next_om, value); } // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is // the last ObjectMonitor in the list and there are 'count' on the list. // Also updates the specified *count_p. static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail, - int count, ObjectMonitor* volatile* list_p, - volatile int* count_p) { + int count, ObjectMonitor** list_p, + int* count_p) { while (true) { - ObjectMonitor* cur = *list_p; + ObjectMonitor* cur = Atomic::load(list_p); // Prepend list to *list_p. 
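
The try_om_lock()/om_lock()/om_unlock() trio above folds a spinlock into the low bit of the next pointer, which is always zero for naturally aligned allocations. A rough stand-alone sketch of that tag-bit protocol using std::atomic (the Node type and helpers here are illustrative, not HotSpot's Atomic API):

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};
};

static const uintptr_t LOCK_BIT = 0x1;      // free for use on aligned pointers

// Try to lock n by setting the low bit of its next field; fails if the
// bit is already set or if next changed under us.
static bool try_lock(Node* n) {
  Node* clean = (Node*)((uintptr_t)n->next.load() & ~LOCK_BIT);
  return n->next.compare_exchange_strong(clean, (Node*)((uintptr_t)clean | LOCK_BIT));
}

static void lock(Node* n) {
  while (!try_lock(n)) { }                  // spin; holders only pin a node briefly
}

// Only the lock holder may unlock, so a plain load/store pair is enough.
static void unlock(Node* n) {
  n->next.store((Node*)((uintptr_t)n->next.load() & ~LOCK_BIT));
}

Note that storing any clean pointer into next doubles as an unlock, which is why set_next() in the code above both links and unlocks in one step.
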
- ObjectMonitor* next = NULL; - if (!mark_next(tail, &next)) { - continue; // failed to mark next field so try it all again + if (!try_om_lock(tail)) { + continue; // failed to lock tail so try it all again } - set_next(tail, cur); // tail now points to cur (and unmarks tail) + set_next(tail, cur); // tail now points to cur (and unlocks tail) if (cur == NULL) { // No potential race with takers or other prependers since // *list_p is empty. @@ -264,93 +272,89 @@ } // Implied else: try it all again } else { - // Try to mark next field to guard against races: - if (!mark_next(cur, &next)) { - continue; // failed to mark next field so try it all again + if (!try_om_lock(cur)) { + continue; // failed to lock cur so try it all again } - // We marked the next field so try to switch *list_p to the list value. + // We locked cur so try to switch *list_p to the list value. if (Atomic::cmpxchg(list_p, cur, list) != cur) { - // The list head has changed so unmark the next field and try again: - set_next(cur, next); + // The list head has changed so unlock cur and try again: + om_unlock(cur); continue; } Atomic::add(count_p, count); - set_next(cur, next); // unmark next field + om_unlock(cur); break; } } } // Prepend a newly allocated block of ObjectMonitors to g_block_list and -// g_free_list. Also updates g_om_population and g_om_free_count. +// LVars.free_list. Also updates LVars.population and LVars.free_count. void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) { // First we handle g_block_list: while (true) { - PaddedObjectMonitor* cur = g_block_list; + PaddedObjectMonitor* cur = Atomic::load(&g_block_list); // Prepend new_blk to g_block_list. The first ObjectMonitor in // a block is reserved for use as linkage to the next block. new_blk[0]._next_om = cur; if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) { // Successfully switched g_block_list to the new_blk value. - Atomic::add(&g_om_population, _BLOCKSIZE - 1); + Atomic::add(&LVars.population, _BLOCKSIZE - 1); break; } // Implied else: try it all again } - // Second we handle g_free_list: + // Second we handle LVars.free_list: prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1, - &g_free_list, &g_om_free_count); + &LVars.free_list, &LVars.free_count); } -// Prepend a list of ObjectMonitors to g_free_list. 'tail' is the last +// Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last // ObjectMonitor in the list and there are 'count' on the list. Also -// updates g_om_free_count. -static void prepend_list_to_g_free_list(ObjectMonitor* list, - ObjectMonitor* tail, int count) { - prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count); +// updates LVars.free_count. +static void prepend_list_to_global_free_list(ObjectMonitor* list, + ObjectMonitor* tail, int count) { + prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count); } -// Prepend a list of ObjectMonitors to g_wait_list. 'tail' is the last +// Prepend a list of ObjectMonitors to LVars.wait_list. 'tail' is the last // ObjectMonitor in the list and there are 'count' on the list. Also -// updates g_om_wait_count. -static void prepend_list_to_g_wait_list(ObjectMonitor* list, - ObjectMonitor* tail, int count) { +// updates LVars.wait_count. 
+static void prepend_list_to_global_wait_list(ObjectMonitor* list, + ObjectMonitor* tail, int count) { assert(HandshakeAfterDeflateIdleMonitors, "sanity check"); - prepend_list_to_common(list, tail, count, &g_wait_list, &g_om_wait_count); + prepend_list_to_common(list, tail, count, &LVars.wait_list, &LVars.wait_count); } -// Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last +// Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last // ObjectMonitor in the list and there are 'count' on the list. Also -// updates g_om_in_use_list. -static void prepend_list_to_g_om_in_use_list(ObjectMonitor* list, - ObjectMonitor* tail, int count) { - prepend_list_to_common(list, tail, count, &g_om_in_use_list, &g_om_in_use_count); +// updates LVars.in_use_list. +static void prepend_list_to_global_in_use_list(ObjectMonitor* list, + ObjectMonitor* tail, int count) { + prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count); } // Prepend an ObjectMonitor to the specified list. Also updates // the specified counter. -static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p, - int volatile * count_p) { +static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p, + int* count_p) { while (true) { - (void)mark_next_loop(m); // mark m so we can safely update its next field + om_lock(m); // Lock m so we can safely update its next field. ObjectMonitor* cur = NULL; - ObjectMonitor* next = NULL; - // Mark the list head to guard against A-B-A race: - if (mark_list_head(list_p, &cur, &next)) { - // List head is now marked so we can safely switch it. - set_next(m, cur); // m now points to cur (and unmarks m) - *list_p = m; // Switch list head to unmarked m. - // mark_list_head() used cmpxchg() above, switching list head can be lazier: - OrderAccess::storestore(); - set_next(cur, next); // Unmark the previous list head. + // Lock the list head to guard against A-B-A race: + if ((cur = get_list_head_locked(list_p)) != NULL) { + // List head is now locked so we can safely switch it. + set_next(m, cur); // m now points to cur (and unlocks m) + Atomic::store(list_p, m); // Switch list head to unlocked m. + om_unlock(cur); break; } // The list is empty so try to set the list head. assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur)); - set_next(m, cur); // m now points to NULL (and unmarks m) + set_next(m, cur); // m now points to NULL (and unlocks m) if (Atomic::cmpxchg(list_p, cur, m) == cur) { - // List head is now unmarked m. + // List head is now unlocked m. break; } // Implied else: try it all again @@ -372,31 +376,29 @@ // Take an ObjectMonitor from the start of the specified list. Also // decrements the specified counter. Returns NULL if none are available. -static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p, - int volatile * count_p) { - ObjectMonitor* next = NULL; +static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p, + int* count_p) { ObjectMonitor* take = NULL; - // Mark the list head to guard against A-B-A race: - if (!mark_list_head(list_p, &take, &next)) { + // Lock the list head to guard against A-B-A race: + if ((take = get_list_head_locked(list_p)) == NULL) { return NULL; // None are available. 
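
prepend_to_common() above avoids the classic A-B-A window by locking the current head before swinging the list-head pointer: once the head is locked, the discipline that every taker and prepender must lock the head first guarantees it cannot move. A simplified version over the Node sketch from earlier (try_lock/lock/unlock as sketched there):

// Prepend m to the list at *head_p; *count_p tracks the list length.
static void prepend(Node* m, std::atomic<Node*>* head_p, std::atomic<int>* count_p) {
  lock(m);                                   // keep m's next field stable
  while (true) {
    Node* cur = head_p->load();
    if (cur == nullptr) {
      m->next.store(nullptr);                // m -> NULL (also unlocks m)
      if (head_p->compare_exchange_strong(cur, m)) {
        break;                               // empty-list case done
      }
      lock(m);                               // lost a race; start over
    } else if (try_lock(cur)) {
      if (head_p->load() != cur) {
        unlock(cur);                         // head moved before we got the lock
        continue;
      }
      m->next.store(cur);                    // m -> cur (also unlocks m)
      head_p->store(m);                      // plain store is safe: cur is locked
      unlock(cur);
      break;
    }
    // else: head is locked by another thread; retry
  }
  count_p->fetch_add(1);
}
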
} - // Switch marked list head to next (which unmarks the list head, but - // leaves take marked): - *list_p = next; + ObjectMonitor* next = unmarked_next(take); + // Switch locked list head to next (which unlocks the list head, but + // leaves take locked): + Atomic::store(list_p, next); Atomic::dec(count_p); - // mark_list_head() used cmpxchg() above, switching list head can be lazier: - OrderAccess::storestore(); - // Unmark take, but leave the next value for any lagging list + // Unlock take, but leave the next value for any lagging list // walkers. It will get cleaned up when take is prepended to // the in-use list: - set_next(take, next); + om_unlock(take); return take; } -// Take an ObjectMonitor from the start of the global free-list. Also -// updates g_om_free_count. Returns NULL if none are available. -static ObjectMonitor* take_from_start_of_g_free_list() { - return take_from_start_of_common(&g_free_list, &g_om_free_count); +// Take an ObjectMonitor from the start of the LVars.free_list. Also +// updates LVars.free_count. Returns NULL if none are available. +static ObjectMonitor* take_from_start_of_global_free_list() { + return take_from_start_of_common(&LVars.free_list, &LVars.free_count); } // Take an ObjectMonitor from the start of a per-thread free-list. @@ -1030,7 +1032,10 @@ } monitor = omh.om_ptr(); temp = monitor->header(); - assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); + // Allow for a lagging install_displaced_markword_in_object() to + // have marked the ObjectMonitor's header/dmw field. + assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()), + "invariant: header=" INTPTR_FORMAT, temp.value()); hash = temp.hash(); if (hash != 0) { // if it has a hash, just return it return hash; @@ -1062,24 +1067,36 @@ monitor = omh.om_ptr(); // Load ObjectMonitor's header/dmw field and see if it has a hash. mark = monitor->header(); - assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); + // Allow for a lagging install_displaced_markword_in_object() to + // have marked the ObjectMonitor's header/dmw field. + assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()), + "invariant: header=" INTPTR_FORMAT, mark.value()); hash = mark.hash(); if (hash == 0) { // if it does not have a hash hash = get_next_hash(self, obj); // get a new hash temp = mark.copy_set_hash(hash); // merge the hash into header + if (AsyncDeflateIdleMonitors && temp.is_marked()) { + // A lagging install_displaced_markword_in_object() has marked + // the ObjectMonitor's header/dmw field. We clear it to avoid + // any confusion if we are able to set the hash. + temp.set_unmarked(); + } assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value()); test = markWord(v); if (test != mark) { // The attempt to update the ObjectMonitor's header/dmw field // did not work. This can happen if another thread managed to - // merge in the hash just before our cmpxchg(). - // ObjectMonitor::install_displaced_markword_in_object() - // does mark the header/dmw field as part of async deflation, - // but that protocol cannot happen now due to the - // ObjectMonitorHandle above. + // merge in the hash just before our cmpxchg(). With async + // deflation, a lagging install_displaced_markword_in_object() + // could have just marked or just unmarked the header/dmw field. 
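
The surrounding code is the standard publish-with-cmpxchg pattern: read the header, build the updated value locally, try to install it atomically, and loop when a racing writer wins. A stripped-down sketch of that pattern on a bare header word, with a hypothetical compute_hash() in place of get_next_hash() and the markWord bit layout reduced to a shift and mask:

#include <atomic>
#include <cstdint>

extern uintptr_t compute_hash();             // hypothetical; assumed to return non-zero

// Install a hash into *header_p unless one is already present.
// Returns whichever hash ends up published.
static uintptr_t install_hash(std::atomic<uintptr_t>* header_p,
                              uintptr_t hash_mask, unsigned hash_shift) {
  while (true) {
    uintptr_t old_word = header_p->load();
    uintptr_t hash = (old_word >> hash_shift) & hash_mask;
    if (hash != 0) {
      return hash;                           // another thread already set one
    }
    hash = compute_hash() & hash_mask;
    uintptr_t new_word = old_word | (hash << hash_shift);
    if (header_p->compare_exchange_strong(old_word, new_word)) {
      return hash;                           // our hash is now published
    }
    // cmpxchg failed: the header changed underneath us (a racing hash
    // install, or a transient mark from async deflation); retry.
  }
}
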
// If we add any new usages of the header/dmw field, this code // will need to be updated. + if (AsyncDeflateIdleMonitors) { + // Since async deflation gives us two possible reasons for + // the cmpxchg() to fail, it is easier to simply retry. + continue; + } hash = test.hash(); assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value()); assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash"); @@ -1239,7 +1256,7 @@ // Visitors ... void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { - PaddedObjectMonitor* block = g_block_list; + PaddedObjectMonitor* block = Atomic::load(&g_block_list); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); for (int i = _BLOCKSIZE - 1; i > 0; i--) { @@ -1254,23 +1271,22 @@ closure->do_monitor(mid); } } - // unmarked_next() is not needed with g_block_list (no next field - // marking) and no load_acquire() needed because _next_om is - // updated before g_block_list is changed with cmpxchg(). - block = (PaddedObjectMonitor*)block->_next_om; + // unmarked_next() is not needed with g_block_list (no locking + // used with block linkage _next_om fields). + block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om); } } static bool monitors_used_above_threshold() { - if (g_om_population == 0) { + if (Atomic::load(&LVars.population) == 0) { return false; } if (MonitorUsedDeflationThreshold > 0) { - int monitors_used = g_om_population - g_om_free_count; + int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count); if (HandshakeAfterDeflateIdleMonitors) { - monitors_used -= g_om_wait_count; + monitors_used -= Atomic::load(&LVars.wait_count); } - int monitor_usage = (monitors_used * 100LL) / g_om_population; + int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population); return monitor_usage > MonitorUsedDeflationThreshold; } return false; @@ -1301,9 +1317,9 @@ _last_async_deflation_time_ns = os::javaTimeNanos(); return true; } - int monitors_used = g_om_population - g_om_free_count; + int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count); if (HandshakeAfterDeflateIdleMonitors) { - monitors_used -= g_om_wait_count; + monitors_used -= Atomic::load(&LVars.wait_count); } if (is_MonitorBound_exceeded(monitors_used)) { // Not enough ObjectMonitors on the global free list. @@ -1348,7 +1364,7 @@ void ObjectSynchronizer::global_used_oops_do(OopClosure* f) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - list_oops_do(g_om_in_use_list, g_om_in_use_count, f); + list_oops_do(Atomic::load(&LVars.in_use_list), Atomic::load(&LVars.in_use_count), f); } void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) { @@ -1373,18 +1389,18 @@ // ----------------------------------------------------------------------------- // ObjectMonitor Lifecycle // ----------------------- -// Inflation unlinks monitors from the global g_free_list and -// associates them with objects. Deflation -- which occurs at -// STW-time -- disassociates idle monitors from objects. Such -// scavenged monitors are returned to the g_free_list. +// Inflation unlinks monitors from LVars.free_list or a per-thread free +// list and associates them with objects. Deflation -- which occurs at +// STW-time or asynchronously -- disassociates idle monitors from objects. +// Such scavenged monitors are returned to the LVars.free_list.
// // ObjectMonitors reside in type-stable memory (TSM) and are immortal. // // Lifecycle: -// -- unassigned and on the global free list -// -- unassigned and on a thread's private om_free_list +// -- unassigned and on the LVars.free_list +// -- unassigned and on a per-thread free list // -- assigned to an object. The object is inflated and the mark refers -// to the objectmonitor. +// to the ObjectMonitor. // Constraining monitor pool growth via MonitorBound ... @@ -1406,7 +1422,7 @@ // // When safepoint deflation is being used and MonitorBound is set, the // boundry applies to -// (g_om_population - g_om_free_count) +// (LVars.population - LVars.free_count) // i.e., if there are not enough ObjectMonitors on the global free list, // then a safepoint deflation is induced. Picking a good MonitorBound value // is non-trivial. @@ -1462,17 +1478,17 @@ return m; } - // 2: try to allocate from the global g_free_list + // 2: try to allocate from the global LVars.free_list // CONSIDER: use muxTry() instead of muxAcquire(). // If the muxTry() fails then drop immediately into case 3. // If we're using thread-local free lists then try // to reprovision the caller's free list. - if (g_free_list != NULL) { + if (Atomic::load(&LVars.free_list) != NULL) { // Reprovision the thread's om_free_list. // Use bulk transfers to reduce the allocation rate and heat // on various locks. for (int i = self->om_free_provision; --i >= 0;) { - ObjectMonitor* take = take_from_start_of_g_free_list(); + ObjectMonitor* take = take_from_start_of_global_free_list(); if (take == NULL) { break; // No more are available. } @@ -1489,7 +1505,9 @@ // proper value. Atomic::add(&take->_ref_count, max_jint); - DEBUG_ONLY(jint l_ref_count = take->ref_count();) +#ifdef ASSERT + jint l_ref_count = take->ref_count(); +#endif assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d", l_ref_count, take->ref_count()); } @@ -1505,7 +1523,7 @@ if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE; if (!AsyncDeflateIdleMonitors && - is_MonitorBound_exceeded(g_om_population - g_om_free_count)) { + is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) { // Not enough ObjectMonitors on the global free list. // We can't safely induce a STW safepoint from om_alloc() as our thread // state may not be appropriate for such activities and callers may hold @@ -1583,45 +1601,44 @@ // we have to remove 'm' from the in-use list first (as needed). if (from_per_thread_alloc) { // Need to remove 'm' from om_in_use_list. - // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go + // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go // protocol because async deflation can do list deletions in parallel. ObjectMonitor* cur_mid_in_use = NULL; ObjectMonitor* mid = NULL; ObjectMonitor* next = NULL; bool extracted = false; - if (!mark_list_head(&self->om_in_use_list, &mid, &next)) { + if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) { fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self)); } + next = unmarked_next(mid); while (true) { if (m == mid) { // We found 'm' on the per-thread in-use list so try to extract it. if (cur_mid_in_use == NULL) { - // mid is the list head and it is marked. 
Switch the list head - // to next which unmarks the list head, but leaves mid marked: - self->om_in_use_list = next; - // mark_list_head() used cmpxchg() above, switching list head can be lazier: - OrderAccess::storestore(); + // mid is the list head and it is locked. Switch the list head + // to next which unlocks the list head, but leaves mid locked: + Atomic::store(&self->om_in_use_list, next); } else { - // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's - // next field to next which unmarks cur_mid_in_use, but leaves - // mid marked: - Atomic::release_store(&cur_mid_in_use->_next_om, next); + // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's + // next field to next which unlocks cur_mid_in_use, but leaves + // mid locked: + set_next(cur_mid_in_use, next); } extracted = true; Atomic::dec(&self->om_in_use_count); - // Unmark mid, but leave the next value for any lagging list + // Unlock mid, but leave the next value for any lagging list // walkers. It will get cleaned up when mid is prepended to // the thread's free list: - set_next(mid, next); + om_unlock(mid); break; } if (cur_mid_in_use != NULL) { - set_next(cur_mid_in_use, mid); // umark cur_mid_in_use + om_unlock(cur_mid_in_use); } - // The next cur_mid_in_use keeps mid's marked next field so + // The next cur_mid_in_use keeps mid's locked state so // that it is stable for a possible next field change. It - // cannot be deflated while it is marked. + // cannot be deflated while it is locked. cur_mid_in_use = mid; mid = next; if (mid == NULL) { @@ -1629,8 +1646,9 @@ fatal("must find m=" INTPTR_FORMAT "on om_in_use_list=" INTPTR_FORMAT, p2i(m), p2i(self->om_in_use_list)); } - // Mark mid's next field so we can possibly extract it: - next = mark_next_loop(mid); + // Lock mid so we can possibly extract it: + om_lock(mid); + next = unmarked_next(mid); } } @@ -1665,12 +1683,11 @@ int in_use_count = 0; ObjectMonitor* in_use_list = NULL; ObjectMonitor* in_use_tail = NULL; - ObjectMonitor* next = NULL; // An async deflation thread checks to see if the target thread // is exiting, but if it has made it past that check before we // started exiting, then it is racing to get to the in-use list. - if (mark_list_head(&self->om_in_use_list, &in_use_list, &next)) { + if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) { // At this point, we have marked the in-use list head so an // async deflation thread cannot come in after us. If an async // deflation thread is ahead of us, then we'll detect that and @@ -1679,17 +1696,17 @@ // The thread is going away, however the ObjectMonitors on the // om_in_use_list may still be in-use by other threads. Link // them to in_use_tail, which will be linked into the global - // in-use list g_om_in_use_list below. + // in-use list (LVars.in_use_list) below. // // Account for the in-use list head before the loop since it is // already marked (by this thread): in_use_tail = in_use_list; in_use_count++; for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) { - if (is_next_marked(cur_om)) { - // This next field is marked so there must be an async deflater + if (is_locked(cur_om)) { + // cur_om is locked so there must be an async deflater // thread ahead of us so we'll give it a chance to finish. - while (is_next_marked(cur_om)) { + while (is_locked(cur_om)) { os::naked_short_sleep(1); } // Refetch the possibly changed next field and try again. 
@@ -1714,11 +1731,9 @@ "match: l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count); self->om_in_use_count = 0; - // Clear the in-use list head (which also unmarks it): - self->om_in_use_list = (ObjectMonitor*)NULL; - // mark_list_head() used cmpxchg() above, clearing the disconnected list head can be lazier: - OrderAccess::storestore(); - set_next(in_use_list, next); + // Clear the in-use list head (which also unlocks it): + Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL); + om_unlock(in_use_list); } int free_count = 0; @@ -1726,7 +1741,7 @@ ObjectMonitor* free_tail = NULL; if (free_list != NULL) { // The thread is going away. Set 'free_tail' to the last per-thread free - // monitor which will be linked to g_free_list below. + // monitor which will be linked to LVars.free_list below. stringStream ss; for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) { free_count++; @@ -1740,16 +1755,15 @@ "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count); self->om_free_count = 0; - self->om_free_list = NULL; - OrderAccess::storestore(); // Lazier memory is okay for list walkers. + Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL); } if (free_tail != NULL) { - prepend_list_to_g_free_list(free_list, free_tail, free_count); + prepend_list_to_global_free_list(free_list, free_tail, free_count); } if (in_use_tail != NULL) { - prepend_list_to_g_om_in_use_list(in_use_list, in_use_tail, in_use_count); + prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count); } LogStreamHandle(Debug, monitorinflation) lsh_debug; @@ -2137,7 +2151,7 @@ // to fix the linkages in its context. ObjectMonitor* prevtail = *free_tail_p; // Should have been cleaned up by the caller: - // Note: Should not have to mark prevtail here since we're at a + // Note: Should not have to lock prevtail here since we're at a // safepoint and ObjectMonitors on the local free list should // not be accessed in parallel. assert(prevtail->_next_om == NULL, "must be NULL: _next_om=" @@ -2256,10 +2270,10 @@ // to fix the linkages in its context. ObjectMonitor* prevtail = *free_tail_p; // Should have been cleaned up by the caller: - ObjectMonitor* next = mark_next_loop(prevtail); + om_lock(prevtail); assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(unmarked_next(prevtail))); - set_next(prevtail, mid); // prevtail now points to mid (and is unmarked) + set_next(prevtail, mid); // prevtail now points to mid (and is unlocked) } *free_tail_p = mid; @@ -2280,7 +2294,9 @@ // proper value (which may not be what we saw above): Atomic::add(&mid->_ref_count, max_jint); - DEBUG_ONLY(jint l_ref_count = mid->ref_count();) +#ifdef ASSERT + jint l_ref_count = mid->ref_count(); +#endif assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d", l_ref_count, mid->ref_count()); return false; @@ -2309,8 +2325,8 @@ // See also ParallelSPCleanupTask and // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and // Threads::parallel_java_threads_do() in thread.cpp. 
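
deflate_monitor_list() below walks the in-use list with the simpler lock-mid-as-we-go protocol: only the node being examined is locked, which is enough because no other thread can delete nodes at a safepoint. A sketch of that walk-and-extract loop, continuing the Node sketch from earlier, with a hypothetical should_extract() predicate standing in for deflate_monitor():

extern bool should_extract(Node* n);         // hypothetical predicate

// Walk *head_p, unlinking every node that should_extract() accepts onto a
// local list (returned in LIFO order). Single mutator assumed, as at a
// safepoint; that is why prev may be left unlocked.
static Node* extract_matching(std::atomic<Node*>* head_p, std::atomic<int>* count_p) {
  Node* extracted = nullptr;                 // local list of unlinked nodes
  Node* prev = nullptr;                      // last kept node (unlocked is safe here)
  Node* mid = head_p->load();
  if (mid == nullptr) {
    return nullptr;                          // empty list
  }
  lock(mid);
  while (true) {
    Node* next = (Node*)((uintptr_t)mid->next.load() & ~LOCK_BIT);
    if (should_extract(mid)) {
      if (prev == nullptr) {
        head_p->store(next);                 // mid was the head
      } else {
        prev->next.store(next);              // splice mid out
      }
      count_p->fetch_sub(1);
      mid->next.store(extracted);            // push mid onto the local list
      extracted = mid;                       // (the store also unlocks mid)
    } else {
      unlock(mid);
      prev = mid;
    }
    mid = next;
    if (mid == nullptr) {
      break;                                 // reached the end of the list
    }
    lock(mid);                               // lock the next node to examine it
  }
  return extracted;
}
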
-int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor* volatile * list_p, - int volatile * count_p, +int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p, + int* count_p, ObjectMonitor** free_head_p, ObjectMonitor** free_tail_p) { ObjectMonitor* cur_mid_in_use = NULL; @@ -2318,11 +2334,12 @@ ObjectMonitor* next = NULL; int deflated_count = 0; - // We use the simpler mark-mid-as-we-go protocol since there are no + // We use the simpler lock-mid-as-we-go protocol since there are no // parallel list deletions since we are at a safepoint. - if (!mark_list_head(list_p, &mid, &next)) { + if ((mid = get_list_head_locked(list_p)) == NULL) { return 0; // The list is empty so nothing to deflate. } + next = unmarked_next(mid); while (true) { oop obj = (oop) mid->object(); @@ -2331,39 +2348,34 @@ // free_tail_p as needed. Finish the move to the local free list // by unlinking mid from the global or per-thread in-use list. if (cur_mid_in_use == NULL) { - // mid is the list head and it is marked. Switch the list head - // to next which unmarks the list head, but leaves mid marked: - *list_p = next; - // mark_list_head() used cmpxchg() above, switching list head can be lazier: - OrderAccess::storestore(); + // mid is the list head and it is locked. Switch the list head + // to next which unlocks the list head, but leaves mid locked: + Atomic::store(list_p, next); } else { - // mid is marked. Switch cur_mid_in_use's next field to next + // mid is locked. Switch cur_mid_in_use's next field to next // which is safe because we have no parallel list deletions, - // but we leave mid marked: - Atomic::release_store(&cur_mid_in_use->_next_om, next); + // but we leave mid locked: + set_next(cur_mid_in_use, next); } // At this point mid is disconnected from the in-use list so - // its marked next field no longer has any effects. + // its lock no longer has any effects on the in-use list. deflated_count++; Atomic::dec(count_p); // mid is current tail in the free_head_p list so NULL terminate it - // (which also unmarks it): + // (which also unlocks it): set_next(mid, NULL); - - // All the list management is done so move on to the next one: - mid = next; } else { - set_next(mid, next); // unmark next field - - // All the list management is done so move on to the next one: + om_unlock(mid); cur_mid_in_use = mid; - mid = next; } + // All the list management is done so move on to the next one: + mid = next; if (mid == NULL) { break; // Reached end of the list so nothing more to deflate. } - // Mark mid's next field so we can possibly deflate it: - next = mark_next_loop(mid); + // Lock mid so we can possibly deflate it: + om_lock(mid); + next = unmarked_next(mid); } return deflated_count; } @@ -2374,13 +2386,13 @@ // If a safepoint has started, then we save state via saved_mid_in_use_p // and return to the caller to honor the safepoint. 
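
The saved_mid_in_use_p protocol described above lets a JavaThread work through a long list in bounded chunks: when the VM requests a safepoint, the walker parks its position at a node, blocks, and later resumes from that node instead of rescanning from the head. A minimal sketch of the resumable-cursor idea, with hypothetical poll_should_yield() and block_at_safepoint() standing in for SafepointMechanism::should_block() and ThreadBlockInVM:

extern bool poll_should_yield();             // hypothetical safepoint poll
extern void block_at_safepoint();            // hypothetical: block until the VM resumes us
extern void process(Node* n);                // hypothetical per-node work

// Walk the list at *head_p in chunks. *cursor_p carries the resume point
// between calls: NULL means start from the head. Returns true when the
// whole list has been processed.
static bool walk_resumable(std::atomic<Node*>* head_p, Node** cursor_p) {
  Node* cur = (*cursor_p != nullptr) ? *cursor_p : head_p->load();
  while (cur != nullptr) {
    process(cur);
    Node* next = (Node*)((uintptr_t)cur->next.load() & ~LOCK_BIT);
    if (next != nullptr && poll_should_yield()) {
      *cursor_p = next;                      // remember where to pick up again
      block_at_safepoint();                  // let the VM reach its safepoint
      return false;                          // caller re-invokes to continue
    }
    cur = next;
  }
  *cursor_p = nullptr;                       // done; reset for the next full pass
  return true;
}

The real protocol is stricter: the saved node is kept locked and must be "old" so it cannot be freed or recycled while the walker is parked. The sketch omits those lifetime details.
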
// -int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor* volatile * list_p, - int volatile * count_p, +int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p, + int* count_p, ObjectMonitor** free_head_p, ObjectMonitor** free_tail_p, ObjectMonitor** saved_mid_in_use_p) { assert(AsyncDeflateIdleMonitors, "sanity check"); - assert(Thread::current()->is_Java_thread(), "precondition"); + JavaThread* self = JavaThread::current(); ObjectMonitor* cur_mid_in_use = NULL; ObjectMonitor* mid = NULL; @@ -2388,30 +2400,33 @@ ObjectMonitor* next_next = NULL; int deflated_count = 0; - // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go + // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go // protocol because om_release() can do list deletions in parallel. - // We also mark-next-next-as-we-go to prevent an om_flush() that is + // We also lock-next-next-as-we-go to prevent an om_flush() that is // behind this thread from passing us. if (*saved_mid_in_use_p == NULL) { // No saved state so start at the beginning. - // Mark the list head's next field so we can possibly deflate it: - if (!mark_list_head(list_p, &mid, &next)) { + // Lock the list head so we can possibly deflate it: + if ((mid = get_list_head_locked(list_p)) == NULL) { return 0; // The list is empty so nothing to deflate. } + next = unmarked_next(mid); } else { // We're restarting after a safepoint so restore the necessary state // before we resume. cur_mid_in_use = *saved_mid_in_use_p; - // Mark cur_mid_in_use's next field so we can possibly update its + // Lock cur_mid_in_use so we can possibly update its // next field to extract a deflated ObjectMonitor. - mid = mark_next_loop(cur_mid_in_use); + om_lock(cur_mid_in_use); + mid = unmarked_next(cur_mid_in_use); if (mid == NULL) { - set_next(cur_mid_in_use, NULL); // unmark next field + om_unlock(cur_mid_in_use); *saved_mid_in_use_p = NULL; return 0; // The remainder is empty so nothing more to deflate. } - // Mark mid's next field so we can possibly deflate it: - next = mark_next_loop(mid); + // Lock mid so we can possibly deflate it: + om_lock(mid); + next = unmarked_next(mid); } while (true) { @@ -2419,10 +2434,10 @@ // a cur_mid_in_use, then its next field is also marked at this point. if (next != NULL) { - // We mark next's next field so that an om_flush() - // thread that is behind us cannot pass us when we - // unmark the current mid's next field. - next_next = mark_next_loop(next); + // We lock next so that an om_flush() thread that is behind us + // cannot pass us when we unlock the current mid. + om_lock(next); + next_next = unmarked_next(next); } // Only try to deflate if there is an associated Java object and if @@ -2433,28 +2448,26 @@ // free_tail_p as needed. Finish the move to the local free list // by unlinking mid from the global or per-thread in-use list. if (cur_mid_in_use == NULL) { - // mid is the list head and it is marked. Switch the list head - // to next which is also marked (if not NULL) and also leave - // mid marked: - *list_p = next; - // mark_list_head() used cmpxchg() above, switching list head can be lazier: - OrderAccess::storestore(); + // mid is the list head and it is locked. Switch the list head + // to next which is also locked (if not NULL) and also leave + // mid locked: + Atomic::store(list_p, next); } else { - ObjectMonitor* marked_next = mark_om_ptr(next); - // mid and cur_mid_in_use are marked. 
Switch cur_mid_in_use's - // next field to marked_next and also leave mid marked: - Atomic::release_store(&cur_mid_in_use->_next_om, marked_next); + ObjectMonitor* locked_next = mark_om_ptr(next); + // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's + // next field to locked_next and also leave mid locked: + set_next(cur_mid_in_use, locked_next); } // At this point mid is disconnected from the in-use list so - // its marked next field no longer has any effects. + // its lock no longer has any effects on the in-use list. deflated_count++; Atomic::dec(count_p); // mid is current tail in the free_head_p list so NULL terminate it - // (which also unmarks it): + // (which also unlocks it): set_next(mid, NULL); // All the list management is done so move on to the next one: - mid = next; // mid keeps non-NULL next's marked next field + mid = next; // mid keeps non-NULL next's locked next field next = next_next; } else { // mid is considered in-use if it does not have an associated @@ -2468,37 +2481,37 @@ // All the list management is done so move on to the next one: if (cur_mid_in_use != NULL) { - set_next(cur_mid_in_use, mid); // umark cur_mid_in_use + om_unlock(cur_mid_in_use); } - // The next cur_mid_in_use keeps mid's marked next field so + // The next cur_mid_in_use keeps mid's lock state so // that it is stable for a possible next field change. It - // cannot be modified by om_release() while it is marked. + // cannot be modified by om_release() while it is locked. cur_mid_in_use = mid; - mid = next; // mid keeps non-NULL next's marked next field + mid = next; // mid keeps non-NULL next's locked state next = next_next; - if (SafepointSynchronize::is_synchronizing() && - cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) { + if (SafepointMechanism::should_block(self) && + cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) { // If a safepoint has started and cur_mid_in_use is not the list // head and is old, then it is safe to use as saved state. Return // to the caller before blocking. *saved_mid_in_use_p = cur_mid_in_use; - set_next(cur_mid_in_use, mid); // umark cur_mid_in_use + om_unlock(cur_mid_in_use); if (mid != NULL) { - set_next(mid, next); // umark mid + om_unlock(mid); } return deflated_count; } } if (mid == NULL) { if (cur_mid_in_use != NULL) { - set_next(cur_mid_in_use, mid); // umark cur_mid_in_use + om_unlock(cur_mid_in_use); } break; // Reached end of the list so nothing more to deflate. } - // The current mid's next field is marked at this point. If we have - // a cur_mid_in_use, then its next field is also marked at this point. + // The current mid's next field is locked at this point. If we have + // a cur_mid_in_use, then it is also locked at this point. } // We finished the list without a safepoint starting so there's // no need to save state. @@ -2539,14 +2552,14 @@ // Note: the thread-local monitors lists get deflated in // a separate pass. See deflate_thread_local_monitors(). - // For moribund threads, scan g_om_in_use_list + // For moribund threads, scan LVars.in_use_list int deflated_count = 0; - if (g_om_in_use_list != NULL) { - // Update n_in_circulation before g_om_in_use_count is updated by deflation. - Atomic::add(&counters->n_in_circulation, g_om_in_use_count); + if (Atomic::load(&LVars.in_use_list) != NULL) { + // Update n_in_circulation before LVars.in_use_count is updated by deflation.
+ Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count)); - deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p); - Atomic::add(&counters->n_in_use, g_om_in_use_count); + deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p); + Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count)); } if (free_head_p != NULL) { @@ -2555,7 +2568,7 @@ guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(free_tail_p->_next_om)); - prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count); + prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); Atomic::add(&counters->n_scavenged, deflated_count); } timer.stop(); @@ -2603,26 +2616,23 @@ log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); } - log_info(monitorinflation)("async g_om_population=%d, g_om_in_use_count=%d, " - "g_om_free_count=%d, g_om_wait_count=%d", - g_om_population, g_om_in_use_count, - g_om_free_count, g_om_wait_count); + log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, " + "global_free_count=%d, global_wait_count=%d", + Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count), + Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count)); // The ServiceThread's async deflation request has been processed. set_is_async_deflation_requested(false); - if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) { + if (HandshakeAfterDeflateIdleMonitors && Atomic::load(&LVars.wait_count) > 0) { // There are deflated ObjectMonitors waiting for a handshake // (or a safepoint) for safety. - // g_wait_list and g_om_wait_count are only updated by the calling - // thread so no need for load_acquire() or release_store(). - ObjectMonitor* list = g_wait_list; - ADIM_guarantee(list != NULL, "g_wait_list must not be NULL"); - int count = g_om_wait_count; - g_om_wait_count = 0; - g_wait_list = NULL; - OrderAccess::storestore(); // Lazier memory sync is okay for list walkers. + ObjectMonitor* list = Atomic::load(&LVars.wait_list); + ADIM_guarantee(list != NULL, "LVars.wait_list must not be NULL"); + int count = Atomic::load(&LVars.wait_count); + Atomic::store(&LVars.wait_count, 0); + Atomic::store(&LVars.wait_list, (ObjectMonitor*)NULL); // Find the tail for prepend_list_to_common(). 
No need to mark // ObjectMonitors for this list walk since only the deflater @@ -2639,7 +2649,7 @@ HandshakeForDeflation hfd_hc; Handshake::execute(&hfd_hc); - prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count); + prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count); log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); } @@ -2680,7 +2690,7 @@ } if (is_global) { - OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count)); + OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&LVars.in_use_count))); } else { OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count)); } @@ -2688,7 +2698,7 @@ do { int local_deflated_count; if (is_global) { - local_deflated_count = deflate_monitor_list_using_JT(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p); + local_deflated_count = deflate_monitor_list_using_JT(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p); } else { local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p); } @@ -2710,9 +2720,9 @@ INTPTR_FORMAT, p2i(unmarked_next(free_tail_p))); if (HandshakeAfterDeflateIdleMonitors) { - prepend_list_to_g_wait_list(free_head_p, free_tail_p, local_deflated_count); + prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count); } else { - prepend_list_to_g_free_list(free_head_p, free_tail_p, local_deflated_count); + prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count); } OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); @@ -2727,7 +2737,7 @@ } else { log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target)); } - assert(SafepointSynchronize::is_synchronizing(), "sanity check"); + assert(SafepointMechanism::should_block(self), "sanity check"); ThreadBlockInVM blocker(self); } // Prepare for another loop after the safepoint. @@ -2779,10 +2789,10 @@ // at a safepoint. 
ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); } else if (log_is_enabled(Info, monitorinflation)) { - log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, " - "g_om_free_count=%d, g_om_wait_count=%d", - g_om_population, g_om_in_use_count, - g_om_free_count, g_om_wait_count); + log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, " + "global_free_count=%d, global_wait_count=%d", + Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count), + Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count)); } Atomic::store(&_forceMonitorScavenge, 0); // Reset @@ -2827,7 +2837,7 @@ guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(free_tail_p->_next_om)); - prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count); + prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); Atomic::add(&counters->n_scavenged, deflated_count); Atomic::add(&counters->per_thread_scavenged, deflated_count); } @@ -2948,27 +2958,27 @@ ls->print_cr("Checking global lists:"); - // Check g_om_population: - if (g_om_population == chk_om_population) { - ls->print_cr("g_om_population=%d equals chk_om_population=%d", - g_om_population, chk_om_population); + // Check LVars.population: + if (Atomic::load(&LVars.population) == chk_om_population) { + ls->print_cr("global_population=%d equals chk_om_population=%d", + Atomic::load(&LVars.population), chk_om_population); } else { // With lock free access to the monitor lists, it is possible for // log_monitor_list_counts() to return a value that doesn't match - // g_om_population. So far a higher value has been seen in testing + // LVars.population. So far a higher value has been seen in testing // so something is being double counted by log_monitor_list_counts(). - ls->print_cr("WARNING: g_om_population=%d is not equal to " - "chk_om_population=%d", g_om_population, chk_om_population); + ls->print_cr("WARNING: global_population=%d is not equal to " + "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population); } - // Check g_om_in_use_list and g_om_in_use_count: + // Check LVars.in_use_list and LVars.in_use_count: chk_global_in_use_list_and_count(ls, &error_cnt); - // Check g_free_list and g_om_free_count: + // Check LVars.free_list and LVars.free_count: chk_global_free_list_and_count(ls, &error_cnt); if (HandshakeAfterDeflateIdleMonitors) { - // Check g_wait_list and g_om_wait_count: + // Check LVars.wait_list and LVars.wait_count: chk_global_wait_list_and_count(ls, &error_cnt); } @@ -3045,34 +3055,52 @@ } } +// Lock the next ObjectMonitor for traversal. The current ObjectMonitor +// is unlocked after the next ObjectMonitor is locked. *cur_p and *next_p +// are updated to their next values in the list traversal. *cur_p is set +// to NULL when the end of the list is reached. +static void lock_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) { + ObjectMonitor* prev = *cur_p; // Save current for unlocking. + if (*next_p == NULL) { // Reached the end of the list. + om_unlock(prev); // Unlock previous. + *cur_p = NULL; // Tell the caller we are done. + return; + } + om_lock(*next_p); // Lock next. + om_unlock(prev); // Unlock previous. + *cur_p = *next_p; // Update current. + *next_p = unmarked_next(*cur_p); // Update next. +} + // Check the global free list and count; log the results of the checks. 
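
lock_next_for_traversal() above is classic lock coupling (hand-over-hand locking): the walker always holds the lock on one node, takes the next node's lock, and only then releases the one behind it, so a concurrent unlink can never slip between the two. The list checkers that follow all advance their cursors this way; the same discipline over the earlier Node sketch:

// Advance a hand-over-hand cursor: *cur_p is locked on entry; on return
// either *cur_p is the locked successor (with *next_p refreshed), or
// *cur_p is NULL because the end of the list was reached.
static void advance_locked(Node** cur_p, Node** next_p) {
  Node* prev = *cur_p;                       // the node we currently hold
  if (*next_p == nullptr) {                  // reached the end of the list
    unlock(prev);
    *cur_p = nullptr;
    return;
  }
  lock(*next_p);                             // take the next lock first...
  unlock(prev);                              // ...only then drop the old one
  *cur_p = *next_p;
  *next_p = (Node*)((uintptr_t)(*cur_p)->next.load() & ~LOCK_BIT);
}
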
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out, int *error_cnt_p) { int chk_om_free_count = 0; ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&g_free_list, &cur, &next)) { + if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) { + next = unmarked_next(cur); // Marked the global free list head so process the list. while (true) { chk_free_entry(NULL /* jt */, cur, out, error_cnt_p); chk_om_free_count++; - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } } } - if (g_om_free_count == chk_om_free_count) { - out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d", - g_om_free_count, chk_om_free_count); + if (Atomic::load(&LVars.free_count) == chk_om_free_count) { + out->print_cr("global_free_count=%d equals chk_om_free_count=%d", + Atomic::load(&LVars.free_count), chk_om_free_count); } else { - // With lock free access to g_free_list, it is possible for an - // ObjectMonitor to be prepended to g_free_list after we started - // calculating chk_om_free_count so g_om_free_count may not + // With lock free access to LVars.free_list, it is possible for an + // ObjectMonitor to be prepended to LVars.free_list after we started + // calculating chk_om_free_count so LVars.free_count may not // match anymore. - out->print_cr("WARNING: g_om_free_count=%d is not equal to " - "chk_om_free_count=%d", g_om_free_count, chk_om_free_count); + out->print_cr("WARNING: global_free_count=%d is not equal to " + "chk_om_free_count=%d", Atomic::load(&LVars.free_count), chk_om_free_count); } } @@ -3082,25 +3110,26 @@ int chk_om_wait_count = 0; ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&g_wait_list, &cur, &next)) { + if ((cur = get_list_head_locked(&LVars.wait_list)) != NULL) { + next = unmarked_next(cur); // Marked the global wait list head so process the list. while (true) { - // Rules for g_wait_list are the same as of g_free_list: + // Rules for LVars.wait_list are the same as of LVars.free_list: chk_free_entry(NULL /* jt */, cur, out, error_cnt_p); chk_om_wait_count++; - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } } } - if (g_om_wait_count == chk_om_wait_count) { - out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d", - g_om_wait_count, chk_om_wait_count); + if (Atomic::load(&LVars.wait_count) == chk_om_wait_count) { + out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d", + Atomic::load(&LVars.wait_count), chk_om_wait_count); } else { - out->print_cr("ERROR: g_om_wait_count=%d is not equal to " - "chk_om_wait_count=%d", g_om_wait_count, chk_om_wait_count); + out->print_cr("ERROR: global_wait_count=%d is not equal to " + "chk_om_wait_count=%d", Atomic::load(&LVars.wait_count), chk_om_wait_count); *error_cnt_p = *error_cnt_p + 1; } } @@ -3111,27 +3140,28 @@ int chk_om_in_use_count = 0; ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&g_om_in_use_list, &cur, &next)) { + if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) { + next = unmarked_next(cur); // Marked the global in-use list head so process the list. 
while (true) { chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p); chk_om_in_use_count++; - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } } } - if (g_om_in_use_count == chk_om_in_use_count) { - out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", - g_om_in_use_count, chk_om_in_use_count); + if (Atomic::load(&LVars.in_use_count) == chk_om_in_use_count) { + out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d", + Atomic::load(&LVars.in_use_count), chk_om_in_use_count); } else { // With lock free access to the monitor lists, it is possible for // an exiting JavaThread to put its in-use ObjectMonitors on the // global in-use list after chk_om_in_use_count is calculated above. - out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d", - g_om_in_use_count, chk_om_in_use_count); + out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d", + Atomic::load(&LVars.in_use_count), chk_om_in_use_count); } } @@ -3201,13 +3231,14 @@ int chk_om_free_count = 0; ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&jt->om_free_list, &cur, &next)) { + if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) { + next = unmarked_next(cur); // Marked the per-thread free list head so process the list. while (true) { chk_free_entry(jt, cur, out, error_cnt_p); chk_om_free_count++; - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } @@ -3232,13 +3263,14 @@ int chk_om_in_use_count = 0; ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&jt->om_in_use_list, &cur, &next)) { + if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) { + next = unmarked_next(cur); // Marked the per-thread in-use list head so process the list. while (true) { chk_in_use_entry(jt, cur, out, error_cnt_p); chk_om_in_use_count++; - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } @@ -3261,7 +3293,7 @@ // indicate the associated object and its type. void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) { stringStream ss; - if (g_om_in_use_count > 0) { + if (Atomic::load(&LVars.in_use_count) > 0) { out->print_cr("In-use global monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); out->print_cr("%18s %s %7s %18s %18s", @@ -3269,7 +3301,8 @@ out->print_cr("================== === ======= ================== =================="); ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&g_om_in_use_list, &cur, &next)) { + if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) { + next = unmarked_next(cur); // Marked the global in-use list head so process the list. while (true) { const oop obj = (oop) cur->object(); @@ -3285,7 +3318,7 @@ } out->cr(); - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } @@ -3301,7 +3334,8 @@ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (mark_list_head(&jt->om_in_use_list, &cur, &next)) { + if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) { + next = unmarked_next(cur); // Marked the global in-use list head so process the list. 
while (true) { const oop obj = (oop) cur->object(); @@ -3317,7 +3351,7 @@ } out->cr(); - mark_next_for_traversal(&cur, &next); + lock_next_for_traversal(&cur, &next); if (cur == NULL) { break; } @@ -3335,11 +3369,11 @@ out->print_cr("%18s %10s %10s %10s %10s", "Global Lists:", "InUse", "Free", "Wait", "Total"); out->print_cr("================== ========== ========== ========== =========="); - out->print_cr("%18s %10d %10d %10d %10d", "", g_om_in_use_count, - g_om_free_count, g_om_wait_count, g_om_population); - pop_count += g_om_in_use_count + g_om_free_count; + out->print_cr("%18s %10d %10d %10d %10d", "", Atomic::load(&LVars.in_use_count), + Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count), Atomic::load(&LVars.population)); + pop_count += Atomic::load(&LVars.in_use_count) + Atomic::load(&LVars.free_count); if (HandshakeAfterDeflateIdleMonitors) { - pop_count += g_om_wait_count; + pop_count += Atomic::load(&LVars.wait_count); } out->print_cr("%18s %10s %10s %10s", @@ -3361,7 +3395,7 @@ // the list of extant blocks without taking a lock. int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { - PaddedObjectMonitor* block = g_block_list; + PaddedObjectMonitor* block = Atomic::load(&g_block_list); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { @@ -3371,10 +3405,9 @@ assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned"); return 1; } - // unmarked_next() is not needed with g_block_list (no next field - // marking) and no load_acquire() needed because _next_om is - // updated before g_block_list is changed with cmpxchg(). - block = (PaddedObjectMonitor*)block->_next_om; + // unmarked_next() is not needed with g_block_list (no locking + // used with block linkage _next_om fields). + block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om); } return 0; }
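
verify_objmon_isinpool() relies on plain pointer arithmetic: a monitor belongs to a block if its address falls strictly inside the block's element range (element 0 is the chain header, so it never holds a real monitor) and sits at an exact element boundary. The equivalent check in isolation, over a hypothetical fixed-size block of PaddedMonitor elements:

#include <cstddef>
#include <cstdint>

static const int BLOCKSIZE = 128;                 // assumed block capacity

struct PaddedMonitor { char body[128]; };         // stand-in for PaddedObjectMonitor

// Return true if m is element 1..BLOCKSIZE-1 of block (element 0 is
// reserved as the block header, hence the strict lower bound).
static bool is_in_block(const PaddedMonitor* block, const PaddedMonitor* m) {
  if (m <= &block[0] || m >= &block[BLOCKSIZE]) {
    return false;                                 // outside the block's range
  }
  ptrdiff_t diff = (const char*)m - (const char*)block;
  return (diff % sizeof(PaddedMonitor)) == 0;     // must be element-aligned
}
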