src/hotspot/share/runtime/synchronizer.cpp
rev 57232 : v2.00 -> v2.08 (CR8/v2.08/11-for-jdk14) patches combined into one; merge with jdk-14+25 snapshot; merge with jdk-14+26 snapshot.
rev 57233 : See CR8-to-CR9-changes; merge with 8230876.patch (2019.11.15); merge with jdk-14+25 snapshot; fuzzy merge with jdk-14+26 snapshot.
@@ -41,10 +41,11 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
@@ -116,243 +117,246 @@
#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
// global list of blocks of monitors
-PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
+PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
-// Global ObjectMonitor free list. Newly allocated and deflated
-// ObjectMonitors are prepended here.
-static ObjectMonitor* volatile g_free_list = NULL;
-// Global ObjectMonitor in-use list. When a JavaThread is exiting,
-// ObjectMonitors on its per-thread in-use list are prepended here.
-static ObjectMonitor* volatile g_om_in_use_list = NULL;
-// Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
-// is true, deflated ObjectMonitors wait on this list until after a
-// handshake or a safepoint for platforms that don't support handshakes.
-// After the handshake or safepoint, the deflated ObjectMonitors are
-// prepended to g_free_list.
-static ObjectMonitor* volatile g_wait_list = NULL;
-
-static volatile int g_om_free_count = 0; // # on g_free_list
-static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list
-static volatile int g_om_population = 0; // # Extant -- in circulation
-static volatile int g_om_wait_count = 0; // # on g_wait_list
+struct ListGlobals {
+ char _pad_prefix[OM_CACHE_LINE_SIZE];
+ // These are highly shared, list-related variables.
+ // To avoid false sharing, they need to be the sole occupants of a cache line.
+
+ // Global ObjectMonitor free list. Newly allocated and deflated
+ // ObjectMonitors are prepended here.
+ ObjectMonitor* free_list;
+ DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+ // Global ObjectMonitor in-use list. When a JavaThread is exiting,
+ // ObjectMonitors on its per-thread in-use list are prepended here.
+ ObjectMonitor* in_use_list;
+ DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+ // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
+ // is true, deflated ObjectMonitors wait on this list until after a
+ // handshake or a safepoint for platforms that don't support handshakes.
+ // After the handshake or safepoint, the deflated ObjectMonitors are
+ // prepended to free_list.
+ ObjectMonitor* wait_list;
+ DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+ int free_count; // # on free_list
+ DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
+
+ int in_use_count; // # on in_use_list
+ DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
+
+ int population; // # Extant -- in circulation
+ DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
+
+ int wait_count; // # on wait_list
+ DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
+};
+static ListGlobals LVars;
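For readers new to this padding idiom: each hot field is followed by enough filler
to push the next field onto its own cache line, so updates to one counter do not
ping-pong the line holding its neighbors. A minimal plain-C++ sketch of the same
technique, assuming a 64-byte line (kCacheLine and PaddedCounters are illustrative
stand-ins, not the HotSpot macros):

    #include <atomic>
    #include <cstddef>

    constexpr std::size_t kCacheLine = 64;  // assumed line size

    struct PaddedCounters {
      char pad_prefix[kCacheLine];          // shields against the preceding object
      std::atomic<int> free_count;          // sole occupant of its cache line
      char pad1[kCacheLine - sizeof(std::atomic<int>)];
      std::atomic<int> in_use_count;        // likewise isolated
      char pad2[kCacheLine - sizeof(std::atomic<int>)];
    };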
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
-// =====================> List Management functions
+// =====================> Spinlock functions
-// Return true if the ObjectMonitor's next field is marked.
+// ObjectMonitors are not lockable outside of this file. We use spinlocks
+// implemented with a bit in the _next_om field instead of heavier-weight
+// locking mechanisms for faster list management.
+
+#define OM_LOCK_BIT 0x1
+
+// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
-static bool is_next_marked(ObjectMonitor* om) {
- // Use load_acquire() since _next_om fields are updated with a
- // release_store().
- return ((intptr_t)Atomic::load_acquire(&om->_next_om) & 0x1) != 0;
+static bool is_locked(ObjectMonitor* om) {
+ return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
}
-// Mark an ObjectMonitor* and return it. Note: the om parameter
-// may or may not have been marked originally.
+// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
+// Note: the om parameter may or may not have been marked originally.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
- return (ObjectMonitor*)((intptr_t)om | 0x1);
+ return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}
-// Mark the next field in an ObjectMonitor. If marking was successful,
-// then the unmarked next field is returned via parameter and true is
-// returned. Otherwise false is returned.
-static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) {
- // Get current next field without any marking value.
- ObjectMonitor* next = (ObjectMonitor*)((intptr_t)om->_next_om & ~0x1);
+// Try to lock an ObjectMonitor. Returns true if locking was successful.
+// Otherwise returns false.
+static bool try_om_lock(ObjectMonitor* om) {
+ // Get current next field without any OM_LOCK_BIT value.
+ ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
- return false; // Could not mark the next field or it was already marked.
+ return false; // Cannot lock the ObjectMonitor.
}
- *next_p = next;
return true;
}
-// Loop until we mark the next field in an ObjectMonitor. The unmarked
-// next field is returned.
-static ObjectMonitor* mark_next_loop(ObjectMonitor* om) {
- ObjectMonitor* next;
- while (true) {
- if (mark_next(om, &next)) {
- // Marked om's next field so return the unmarked value.
- return next;
+// Lock an ObjectMonitor.
+static void om_lock(ObjectMonitor* om) {
+ while (true) {
+ if (try_om_lock(om)) {
+ return;
}
}
}
-// Set the next field in an ObjectMonitor to the specified value.
-// The caller of set_next() must be the same thread that marked the
-// ObjectMonitor.
-static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
- Atomic::release_store(&om->_next_om, value);
+// Unlock an ObjectMonitor.
+static void om_unlock(ObjectMonitor* om) {
+ ObjectMonitor* next = Atomic::load(&om->_next_om);
+ guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
+ " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
+
+ next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
+ Atomic::store(&om->_next_om, next);
}
-// Mark the next field in the list head ObjectMonitor. If marking was
-// successful, then the mid and the unmarked next field are returned
-// via parameter and true is returned. Otherwise false is returned.
-static bool mark_list_head(ObjectMonitor* volatile * list_p,
- ObjectMonitor** mid_p, ObjectMonitor** next_p) {
+// Get the list head after locking it. Returns the list head or NULL
+// if the list is empty.
+static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
while (true) {
- ObjectMonitor* mid = *list_p;
+ ObjectMonitor* mid = Atomic::load(list_p);
if (mid == NULL) {
- return false; // The list is empty so nothing to mark.
+ return NULL; // The list is empty.
}
- if (mark_next(mid, next_p)) {
- if (*list_p != mid) {
+ if (try_om_lock(mid)) {
+ if (Atomic::load(list_p) != mid) {
// The list head changed so we have to retry.
- set_next(mid, *next_p); // unmark mid
+ om_unlock(mid);
continue;
}
- // We marked next field to guard against races.
- *mid_p = mid;
- return true;
+ return mid;
}
}
}
// Return the unmarked next field in an ObjectMonitor. Note: the next
-// field may or may not have been marked originally.
+// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
- // Use load_acquire() since _next_om fields are updated with a
- // release_store().
- return (ObjectMonitor*)((intptr_t)Atomic::load_acquire(&om->_next_om) & ~0x1);
+ return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
}
-// Mark the next ObjectMonitor for traversal. The current ObjectMonitor
-// is unmarked after the next ObjectMonitor is marked. *cur_p and *next_p
-// are updated to their next values in the list traversal. *cur_p is set
-// to NULL when the end of the list is reached.
-static void mark_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
- ObjectMonitor* prev = *cur_p; // Save current for unmarking.
- if (*next_p == NULL) { // Reached the end of the list.
- set_next(prev, NULL); // Unmark previous.
- *cur_p = NULL; // Tell the caller we are done.
- return;
- }
- (void)mark_next_loop(*next_p); // Mark next.
- set_next(prev, *next_p); // Unmark previous.
- *cur_p = *next_p; // Update current.
- *next_p = unmarked_next(*cur_p); // Update next.
+#undef OM_LOCK_BIT
+
+
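A standalone sketch of the pointer-low-bit spinlock these functions implement
(plain C++, not HotSpot code). It relies on the same assumption as the patch:
ObjectMonitor* values are at least 2-byte aligned, so bit 0 of a next pointer
is always free to serve as the lock bit. std::atomic's default ordering stands
in for HotSpot's conservatively fenced Atomic::cmpxchg:

    #include <atomic>
    #include <cstdint>

    struct Node {
      std::atomic<std::uintptr_t> next{0};  // low bit doubles as a spinlock bit
    };

    constexpr std::uintptr_t kLockBit = 0x1;

    // Single CAS from the unlocked value to the locked value, as try_om_lock().
    bool try_lock(Node* n) {
      std::uintptr_t unlocked = n->next.load() & ~kLockBit;
      return n->next.compare_exchange_strong(unlocked, unlocked | kLockBit);
    }

    void lock(Node* n) {                    // spin until acquired, as om_lock()
      while (!try_lock(n)) { /* spin */ }
    }

    void unlock(Node* n) {                  // clear the bit, as om_unlock()
      n->next.store(n->next.load() & ~kLockBit);
    }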
+// =====================> List Management functions
+
+// Set the next field in an ObjectMonitor to the specified value.
+static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
+ Atomic::store(&om->_next_om, value);
}
// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
- int count, ObjectMonitor* volatile* list_p,
- volatile int* count_p) {
+ int count, ObjectMonitor** list_p,
+ int* count_p) {
while (true) {
- ObjectMonitor* cur = *list_p;
+ ObjectMonitor* cur = Atomic::load(list_p);
// Prepend list to *list_p.
- ObjectMonitor* next = NULL;
- if (!mark_next(tail, &next)) {
- continue; // failed to mark next field so try it all again
+ if (!try_om_lock(tail)) {
+ continue; // failed to lock tail so try it all again
}
- set_next(tail, cur); // tail now points to cur (and unmarks tail)
+ set_next(tail, cur); // tail now points to cur (and unlocks tail)
if (cur == NULL) {
// No potential race with takers or other prependers since
// *list_p is empty.
if (Atomic::cmpxchg(list_p, cur, list) == cur) {
// Successfully switched *list_p to the list value.
Atomic::add(count_p, count);
break;
}
// Implied else: try it all again
} else {
- // Try to mark next field to guard against races:
- if (!mark_next(cur, &next)) {
- continue; // failed to mark next field so try it all again
+ if (!try_om_lock(cur)) {
+ continue; // failed to lock cur so try it all again
}
- // We marked the next field so try to switch *list_p to the list value.
+ // We locked cur so try to switch *list_p to the list value.
if (Atomic::cmpxchg(list_p, cur, list) != cur) {
- // The list head has changed so unmark the next field and try again:
- set_next(cur, next);
+ // The list head has changed so unlock cur and try again:
+ om_unlock(cur);
continue;
}
Atomic::add(count_p, count);
- set_next(cur, next); // unmark next field
+ om_unlock(cur);
break;
}
}
}
// Prepend a newly allocated block of ObjectMonitors to g_block_list and
-// g_free_list. Also updates g_om_population and g_om_free_count.
+// LVars.free_list. Also updates LVars.population and LVars.free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
// First we handle g_block_list:
while (true) {
- PaddedObjectMonitor* cur = g_block_list;
+ PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
// Prepend new_blk to g_block_list. The first ObjectMonitor in
// a block is reserved for use as linkage to the next block.
new_blk[0]._next_om = cur;
if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
// Successfully switched g_block_list to the new_blk value.
- Atomic::add(&g_om_population, _BLOCKSIZE - 1);
+ Atomic::add(&LVars.population, _BLOCKSIZE - 1);
break;
}
// Implied else: try it all again
}
- // Second we handle g_free_list:
+ // Second we handle LVars.free_list:
prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
- &g_free_list, &g_om_free_count);
+ &LVars.free_list, &LVars.free_count);
}
-// Prepend a list of ObjectMonitors to g_free_list. 'tail' is the last
+// Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
-// updates g_om_free_count.
-static void prepend_list_to_g_free_list(ObjectMonitor* list,
+// updates LVars.free_count.
+static void prepend_list_to_global_free_list(ObjectMonitor* list,
ObjectMonitor* tail, int count) {
- prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count);
+ prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
}
-// Prepend a list of ObjectMonitors to g_wait_list. 'tail' is the last
+// Prepend a list of ObjectMonitors to LVars.wait_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
-// updates g_om_wait_count.
-static void prepend_list_to_g_wait_list(ObjectMonitor* list,
+// updates LVars.wait_count.
+static void prepend_list_to_global_wait_list(ObjectMonitor* list,
ObjectMonitor* tail, int count) {
assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
- prepend_list_to_common(list, tail, count, &g_wait_list, &g_om_wait_count);
+ prepend_list_to_common(list, tail, count, &LVars.wait_list, &LVars.wait_count);
}
-// Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last
+// Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
-// updates g_om_in_use_list.
-static void prepend_list_to_g_om_in_use_list(ObjectMonitor* list,
+// updates LVars.in_use_list.
+static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
ObjectMonitor* tail, int count) {
- prepend_list_to_common(list, tail, count, &g_om_in_use_list, &g_om_in_use_count);
+ prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count);
}
// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
-static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p,
- int volatile * count_p) {
+static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
+ int* count_p) {
while (true) {
- (void)mark_next_loop(m); // mark m so we can safely update its next field
+ om_lock(m); // Lock m so we can safely update its next field.
ObjectMonitor* cur = NULL;
- ObjectMonitor* next = NULL;
- // Mark the list head to guard against A-B-A race:
- if (mark_list_head(list_p, &cur, &next)) {
- // List head is now marked so we can safely switch it.
- set_next(m, cur); // m now points to cur (and unmarks m)
- *list_p = m; // Switch list head to unmarked m.
- // mark_list_head() used cmpxchg() above, switching list head can be lazier:
- OrderAccess::storestore();
- set_next(cur, next); // Unmark the previous list head.
+ // Lock the list head to guard against A-B-A race:
+ if ((cur = get_list_head_locked(list_p)) != NULL) {
+ // List head is now locked so we can safely switch it.
+ set_next(m, cur); // m now points to cur (and unlocks m)
+ Atomic::store(list_p, m); // Switch list head to unlocked m.
+ om_unlock(cur);
break;
}
// The list is empty so try to set the list head.
assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
- set_next(m, cur); // m now points to NULL (and unmarks m)
+ set_next(m, cur); // m now points to NULL (and unlocks m)
if (Atomic::cmpxchg(list_p, cur, m) == cur) {
- // List head is now unmarked m.
+ // List head is now unlocked m.
break;
}
// Implied else: try it all again
}
Atomic::inc(count_p);
@@ -370,35 +374,33 @@
prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}
// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
-static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p,
- int volatile * count_p) {
- ObjectMonitor* next = NULL;
+static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
+ int* count_p) {
ObjectMonitor* take = NULL;
- // Mark the list head to guard against A-B-A race:
- if (!mark_list_head(list_p, &take, &next)) {
+ // Lock the list head to guard against A-B-A race:
+ if ((take = get_list_head_locked(list_p)) == NULL) {
return NULL; // None are available.
}
- // Switch marked list head to next (which unmarks the list head, but
- // leaves take marked):
- *list_p = next;
+ ObjectMonitor* next = unmarked_next(take);
+ // Switch locked list head to next (which unlocks the list head, but
+ // leaves take locked):
+ Atomic::store(list_p, next);
Atomic::dec(count_p);
- // mark_list_head() used cmpxchg() above, switching list head can be lazier:
- OrderAccess::storestore();
- // Unmark take, but leave the next value for any lagging list
+ // Unlock take, but leave the next value for any lagging list
// walkers. It will get cleaned up when take is prepended to
// the in-use list:
- set_next(take, next);
+ om_unlock(take);
return take;
}
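The recurring "guard against A-B-A race" comments refer to the classic lock-free
pop hazard, sketched below in deliberately broken form. Locking the head's
_next_om before switching the list head (get_list_head_locked() plus the store
above) closes the window, because the racing pop cannot lock node A:

    #include <atomic>

    struct Node { Node* next; };
    std::atomic<Node*> head{nullptr};

    // Naive pop, vulnerable to A-B-A: between the load and the cmpxchg,
    // another thread may pop A and B and push A back. The cmpxchg still
    // succeeds (head == A again) but installs a stale 'next' (B), which
    // may have been freed or moved to another list by then.
    Node* naive_pop() {
      while (true) {
        Node* a = head.load();
        if (a == nullptr) return nullptr;
        Node* b = a->next;                        // may go stale here
        if (head.compare_exchange_strong(a, b)) { // succeeds even after A-B-A
          return a;
        }
      }
    }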
-// Take an ObjectMonitor from the start of the global free-list. Also
-// updates g_om_free_count. Returns NULL if none are available.
-static ObjectMonitor* take_from_start_of_g_free_list() {
- return take_from_start_of_common(&g_free_list, &g_om_free_count);
+// Take an ObjectMonitor from the start of the LVars.free_list. Also
+// updates LVars.free_count. Returns NULL if none are available.
+static ObjectMonitor* take_from_start_of_global_free_list() {
+ return take_from_start_of_common(&LVars.free_list, &LVars.free_count);
}
// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
@@ -1028,11 +1030,14 @@
assert(AsyncDeflateIdleMonitors, "sanity check");
continue;
}
monitor = omh.om_ptr();
temp = monitor->header();
- assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ // Allow for a lagging install_displaced_markword_in_object() to
+ // have marked the ObjectMonitor's header/dmw field.
+ assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
+ "invariant: header=" INTPTR_FORMAT, temp.value());
hash = temp.hash();
if (hash != 0) { // if it has a hash, just return it
return hash;
}
// Fall thru so we only have one place that installs the hash in
@@ -1060,28 +1065,40 @@
ObjectMonitorHandle omh;
inflate(&omh, self, obj, inflate_cause_hash_code);
monitor = omh.om_ptr();
// Load ObjectMonitor's header/dmw field and see if it has a hash.
mark = monitor->header();
- assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+ // Allow for a lagging install_displaced_markword_in_object() to
+ // have marked the ObjectMonitor's header/dmw field.
+ assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
+ "invariant: header=" INTPTR_FORMAT, mark.value());
hash = mark.hash();
if (hash == 0) { // if it does not have a hash
hash = get_next_hash(self, obj); // get a new hash
temp = mark.copy_set_hash(hash); // merge the hash into header
+ if (AsyncDeflateIdleMonitors && temp.is_marked()) {
+ // A lagging install_displaced_markword_in_object() has marked
+ // the ObjectMonitor's header/dmw field. We clear it to avoid
+ // any confusion if we are able to set the hash.
+ temp.set_unmarked();
+ }
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
test = markWord(v);
if (test != mark) {
// The attempt to update the ObjectMonitor's header/dmw field
// did not work. This can happen if another thread managed to
- // merge in the hash just before our cmpxchg().
- // ObjectMonitor::install_displaced_markword_in_object()
- // does mark the header/dmw field as part of async deflation,
- // but that protocol cannot happen now due to the
- // ObjectMonitorHandle above.
+ // merge in the hash just before our cmpxchg(). With async
+ // deflation, a lagging install_displaced_markword_in_object()
+ // could have just marked or just unmarked the header/dmw field.
// If we add any new usages of the header/dmw field, this code
// will need to be updated.
+ if (AsyncDeflateIdleMonitors) {
+ // Since async deflation gives us two possible reasons for
+ // the cmpxchg() to fail, it is easier to simply retry.
+ continue;
+ }
hash = test.hash();
assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
}
}
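The retry protocol above condenses to a load/merge/cmpxchg loop. A hypothetical
standalone sketch follows; the bit layout is invented for illustration and is
not HotSpot's real markWord encoding:

    #include <atomic>
    #include <cstdint>

    constexpr std::uintptr_t kHashShift = 8;                  // assumed layout
    constexpr std::uintptr_t kHashMask  = 0x7FFFFFFFull << kHashShift;

    std::uintptr_t install_hash(std::atomic<std::uintptr_t>* header,
                                std::uintptr_t new_hash) {
      while (true) {
        std::uintptr_t mark  = header->load();
        std::uintptr_t found = (mark & kHashMask) >> kHashShift;
        if (found != 0) {
          return found;             // lost the race; use the winner's hash
        }
        std::uintptr_t merged = mark | (new_hash << kHashShift);
        if (header->compare_exchange_strong(mark, merged)) {
          return new_hash;          // our hash is now published
        }
        // cmpxchg failed: a racing hash install, or (with async deflation)
        // a lagging mark/unmark of the header. Retrying handles both.
      }
    }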
@@ -1237,11 +1254,11 @@
}
// Visitors ...
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
- PaddedObjectMonitor* block = g_block_list;
+ PaddedObjectMonitor* block = Atomic::load(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) {
ObjectMonitor* mid = (ObjectMonitor *)(block + i);
ObjectMonitorHandle omh;
@@ -1252,27 +1269,26 @@
continue;
}
closure->do_monitor(mid);
}
}
- // unmarked_next() is not needed with g_block_list (no next field
- // marking) and no load_acquire() needed because _next_om is
- // updated before g_block_list is changed with cmpxchg().
- block = (PaddedObjectMonitor*)block->_next_om;
+ // unmarked_next() is not needed with g_block_list (no locking
+// used with block linkage _next_om fields).
+ block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
}
}
static bool monitors_used_above_threshold() {
- if (g_om_population == 0) {
+ if (Atomic::load(&LVars.population) == 0) {
return false;
}
if (MonitorUsedDeflationThreshold > 0) {
- int monitors_used = g_om_population - g_om_free_count;
+ int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
if (HandshakeAfterDeflateIdleMonitors) {
- monitors_used -= g_om_wait_count;
+ monitors_used -= Atomic::load(&LVars.wait_count);
}
- int monitor_usage = (monitors_used * 100LL) / g_om_population;
+ int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
return monitor_usage > MonitorUsedDeflationThreshold;
}
return false;
}
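A worked example of the threshold test with hypothetical counts; the 100LL
widens the multiplication to 64 bits so a large monitors_used cannot overflow:

    #include <cstdio>

    int main() {
      // Assume 1000 extant monitors, 900 free, 40 on the wait list
      // (the wait list only counts when HandshakeAfterDeflateIdleMonitors).
      int population = 1000, free_count = 900, wait_count = 40;
      int monitors_used = population - free_count - wait_count;        // 60
      int monitor_usage = (int)((monitors_used * 100LL) / population); // 6
      // With MonitorUsedDeflationThreshold=5: 6 > 5, so deflation is due.
      printf("usage = %d%%\n", monitor_usage);
      return 0;
    }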
@@ -1299,13 +1315,13 @@
// than AsyncDeflationInterval (unless is_async_deflation_requested)
// in order to not swamp the ServiceThread.
_last_async_deflation_time_ns = os::javaTimeNanos();
return true;
}
- int monitors_used = g_om_population - g_om_free_count;
+ int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
if (HandshakeAfterDeflateIdleMonitors) {
- monitors_used -= g_om_wait_count;
+ monitors_used -= Atomic::load(&LVars.wait_count);
}
if (is_MonitorBound_exceeded(monitors_used)) {
// Not enough ObjectMonitors on the global free list.
return true;
}
@@ -1346,11 +1362,11 @@
global_used_oops_do(f);
}
void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
- list_oops_do(g_om_in_use_list, g_om_in_use_count, f);
+ list_oops_do(Atomic::load(&LVars.in_use_list), Atomic::load(&LVars.in_use_count), f);
}
void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
@@ -1371,22 +1387,22 @@
// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
-// Inflation unlinks monitors from the global g_free_list and
-// associates them with objects. Deflation -- which occurs at
-// STW-time -- disassociates idle monitors from objects. Such
-// scavenged monitors are returned to the g_free_list.
+// Inflation unlinks monitors from LVars.free_list or a per-thread free
+// list and associates them with objects. Deflation -- which occurs at
+// STW-time or asynchronously -- disassociates idle monitors from objects.
+// Such scavenged monitors are returned to the LVars.free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
-// -- unassigned and on the global free list
-// -- unassigned and on a thread's private om_free_list
+// -- unassigned and on the LVars.free_list
+// -- unassigned and on a per-thread free list
// -- assigned to an object. The object is inflated and the mark refers
-// to the objectmonitor.
+// to the ObjectMonitor.
// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
@@ -1404,11 +1420,11 @@
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// When safepoint deflation is being used and MonitorBound is set, the
// boundary applies to
-// (g_om_population - g_om_free_count)
+// (LVars.population - LVars.free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
//
// When async deflation is being used:
@@ -1460,21 +1476,21 @@
m->set_allocation_state(ObjectMonitor::New);
prepend_to_om_in_use_list(self, m);
return m;
}
- // 2: try to allocate from the global g_free_list
+ // 2: try to allocate from the global LVars.free_list
// CONSIDER: use muxTry() instead of muxAcquire().
// If the muxTry() fails then drop immediately into case 3.
// If we're using thread-local free lists then try
// to reprovision the caller's free list.
- if (g_free_list != NULL) {
+ if (Atomic::load(&LVars.free_list) != NULL) {
// Reprovision the thread's om_free_list.
// Use bulk transfers to reduce the allocation rate and heat
// on various locks.
for (int i = self->om_free_provision; --i >= 0;) {
- ObjectMonitor* take = take_from_start_of_g_free_list();
+ ObjectMonitor* take = take_from_start_of_global_free_list();
if (take == NULL) {
break; // No more are available.
}
guarantee(take->object() == NULL, "invariant");
if (AsyncDeflateIdleMonitors) {
@@ -1487,11 +1503,13 @@
if (take->ref_count() < 0) {
// Add back max_jint to restore the ref_count field to its
// proper value.
Atomic::add(&take->_ref_count, max_jint);
- DEBUG_ONLY(jint l_ref_count = take->ref_count();)
+#ifdef ASSERT
+ jint l_ref_count = take->ref_count();
+#endif
assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
l_ref_count, take->ref_count());
}
}
take->Recycle();
@@ -1503,11 +1521,11 @@
}
self->om_free_provision += 1 + (self->om_free_provision / 2);
if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
if (!AsyncDeflateIdleMonitors &&
- is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
+ is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) {
// Not enough ObjectMonitors on the global free list.
// We can't safely induce a STW safepoint from om_alloc() as our thread
// state may not be appropriate for such activities and callers may hold
// naked oops, so instead we defer the action.
InduceScavenge(self, "om_alloc");
@@ -1581,58 +1599,58 @@
m->set_allocation_state(ObjectMonitor::Free);
// _next_om is used for both per-thread in-use and free lists so
// we have to remove 'm' from the in-use list first (as needed).
if (from_per_thread_alloc) {
// Need to remove 'm' from om_in_use_list.
- // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
+ // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
// protocol because async deflation can do list deletions in parallel.
ObjectMonitor* cur_mid_in_use = NULL;
ObjectMonitor* mid = NULL;
ObjectMonitor* next = NULL;
bool extracted = false;
- if (!mark_list_head(&self->om_in_use_list, &mid, &next)) {
+ if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
}
+ next = unmarked_next(mid);
while (true) {
if (m == mid) {
// We found 'm' on the per-thread in-use list so try to extract it.
if (cur_mid_in_use == NULL) {
- // mid is the list head and it is marked. Switch the list head
- // to next which unmarks the list head, but leaves mid marked:
- self->om_in_use_list = next;
- // mark_list_head() used cmpxchg() above, switching list head can be lazier:
- OrderAccess::storestore();
- } else {
- // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
- // next field to next which unmarks cur_mid_in_use, but leaves
- // mid marked:
- Atomic::release_store(&cur_mid_in_use->_next_om, next);
+ // mid is the list head and it is locked. Switch the list head
+ // to next which unlocks the list head, but leaves mid locked:
+ Atomic::store(&self->om_in_use_list, next);
+ } else {
+ // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
+ // next field to next which unlocks cur_mid_in_use, but leaves
+ // mid locked:
+ set_next(cur_mid_in_use, next);
}
extracted = true;
Atomic::dec(&self->om_in_use_count);
- // Unmark mid, but leave the next value for any lagging list
+ // Unlock mid, but leave the next value for any lagging list
// walkers. It will get cleaned up when mid is prepended to
// the thread's free list:
- set_next(mid, next);
+ om_unlock(mid);
break;
}
if (cur_mid_in_use != NULL) {
- set_next(cur_mid_in_use, mid); // umark cur_mid_in_use
+ om_unlock(cur_mid_in_use);
}
- // The next cur_mid_in_use keeps mid's marked next field so
+ // The next cur_mid_in_use keeps mid's locked state so
// that it is stable for a possible next field change. It
- // cannot be deflated while it is marked.
+ // cannot be deflated while it is locked.
cur_mid_in_use = mid;
mid = next;
if (mid == NULL) {
// Reached end of the list and didn't find m so:
fatal("must find m=" INTPTR_FORMAT "on om_in_use_list=" INTPTR_FORMAT,
p2i(m), p2i(self->om_in_use_list));
}
- // Mark mid's next field so we can possibly extract it:
- next = mark_next_loop(mid);
+ // Lock mid so we can possibly extract it:
+ om_lock(mid);
+ next = unmarked_next(mid);
}
}
prepend_to_om_free_list(self, m);
guarantee(m->is_free(), "invariant");
@@ -1663,35 +1681,34 @@
// we process the per-thread lists in the same order to prevent
// ordering races.
int in_use_count = 0;
ObjectMonitor* in_use_list = NULL;
ObjectMonitor* in_use_tail = NULL;
- ObjectMonitor* next = NULL;
// An async deflation thread checks to see if the target thread
// is exiting, but if it has made it past that check before we
// started exiting, then it is racing to get to the in-use list.
- if (mark_list_head(&self->om_in_use_list, &in_use_list, &next)) {
+ if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
- // At this point, we have marked the in-use list head so an
+ // At this point, we have locked the in-use list head so an
// async deflation thread cannot come in after us. If an async
// deflation thread is ahead of us, then we'll detect that and
// wait for it to finish its work.
//
// The thread is going away, however the ObjectMonitors on the
// om_in_use_list may still be in-use by other threads. Link
// them to in_use_tail, which will be linked into the global
- // in-use list g_om_in_use_list below.
+ // in-use list (LVars.in_use_list) below.
//
// Account for the in-use list head before the loop since it is
- // already marked (by this thread):
+ // already locked (by this thread):
in_use_tail = in_use_list;
in_use_count++;
for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
- if (is_next_marked(cur_om)) {
- // This next field is marked so there must be an async deflater
+ if (is_locked(cur_om)) {
+ // cur_om is locked so there must be an async deflater
// thread ahead of us so we'll give it a chance to finish.
- while (is_next_marked(cur_om)) {
+ while (is_locked(cur_om)) {
os::naked_short_sleep(1);
}
// Refetch the possibly changed next field and try again.
cur_om = unmarked_next(in_use_tail);
continue;
@@ -1712,23 +1729,21 @@
int l_om_in_use_count = self->om_in_use_count;
ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
"match: l_om_in_use_count=%d, in_use_count=%d",
l_om_in_use_count, in_use_count);
self->om_in_use_count = 0;
- // Clear the in-use list head (which also unmarks it):
- self->om_in_use_list = (ObjectMonitor*)NULL;
- // mark_list_head() used cmpxchg() above, clearing the disconnected list head can be lazier:
- OrderAccess::storestore();
- set_next(in_use_list, next);
+ // Clear the in-use list head (which also unlocks it):
+ Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
+ om_unlock(in_use_list);
}
int free_count = 0;
ObjectMonitor* free_list = self->om_free_list;
ObjectMonitor* free_tail = NULL;
if (free_list != NULL) {
// The thread is going away. Set 'free_tail' to the last per-thread free
- // monitor which will be linked to g_free_list below.
+ // monitor which will be linked to LVars.free_list below.
stringStream ss;
for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
free_count++;
free_tail = s;
guarantee(s->object() == NULL, "invariant");
@@ -1738,20 +1753,19 @@
int l_om_free_count = self->om_free_count;
ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
"l_om_free_count=%d, free_count=%d", l_om_free_count,
free_count);
self->om_free_count = 0;
- self->om_free_list = NULL;
- OrderAccess::storestore(); // Lazier memory is okay for list walkers.
+ Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
}
if (free_tail != NULL) {
- prepend_list_to_g_free_list(free_list, free_tail, free_count);
+ prepend_list_to_global_free_list(free_list, free_tail, free_count);
}
if (in_use_tail != NULL) {
- prepend_list_to_g_om_in_use_list(in_use_list, in_use_tail, in_use_count);
+ prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
}
LogStreamHandle(Debug, monitorinflation) lsh_debug;
LogStreamHandle(Info, monitorinflation) lsh_info;
LogStream* ls = NULL;
@@ -2135,11 +2149,11 @@
if (*free_tail_p != NULL) {
// We append to the list so the caller can use mid->_next_om
// to fix the linkages in its context.
ObjectMonitor* prevtail = *free_tail_p;
// Should have been cleaned up by the caller:
- // Note: Should not have to mark prevtail here since we're at a
+ // Note: Should not have to lock prevtail here since we're at a
// safepoint and ObjectMonitors on the local free list should
// not be accessed in parallel.
assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
INTPTR_FORMAT, p2i(prevtail->_next_om));
set_next(prevtail, mid);
@@ -2254,14 +2268,14 @@
if (*free_tail_p != NULL) {
// We append to the list so the caller can use mid->_next_om
// to fix the linkages in its context.
ObjectMonitor* prevtail = *free_tail_p;
// Should have been cleaned up by the caller:
- ObjectMonitor* next = mark_next_loop(prevtail);
+ om_lock(prevtail);
assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om="
INTPTR_FORMAT, p2i(unmarked_next(prevtail)));
- set_next(prevtail, mid); // prevtail now points to mid (and is unmarked)
+ set_next(prevtail, mid); // prevtail now points to mid (and is unlocked)
}
*free_tail_p = mid;
// At this point, mid->_next_om still refers to its current
// value and another ObjectMonitor's _next_om field still
@@ -2278,11 +2292,13 @@
// Add back max_jint to restore the ref_count field to its
// proper value (which may not be what we saw above):
Atomic::add(&mid->_ref_count, max_jint);
- DEBUG_ONLY(jint l_ref_count = mid->ref_count();)
+#ifdef ASSERT
+ jint l_ref_count = mid->ref_count();
+#endif
assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
l_ref_count, mid->ref_count());
return false;
}
@@ -2307,156 +2323,153 @@
// process the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
-int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor* volatile * list_p,
- int volatile * count_p,
+int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
+ int* count_p,
ObjectMonitor** free_head_p,
ObjectMonitor** free_tail_p) {
ObjectMonitor* cur_mid_in_use = NULL;
ObjectMonitor* mid = NULL;
ObjectMonitor* next = NULL;
int deflated_count = 0;
- // We use the simpler mark-mid-as-we-go protocol since there are no
+ // We use the simpler lock-mid-as-we-go protocol since there are no
// parallel list deletions since we are at a safepoint.
- if (!mark_list_head(list_p, &mid, &next)) {
+ if ((mid = get_list_head_locked(list_p)) == NULL) {
return 0; // The list is empty so nothing to deflate.
}
+ next = unmarked_next(mid);
while (true) {
oop obj = (oop) mid->object();
if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
// Deflation succeeded and already updated free_head_p and
// free_tail_p as needed. Finish the move to the local free list
// by unlinking mid from the global or per-thread in-use list.
if (cur_mid_in_use == NULL) {
- // mid is the list head and it is marked. Switch the list head
- // to next which unmarks the list head, but leaves mid marked:
- *list_p = next;
- // mark_list_head() used cmpxchg() above, switching list head can be lazier:
- OrderAccess::storestore();
+ // mid is the list head and it is locked. Switch the list head
+ // to next which unlocks the list head, but leaves mid locked:
+ Atomic::store(list_p, next);
} else {
- // mid is marked. Switch cur_mid_in_use's next field to next
+ // mid is locked. Switch cur_mid_in_use's next field to next
// which is safe because we have no parallel list deletions,
- // but we leave mid marked:
- Atomic::release_store(&cur_mid_in_use->_next_om, next);
+ // but we leave mid locked:
+ set_next(cur_mid_in_use, next);
}
// At this point mid is disconnected from the in-use list so
- // its marked next field no longer has any effects.
+ // its lock no longer has any effects on the in-use list.
deflated_count++;
Atomic::dec(count_p);
// mid is current tail in the free_head_p list so NULL terminate it
- // (which also unmarks it):
+ // (which also unlocks it):
set_next(mid, NULL);
-
- // All the list management is done so move on to the next one:
- mid = next;
} else {
- set_next(mid, next); // unmark next field
-
- // All the list management is done so move on to the next one:
+ om_unlock(mid);
cur_mid_in_use = mid;
- mid = next;
}
+ // All the list management is done so move on to the next one:
+ mid = next;
if (mid == NULL) {
break; // Reached end of the list so nothing more to deflate.
}
- // Mark mid's next field so we can possibly deflate it:
- next = mark_next_loop(mid);
+ // Lock mid so we can possibly deflate it:
+ om_lock(mid);
+ next = unmarked_next(mid);
}
return deflated_count;
}
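Stripped of the per-node locking, the list surgery above reduces to a plain
head-or-middle splice. A single-threaded sketch (Node and remove_dead() are
hypothetical; cur_prev plays the role of cur_mid_in_use):

    struct Node { Node* next; bool dead; };

    int remove_dead(Node** list_p) {
      int removed = 0;
      Node* cur_prev = nullptr;
      Node* mid = *list_p;
      while (mid != nullptr) {
        Node* next = mid->next;
        if (mid->dead) {
          if (cur_prev == nullptr) {
            *list_p = next;          // mid was the list head
          } else {
            cur_prev->next = next;   // splice around mid
          }
          mid->next = nullptr;       // NULL-terminate, like set_next(mid, NULL)
          removed++;
        } else {
          cur_prev = mid;            // mid stays and becomes the new prev
        }
        mid = next;
      }
      return removed;
    }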
// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// If a safepoint has started, then we save state via saved_mid_in_use_p
// and return to the caller to honor the safepoint.
//
-int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor* volatile * list_p,
- int volatile * count_p,
+int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
+ int* count_p,
ObjectMonitor** free_head_p,
ObjectMonitor** free_tail_p,
ObjectMonitor** saved_mid_in_use_p) {
assert(AsyncDeflateIdleMonitors, "sanity check");
- assert(Thread::current()->is_Java_thread(), "precondition");
+ JavaThread* self = JavaThread::current();
ObjectMonitor* cur_mid_in_use = NULL;
ObjectMonitor* mid = NULL;
ObjectMonitor* next = NULL;
ObjectMonitor* next_next = NULL;
int deflated_count = 0;
- // We use the more complicated mark-cur_mid_in_use-and-mid-as-we-go
+ // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
// protocol because om_release() can do list deletions in parallel.
- // We also mark-next-next-as-we-go to prevent an om_flush() that is
+ // We also lock-next-next-as-we-go to prevent an om_flush() that is
// behind this thread from passing us.
if (*saved_mid_in_use_p == NULL) {
// No saved state so start at the beginning.
- // Mark the list head's next field so we can possibly deflate it:
- if (!mark_list_head(list_p, &mid, &next)) {
+ // Lock the list head so we can possibly deflate it:
+ if ((mid = get_list_head_locked(list_p)) == NULL) {
return 0; // The list is empty so nothing to deflate.
}
+ next = unmarked_next(mid);
} else {
// We're restarting after a safepoint so restore the necessary state
// before we resume.
cur_mid_in_use = *saved_mid_in_use_p;
- // Mark cur_mid_in_use's next field so we can possibly update its
+ // Lock cur_mid_in_use so we can possibly update its
// next field to extract a deflated ObjectMonitor.
- mid = mark_next_loop(cur_mid_in_use);
+ om_lock(cur_mid_in_use);
+ mid = unmarked_next(cur_mid_in_use);
if (mid == NULL) {
- set_next(cur_mid_in_use, NULL); // unmark next field
+ om_unlock(cur_mid_in_use);
*saved_mid_in_use_p = NULL;
return 0; // The remainder is empty so nothing more to deflate.
}
- // Mark mid's next field so we can possibly deflate it:
- next = mark_next_loop(mid);
+ // Lock mid so we can possibly deflate it:
+ om_lock(mid);
+ next = unmarked_next(mid);
}
while (true) {
- // The current mid's next field is marked at this point. If we have
- // a cur_mid_in_use, then its next field is also marked at this point.
+ // The current mid's next field is locked at this point. If we have
+ // a cur_mid_in_use, then it is also locked at this point.
if (next != NULL) {
- // We mark next's next field so that an om_flush()
- // thread that is behind us cannot pass us when we
- // unmark the current mid's next field.
- next_next = mark_next_loop(next);
+ // We lock next so that an om_flush() thread that is behind us
+ // cannot pass us when we unlock the current mid.
+ om_lock(next);
+ next_next = unmarked_next(next);
}
// Only try to deflate if there is an associated Java object and if
// mid is old (is not newly allocated and is not newly freed).
if (mid->object() != NULL && mid->is_old() &&
deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
// Deflation succeeded and already updated free_head_p and
// free_tail_p as needed. Finish the move to the local free list
// by unlinking mid from the global or per-thread in-use list.
if (cur_mid_in_use == NULL) {
- // mid is the list head and it is marked. Switch the list head
- // to next which is also marked (if not NULL) and also leave
- // mid marked:
- *list_p = next;
- // mark_list_head() used cmpxchg() above, switching list head can be lazier:
- OrderAccess::storestore();
- } else {
- ObjectMonitor* marked_next = mark_om_ptr(next);
- // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
- // next field to marked_next and also leave mid marked:
- Atomic::release_store(&cur_mid_in_use->_next_om, marked_next);
+ // mid is the list head and it is locked. Switch the list head
+ // to next which is also locked (if not NULL) and also leave
+ // mid locked:
+ Atomic::store(list_p, next);
+ } else {
+ ObjectMonitor* locked_next = mark_om_ptr(next);
+ // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
+ // next field to locked_next and also leave mid locked:
+ set_next(cur_mid_in_use, locked_next);
}
// At this point mid is disconnected from the in-use list so
- // its marked next field no longer has any effects.
+ // its lock no longer has any effects on the in-use list.
deflated_count++;
Atomic::dec(count_p);
// mid is current tail in the free_head_p list so NULL terminate it
- // (which also unmarks it):
+ // (which also unlocks it):
set_next(mid, NULL);
// All the list management is done so move on to the next one:
- mid = next; // mid keeps non-NULL next's marked next field
+ mid = next; // mid keeps non-NULL next's locked next field
next = next_next;
} else {
// mid is considered in-use if it does not have an associated
// Java object or mid is not old or deflation did not succeed.
// A mid->is_new() node can be seen here when it is freshly
@@ -2466,41 +2479,41 @@
// om_alloc() is released by om_release() due to losing the race
// in inflate().
// All the list management is done so move on to the next one:
if (cur_mid_in_use != NULL) {
- set_next(cur_mid_in_use, mid); // umark cur_mid_in_use
+ om_unlock(cur_mid_in_use);
}
- // The next cur_mid_in_use keeps mid's marked next field so
+ // The next cur_mid_in_use keeps mid's locked state so
// that it is stable for a possible next field change. It
- // cannot be modified by om_release() while it is marked.
+ // cannot be modified by om_release() while it is locked.
cur_mid_in_use = mid;
- mid = next; // mid keeps non-NULL next's marked next field
+ mid = next; // mid keeps non-NULL next's locked state
next = next_next;
- if (SafepointSynchronize::is_synchronizing() &&
- cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) {
+ if (SafepointMechanism::should_block(self) &&
+ cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
// If a safepoint has started and cur_mid_in_use is not the list
// head and is old, then it is safe to use as saved state. Return
// to the caller before blocking.
*saved_mid_in_use_p = cur_mid_in_use;
- set_next(cur_mid_in_use, mid); // umark cur_mid_in_use
+ om_unlock(cur_mid_in_use);
if (mid != NULL) {
- set_next(mid, next); // umark mid
+ om_unlock(mid);
}
return deflated_count;
}
}
if (mid == NULL) {
if (cur_mid_in_use != NULL) {
- set_next(cur_mid_in_use, mid); // umark cur_mid_in_use
+ om_unlock(cur_mid_in_use);
}
break; // Reached end of the list so nothing more to deflate.
}
- // The current mid's next field is marked at this point. If we have
- // a cur_mid_in_use, then its next field is also marked at this point.
+ // The current mid's next field is locked at this point. If we have
+ // a cur_mid_in_use, then it is also locked at this point.
}
// We finished the list without a safepoint starting so there's
// no need to save state.
*saved_mid_in_use_p = NULL;
return deflated_count;
@@ -2537,27 +2550,27 @@
}
// Note: the thread-local monitors lists get deflated in
// a separate pass. See deflate_thread_local_monitors().
- // For moribund threads, scan g_om_in_use_list
+ // For moribund threads, scan LVars.in_use_list
int deflated_count = 0;
- if (g_om_in_use_list != NULL) {
- // Update n_in_circulation before g_om_in_use_count is updated by deflation.
- Atomic::add(&counters->n_in_circulation, g_om_in_use_count);
+ if (Atomic::load(&LVars.in_use_list) != NULL) {
+ // Update n_in_circulation before LVars.in_use_count is updated by deflation.
+ Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count));
- deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
- Atomic::add(&counters->n_in_use, g_om_in_use_count);
+ deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p);
+ Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
}
if (free_head_p != NULL) {
// Move the deflated ObjectMonitors back to the global free list.
// No races on the working free list so no need for load_acquire().
guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
INTPTR_FORMAT, p2i(free_tail_p->_next_om));
- prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
+ prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
Atomic::add(&counters->n_scavenged, deflated_count);
}
timer.stop();
LogStreamHandle(Debug, monitorinflation) lsh_debug;
@@ -2601,30 +2614,27 @@
}
if (count > 0) {
log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
}
- log_info(monitorinflation)("async g_om_population=%d, g_om_in_use_count=%d, "
- "g_om_free_count=%d, g_om_wait_count=%d",
- g_om_population, g_om_in_use_count,
- g_om_free_count, g_om_wait_count);
+ log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
+ "global_free_count=%d, global_wait_count=%d",
+ Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
+ Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
// The ServiceThread's async deflation request has been processed.
set_is_async_deflation_requested(false);
- if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) {
+ if (HandshakeAfterDeflateIdleMonitors && Atomic::load(&LVars.wait_count) > 0) {
// There are deflated ObjectMonitors waiting for a handshake
// (or a safepoint) for safety.
- // g_wait_list and g_om_wait_count are only updated by the calling
- // thread so no need for load_acquire() or release_store().
- ObjectMonitor* list = g_wait_list;
- ADIM_guarantee(list != NULL, "g_wait_list must not be NULL");
- int count = g_om_wait_count;
- g_om_wait_count = 0;
- g_wait_list = NULL;
- OrderAccess::storestore(); // Lazier memory sync is okay for list walkers.
+ ObjectMonitor* list = Atomic::load(&LVars.wait_list);
+ ADIM_guarantee(list != NULL, "LVars.wait_list must not be NULL");
+ int count = Atomic::load(&LVars.wait_count);
+ Atomic::store(&LVars.wait_count, 0);
+ Atomic::store(&LVars.wait_list, (ObjectMonitor*)NULL);
- // Find the tail for prepend_list_to_common(). No need to mark
+ // Find the tail for prepend_list_to_common(). No need to lock
// ObjectMonitors for this list walk since only the deflater
// thread manages the wait list.
int l_count = 0;
@@ -2637,11 +2647,11 @@
// Will execute a safepoint if !ThreadLocalHandshakes:
HandshakeForDeflation hfd_hc;
Handshake::execute(&hfd_hc);
- prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count);
+ prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
}
}
@@ -2678,19 +2688,19 @@
if (log_is_enabled(Info, monitorinflation)) {
timer.start();
}
if (is_global) {
- OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
+ OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&LVars.in_use_count)));
} else {
OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
}
do {
int local_deflated_count;
if (is_global) {
- local_deflated_count = deflate_monitor_list_using_JT(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
+ local_deflated_count = deflate_monitor_list_using_JT(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
} else {
local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
}
deflated_count += local_deflated_count;
@@ -2708,13 +2718,13 @@
// all out.
assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om="
INTPTR_FORMAT, p2i(unmarked_next(free_tail_p)));
if (HandshakeAfterDeflateIdleMonitors) {
- prepend_list_to_g_wait_list(free_head_p, free_tail_p, local_deflated_count);
+ prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
} else {
- prepend_list_to_g_free_list(free_head_p, free_tail_p, local_deflated_count);
+ prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
}
OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
}
@@ -2725,11 +2735,11 @@
if (is_global) {
log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
} else {
log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
}
- assert(SafepointSynchronize::is_synchronizing(), "sanity check");
+ assert(SafepointMechanism::should_block(self), "sanity check");
ThreadBlockInVM blocker(self);
}
// Prepare for another loop after the safepoint.
free_head_p = NULL;
free_tail_p = NULL;
@@ -2777,14 +2787,14 @@
// For async deflation, audit_and_print_stats() is called in
// ObjectSynchronizer::do_safepoint_work() at the Debug level
// at a safepoint.
ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
} else if (log_is_enabled(Info, monitorinflation)) {
- log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
- "g_om_free_count=%d, g_om_wait_count=%d",
- g_om_population, g_om_in_use_count,
- g_om_free_count, g_om_wait_count);
+ log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
+ "global_free_count=%d, global_wait_count=%d",
+ Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
+ Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
}
Atomic::store(&_forceMonitorScavenge, 0); // Reset
OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
@@ -2825,11 +2835,11 @@
// Move the deflated ObjectMonitors back to the global free list.
// No races on the working list so no need for load_acquire().
guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
INTPTR_FORMAT, p2i(free_tail_p->_next_om));
- prepend_list_to_g_free_list(free_head_p, free_tail_p, deflated_count);
+ prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
Atomic::add(&counters->n_scavenged, deflated_count);
Atomic::add(&counters->per_thread_scavenged, deflated_count);
}
timer.stop();
@@ -2946,31 +2956,31 @@
int chk_om_population = log_monitor_list_counts(ls);
int error_cnt = 0;
ls->print_cr("Checking global lists:");
- // Check g_om_population:
- if (g_om_population == chk_om_population) {
- ls->print_cr("g_om_population=%d equals chk_om_population=%d",
- g_om_population, chk_om_population);
+ // Check LVars.population:
+ if (Atomic::load(&LVars.population) == chk_om_population) {
+ ls->print_cr("global_population=%d equals chk_om_population=%d",
+ Atomic::load(&LVars.population), chk_om_population);
} else {
// With lock free access to the monitor lists, it is possible for
// log_monitor_list_counts() to return a value that doesn't match
- // g_om_population. So far a higher value has been seen in testing
+ // LVars.population. So far a higher value has been seen in testing
// so something is being double counted by log_monitor_list_counts().
- ls->print_cr("WARNING: g_om_population=%d is not equal to "
- "chk_om_population=%d", g_om_population, chk_om_population);
+ ls->print_cr("WARNING: global_population=%d is not equal to "
+ "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population);
}
- // Check g_om_in_use_list and g_om_in_use_count:
+ // Check LVars.in_use_list and LVars.in_use_count:
chk_global_in_use_list_and_count(ls, &error_cnt);
- // Check g_free_list and g_om_free_count:
+ // Check LVars.free_list and LVars.free_count:
chk_global_free_list_and_count(ls, &error_cnt);
if (HandshakeAfterDeflateIdleMonitors) {
- // Check g_wait_list and g_om_wait_count:
+ // Check LVars.wait_list and LVars.wait_count:
chk_global_wait_list_and_count(ls, &error_cnt);
}
ls->print_cr("Checking per-thread lists:");
@@ -3043,97 +3053,117 @@
}
*error_cnt_p = *error_cnt_p + 1;
}
}
+// Lock the next ObjectMonitor for traversal. The current ObjectMonitor
+// is unlocked after the next ObjectMonitor is locked. *cur_p and *next_p
+// are updated to their next values in the list traversal. *cur_p is set
+// to NULL when the end of the list is reached.
+static void lock_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
+ ObjectMonitor* prev = *cur_p; // Save current for unlocking.
+ if (*next_p == NULL) { // Reached the end of the list.
+ om_unlock(prev); // Unlock previous.
+ *cur_p = NULL; // Tell the caller we are done.
+ return;
+ }
+ om_lock(*next_p); // Lock next.
+ om_unlock(prev); // Unlock previous.
+ *cur_p = *next_p; // Update current.
+ *next_p = unmarked_next(*cur_p); // Update next.
+}
+
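lock_next_for_traversal() is hand-over-hand locking (lock coupling): the walker
always holds at least one node, so a concurrent deleter can never unlink the
node it is standing on. A generic standalone sketch of the pattern, using
std::mutex instead of the _next_om lock bit:

    #include <mutex>

    struct Node {
      std::mutex lock;
      int value;
      Node* next;
    };

    template <typename Fn>
    void for_each_hand_over_hand(Node* head, Fn fn) {
      if (head == nullptr) return;
      head->lock.lock();
      for (Node* cur = head; cur != nullptr; ) {
        fn(cur->value);
        Node* next = cur->next;
        if (next != nullptr) next->lock.lock();  // lock next first...
        cur->lock.unlock();                      // ...then release current
        cur = next;
      }
    }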
// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
int *error_cnt_p) {
int chk_om_free_count = 0;
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&g_free_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) {
+ next = unmarked_next(cur);
- // Marked the global free list head so process the list.
+ // Locked the global free list head so process the list.
while (true) {
chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
chk_om_free_count++;
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
- if (g_om_free_count == chk_om_free_count) {
- out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
- g_om_free_count, chk_om_free_count);
- } else {
- // With lock free access to g_free_list, it is possible for an
- // ObjectMonitor to be prepended to g_free_list after we started
- // calculating chk_om_free_count so g_om_free_count may not
+ if (Atomic::load(&LVars.free_count) == chk_om_free_count) {
+ out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
+ Atomic::load(&LVars.free_count), chk_om_free_count);
+ } else {
+ // With lock free access to LVars.free_list, it is possible for an
+ // ObjectMonitor to be prepended to LVars.free_list after we started
+ // calculating chk_om_free_count so LVars.free_count may not
// match anymore.
- out->print_cr("WARNING: g_om_free_count=%d is not equal to "
- "chk_om_free_count=%d", g_om_free_count, chk_om_free_count);
+ out->print_cr("WARNING: global_free_count=%d is not equal to "
+ "chk_om_free_count=%d", Atomic::load(&LVars.free_count), chk_om_free_count);
}
}
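The warning above exists because a lock-free prepend publishes the new list head and bumps the counter in two separate steps. A hedged sketch of that shape follows; it is not the actual prepend helper in this file, set_next_om() is an assumed setter for the _next_om field, and the Atomic::cmpxchg argument order follows the dest-first convention that Atomic::load/add use elsewhere in this diff:

 void prepend_to_free_list_sketch(ObjectMonitor* m) {
   while (true) {
     ObjectMonitor* head = Atomic::load(&LVars.free_list);
     m->set_next_om(head);  // assumed setter; links m in front of the old head
     // Publish m as the new head; retry if another thread won the race.
     if (Atomic::cmpxchg(&LVars.free_list, head, m) == head) {
       break;
     }
   }
   // The counter is bumped after the push, so a concurrent counting walk
   // can observe the new node before free_count reflects it (or vice versa),
   // which is why the checker above only warns on a mismatch.
   Atomic::add(&LVars.free_count, 1);
 }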
// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
int *error_cnt_p) {
int chk_om_wait_count = 0;
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&g_wait_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&LVars.wait_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the global wait list head so process the list.
while (true) {
- // Rules for g_wait_list are the same as of g_free_list:
+ // Rules for LVars.wait_list are the same as for LVars.free_list:
chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
chk_om_wait_count++;
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
- if (g_om_wait_count == chk_om_wait_count) {
- out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d",
- g_om_wait_count, chk_om_wait_count);
+ if (Atomic::load(&LVars.wait_count) == chk_om_wait_count) {
+ out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
+ Atomic::load(&LVars.wait_count), chk_om_wait_count);
} else {
- out->print_cr("ERROR: g_om_wait_count=%d is not equal to "
- "chk_om_wait_count=%d", g_om_wait_count, chk_om_wait_count);
+ out->print_cr("ERROR: global_wait_count=%d is not equal to "
+ "chk_om_wait_count=%d", Atomic::load(&LVars.wait_count), chk_om_wait_count);
*error_cnt_p = *error_cnt_p + 1;
}
}
// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
int *error_cnt_p) {
int chk_om_in_use_count = 0;
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the global in-use list head so process the list.
while (true) {
chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
chk_om_in_use_count++;
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
- if (g_om_in_use_count == chk_om_in_use_count) {
- out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
- g_om_in_use_count, chk_om_in_use_count);
+ if (Atomic::load(&LVars.in_use_count) == chk_om_in_use_count) {
+ out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
+ Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
} else {
// With lock free access to the monitor lists, it is possible for
// an exiting JavaThread to put its in-use ObjectMonitors on the
// global in-use list after chk_om_in_use_count is calculated above.
- out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
- g_om_in_use_count, chk_om_in_use_count);
+ out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
+ Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
}
}
// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
@@ -3199,17 +3229,18 @@
outputStream * out,
int *error_cnt_p) {
int chk_om_free_count = 0;
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&jt->om_free_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the per-thread free list head so process the list.
while (true) {
chk_free_entry(jt, cur, out, error_cnt_p);
chk_om_free_count++;
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
@@ -3230,17 +3261,18 @@
outputStream * out,
int *error_cnt_p) {
int chk_om_in_use_count = 0;
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the per-thread in-use list head so process the list.
while (true) {
chk_in_use_entry(jt, cur, out, error_cnt_p);
chk_om_in_use_count++;
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
@@ -3259,19 +3291,20 @@
// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
stringStream ss;
- if (g_om_in_use_count > 0) {
+ if (Atomic::load(&LVars.in_use_count) > 0) {
out->print_cr("In-use global monitor info:");
out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
out->print_cr("%18s %s %7s %18s %18s",
"monitor", "BHL", "ref_cnt", "object", "object type");
out->print_cr("================== === ======= ================== ==================");
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the global in-use list head so process the list.
while (true) {
const oop obj = (oop) cur->object();
const markWord mark = cur->header();
ResourceMark rm;
@@ -3283,11 +3316,11 @@
out->print(" (%s)", cur->is_busy_to_string(&ss));
ss.reset();
}
out->cr();
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
@@ -3299,11 +3332,12 @@
"jt", "monitor", "BHL", "ref_cnt", "object", "object type");
out->print_cr("================== ================== === ======= ================== ==================");
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
ObjectMonitor* cur = NULL;
ObjectMonitor* next = NULL;
- if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
+ if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
+ next = unmarked_next(cur);
 // Locked the per-thread in-use list head so process the list.
while (true) {
const oop obj = (oop) cur->object();
const markWord mark = cur->header();
ResourceMark rm;
@@ -3315,11 +3349,11 @@
out->print(" (%s)", cur->is_busy_to_string(&ss));
ss.reset();
}
out->cr();
- mark_next_for_traversal(&cur, &next);
+ lock_next_for_traversal(&cur, &next);
if (cur == NULL) {
break;
}
}
}
@@ -3333,15 +3367,15 @@
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
int pop_count = 0;
out->print_cr("%18s %10s %10s %10s %10s",
"Global Lists:", "InUse", "Free", "Wait", "Total");
out->print_cr("================== ========== ========== ========== ==========");
- out->print_cr("%18s %10d %10d %10d %10d", "", g_om_in_use_count,
- g_om_free_count, g_om_wait_count, g_om_population);
- pop_count += g_om_in_use_count + g_om_free_count;
+ out->print_cr("%18s %10d %10d %10d %10d", "", Atomic::load(&LVars.in_use_count),
+ Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count), Atomic::load(&LVars.population));
+ pop_count += Atomic::load(&LVars.in_use_count) + Atomic::load(&LVars.free_count);
if (HandshakeAfterDeflateIdleMonitors) {
- pop_count += g_om_wait_count;
+ pop_count += Atomic::load(&LVars.wait_count);
}
out->print_cr("%18s %10s %10s %10s",
"Per-Thread Lists:", "InUse", "Free", "Provision");
out->print_cr("================== ========== ========== ==========");
@@ -3359,24 +3393,23 @@
// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
- PaddedObjectMonitor* block = g_block_list;
+ PaddedObjectMonitor* block = Atomic::load(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
address mon = (address)monitor;
address blk = (address)block;
size_t diff = mon - blk;
assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
return 1;
}
- // unmarked_next() is not needed with g_block_list (no next field
- // marking) and no load_acquire() needed because _next_om is
- // updated before g_block_list is changed with cmpxchg().
- block = (PaddedObjectMonitor*)block->_next_om;
+ // unmarked_next() is not needed with g_block_list (no locking is
+ // used with block linkage _next_om fields).
+ block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
}
return 0;
}
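A worked example of the containment test above, using illustrative numbers only (the real sizes depend on OM_CACHE_LINE_SIZE and _BLOCKSIZE):

 // Suppose sizeof(PaddedObjectMonitor) == 128 and _BLOCKSIZE == 128.
 //   blk     = 0x10000                        // &block[0], the chain header
 //   blk_end = 0x10000 + 128 * 128 = 0x14000  // &block[_BLOCKSIZE]
 //   mon     = 0x10080                        // candidate monitor address
 // mon > blk and mon < blk_end, and diff = 0x10080 - 0x10000 = 0x80 is a
 // multiple of 128, so the candidate is element 1 of this block (element 0
 // is reserved as the block header and is never handed out as a monitor).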
#endif