< prev index next >
src/hotspot/share/runtime/objectMonitor.cpp
Print this page
rev 55489 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 55490 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 55491 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 55492 : imported patch dcubed.monitor_deflate_conc.v2.03
rev 55494 : imported patch dcubed.monitor_deflate_conc.v2.05
@@ -237,11 +237,11 @@
// -----------------------------------------------------------------------------
// Enter support
void ObjectMonitor::enter(TRAPS) {
- ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+ ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD;
@@ -264,10 +264,20 @@
// a full-fledged "Thread *".
_owner = Self;
return;
}
+ if (AsyncDeflateIdleMonitors &&
+ Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ // The deflation protocol finished the first part (setting owner),
+ // but it failed the second part (making ref_count negative) and
+ // bailed. Or the ObjectMonitor was async deflated and reused.
+ // Acquired the monitor.
+ assert(_recursions == 0, "invariant");
+ return;
+ }
+
// We've encountered genuine contention.
assert(Self->_Stalled == 0, "invariant");
Self->_Stalled = intptr_t(this);
// Try one round of spinning *before* enqueueing Self
@@ -435,12 +445,12 @@
return;
}
markOop dmw = header();
if (dmw == NULL) {
- // ObjectMonitor's header/dmw has been cleared by the deflating
- // thread so the object's header has already been restored.
+ // ObjectMonitor's header/dmw has been cleared so the object's
+ // header has already been restored.
return;
}
// A non-NULL dmw has to be either neutral (not locked and not marked)
// or is already participating in this restoration protocol.
@@ -456,12 +466,12 @@
// All of the callers to this function can be racing with each
// other trying to update the _header field.
dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
if (dmw == NULL) {
- // ObjectMonitor's header/dmw has been cleared by the deflating
- // thread so the object's header has already been restored.
+ // ObjectMonitor's header/dmw has been cleared so the object's
+ // header has already been restored.
return;
}
// The _header field is now marked. The winner's 'dmw' variable
// contains the original, unmarked header/dmw value and any
// losers have a marked header/dmw value that will be cleaned
@@ -497,20 +507,27 @@
}
// Convert the fields used by is_busy() to a string that can be
// used for diagnostic output.
const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
- ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
- ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
- _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
+ ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
+ if (!AsyncDeflateIdleMonitors) {
+ ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
+ } else if (_owner != DEFLATER_MARKER) {
+ ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
+ } else {
+ ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
+ }
+ ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
+ p2i(_EntryList));
return ss->base();
}
#define MAX_RECHECK_INTERVAL 1000
void ObjectMonitor::EnterI(TRAPS) {
- ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+ ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
Thread * const Self = THREAD;
assert(Self->is_Java_thread(), "invariant");
assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
@@ -520,20 +537,20 @@
assert(_owner == Self, "invariant");
assert(_Responsible != Self, "invariant");
return;
}
- if (_owner == DEFLATER_MARKER) {
- // The deflation protocol finished the first part (setting owner), but
- // it failed the second part (making ref_count negative) and bailed.
- if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ if (AsyncDeflateIdleMonitors &&
+ Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ // The deflation protocol finished the first part (setting owner),
+ // but it failed the second part (making ref_count negative) and
+ // bailed. Or the ObjectMonitor was async deflated and reused.
// Acquired the monitor.
assert(_succ != Self, "invariant");
assert(_Responsible != Self, "invariant");
return;
}
- }
assert(InitDone, "Unexpectedly not initialized");
// We try one round of spinning *before* enqueueing Self.
//
@@ -647,18 +664,18 @@
Self->_ParkEvent->park();
}
if (TryLock(Self) > 0) break;
- if (_owner == DEFLATER_MARKER) {
- // The deflation protocol finished the first part (setting owner), but
- // it failed the second part (making ref_count negative) and bailed.
- if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ if (AsyncDeflateIdleMonitors &&
+ Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ // The deflation protocol finished the first part (setting owner),
+ // but it failed the second part (making ref_count negative) and
+ // bailed. Or the ObjectMonitor was async deflated and reused.
// Acquired the monitor.
break;
}
- }
// The lock is still contested.
// Keep a tally of the # of futile wakeups.
// Note that the counter is not protected by a lock or updated by atomics.
// That is by design - we trade "lossy" counters which are exposed to
@@ -760,11 +777,11 @@
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI().
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
- ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+ ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
assert(Self != NULL, "invariant");
assert(SelfNode != NULL, "invariant");
assert(SelfNode->_thread == Self, "invariant");
assert(_waiters > 0, "invariant");
@@ -779,18 +796,18 @@
assert(_owner != Self, "invariant");
if (TryLock(Self) > 0) break;
if (TrySpin(Self) > 0) break;
- if (_owner == DEFLATER_MARKER) {
- // The deflation protocol finished the first part (setting owner), but
- // it failed the second part (making ref_count negative) and bailed.
- if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ if (AsyncDeflateIdleMonitors &&
+ Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+ // The deflation protocol finished the first part (setting owner),
+ // but it failed the second part (making ref_count negative) and
+ // bailed. Or the ObjectMonitor was async deflated and reused.
// Acquired the monitor.
break;
}
- }
// State transition wrappers around park() ...
// ReenterI() wisely defers state transitions until
// it's clear we must park the thread.
{
@@ -2081,11 +2098,11 @@
if (AsyncDeflateIdleMonitors) {
// Race here if monitor is not owned! The above ref_count bump
// will cause subsequent async deflation to skip it. However,
// previous or concurrent async deflation is a race.
- if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
+ if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
// Async deflation is in progress and our ref_count increment
// above lost the race to async deflation. Attempt to restore
// the header/dmw to the object's header so that we only retry
// once if the deflater thread happens to be slow.
om_ptr->install_displaced_markword_in_object(object);
< prev index next >