src/share/vm/runtime/synchronizer.cpp
rev 13054 : imported patch monitor_deflate_conc
@@ -113,18 +113,23 @@
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
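+// list of monitors scavenged by the concurrent deflater, staged here and
+// spliced onto gFreeList at the next safepoint (see do_safepoint_work())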
+ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepoint = NULL;
+ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepointTail = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;
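+// set at a safepoint to request a concurrent deflation pass; cleared by
+// deflate_idle_monitors_conc()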
+bool ObjectSynchronizer::_should_deflate_idle_monitors_conc = false;
+
static volatile intptr_t gListLock = 0; // protects global monitor lists
static volatile int gMonitorFreeCount = 0; // # on gFreeList
+static int gMonitorFreeCountNextSafepoint = 0; // # on gFreeListNextSafepoint
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
const oop,
const ObjectSynchronizer::InflateCause);
@@ -338,10 +343,11 @@
// Interpreter/Compiler Slow Case
// This routine is used to handle interpreter/compiler slow case
// We don't need to use fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
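+ // Retry loop: with concurrent deflation the monitor returned by inflate()
+ // can be deflated before we manage to enter it; enter() then returns false
+ // and we inflate again.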
+ do {
markOop mark = obj->mark();
assert(!mark->has_bias_pattern(), "should not see bias pattern here");
if (mark->is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
@@ -363,13 +369,13 @@
// The object header will never be displaced to this lock,
// so it does not matter what the value is, except that it
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
lock->set_displaced_header(markOopDesc::unused_mark());
- ObjectSynchronizer::inflate(THREAD,
+ } while (!ObjectSynchronizer::inflate(THREAD,
obj(),
- inflate_cause_monitor_enter)->enter(THREAD);
+ inflate_cause_monitor_enter)->enter(THREAD));
}
// This routine is used to handle interpreter/compiler slow case
// We don't need to use fast path here, because it must have
// failed in the interpreter/compiler code. Simply use the heavy
@@ -410,15 +416,16 @@
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
- ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
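+ // As in slow_enter(): loop because reenter() fails (returns false) if the
+ // monitor was deflated concurrently after inflate() returned it.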
+ ObjectMonitor* monitor;
+ do {
+ monitor = ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_vm_internal);
-
- monitor->reenter(recursion, THREAD);
+ } while (!monitor->reenter(recursion, THREAD));
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
@@ -427,11 +434,11 @@
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
THREAD->set_current_pending_monitor_is_from_java(false);
- ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
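+ // Spin until enter() succeeds; it fails when the freshly inflated monitor
+ // is deflated concurrently before we can enter it.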
+ while (!ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD));
THREAD->set_current_pending_monitor_is_from_java(true);
}
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
@@ -710,10 +717,11 @@
TEVENT(hashCode: GENERATE);
return value;
}
intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
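+ // With AsyncDeflateIdleMonitors a competing deflater can mark the
+ // monitor's displaced header; every path below that observes a marked
+ // header restarts from here.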
+ Retry:
if (UseBiasedLocking) {
// NOTE: many places throughout the JVM do not expect a safepoint
// to be taken here, in particular most operations on perm gen
// objects. However, we only ever bias Java instances and all of
// the call sites of identity_hash that might revoke biases have
@@ -766,11 +774,11 @@
// into heavy weight monitor. We could add more code here
// for the fast path, but it is not worth the complexity.
} else if (mark->has_monitor()) {
monitor = mark->monitor();
temp = monitor->header();
- assert(temp->is_neutral(), "invariant");
+ assert(temp->is_neutral() || (temp->hash() == 0 && temp->is_marked()), "invariant");
hash = temp->hash();
if (hash) {
return hash;
}
// Skip to the following code to reduce code size
@@ -794,21 +802,38 @@
// Inflate the monitor to set hash code
monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
// Load displaced header and check it has hash code
mark = monitor->header();
- assert(mark->is_neutral(), "invariant");
+ assert(mark->is_neutral() || (mark->hash() == 0 && mark->is_marked()), "invariant");
hash = mark->hash();
if (hash == 0) {
hash = get_next_hash(Self, obj);
- temp = mark->copy_set_hash(hash); // merge hash code into header
+ temp = mark->set_unmarked()->copy_set_hash(hash); // merge hash code into header
assert(temp->is_neutral(), "invariant");
+ if (mark->is_marked()) {
+ // Monitor is being deflated. Try installing mark word with hash code into obj.
+ markOop monitor_mark = markOopDesc::encode(monitor);
+ if (obj->cas_set_mark(temp, monitor_mark) == monitor_mark) {
+ return hash;
+ } else {
+ // Somebody else installed a new mark word in obj. Start over. We are
+ // still making progress, since the new mark word is no longer a pointer
+ // to this monitor.
+ goto Retry;
+ }
+ }
test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
if (test != mark) {
- // The only update to the header in the monitor (outside GC)
- // is install the hash code. If someone add new usage of
- // displaced header, please update this code
+ // The only update to the header in the monitor (outside GC) is to install
+ // the hash code or to mark the header to signal that the monitor is being
+ // deflated. If someone adds a new usage of the displaced header, please
+ // update this code.
+ if (test->is_marked()) {
+ // Monitor is being deflated. Make progress by starting over.
+ assert(test->hash() == 0, "invariant");
+ goto Retry;
+ }
hash = test->hash();
assert(test->is_neutral(), "invariant");
assert(hash != 0, "Trivial unexpected object/monitor header usage.");
}
}
@@ -980,11 +1005,12 @@
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = (ObjectMonitor *)&block[i];
- if (mid->object() != NULL) {
+ if (mid->is_active()) {
+ assert(mid->object() != NULL, "invariant");
f->do_oop((oop*)mid->object_addr());
}
}
}
}
@@ -1076,18 +1102,20 @@
void ObjectSynchronizer::verifyInUse(Thread *Self) {
ObjectMonitor* mid;
int in_use_tally = 0;
for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
in_use_tally++;
+ guarantee(mid->is_active(), "invariant");
}
- assert(in_use_tally == Self->omInUseCount, "in-use count off");
+ guarantee(in_use_tally == Self->omInUseCount, "in-use count off");
int free_tally = 0;
for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
free_tally++;
+ guarantee(mid->is_free(), "invariant");
}
- assert(free_tally == Self->omFreeCount, "free count off");
+ guarantee(free_tally == Self->omFreeCount, "free count off");
}
ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
// A large MAXPRIVATE value reduces both list lock contention
// and list coherency traffic, but also tends to increase the
@@ -1108,20 +1136,22 @@
if (m != NULL) {
Self->omFreeList = m->FreeNext;
Self->omFreeCount--;
// CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
guarantee(m->object() == NULL, "invariant");
+ m->set_allocation_state(ObjectMonitor::New);
if (MonitorInUseLists) {
m->FreeNext = Self->omInUseList;
Self->omInUseList = m;
Self->omInUseCount++;
if (ObjectMonitor::Knob_VerifyInUse) {
verifyInUse(Self);
}
} else {
m->FreeNext = NULL;
}
+ assert(!m->is_free(), "post-condition");
return m;
}
// 2: try to allocate from the global gFreeList
// CONSIDER: use muxTry() instead of muxAcquire().
@@ -1135,13 +1165,16 @@
Thread::muxAcquire(&gListLock, "omAlloc");
for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
gMonitorFreeCount--;
ObjectMonitor * take = gFreeList;
gFreeList = take->FreeNext;
- guarantee(take->object() == NULL, "invariant");
+ take->set_object(NULL);
+ take->set_owner(NULL);
+ take->_count = 0;
guarantee(!take->is_busy(), "invariant");
take->Recycle();
+ assert(take->is_free(), "invariant");
omRelease(Self, take, false);
}
Thread::muxRelease(&gListLock);
Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
@@ -1192,10 +1225,11 @@
// linkage should be reconsidered. A better implementation would
// look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
for (int i = 1; i < _BLOCKSIZE; i++) {
temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
+ assert(temp[i].is_free(), "invariant");
}
// terminate the last monitor as the end of list
temp[_BLOCKSIZE - 1].FreeNext = NULL;
@@ -1241,10 +1275,11 @@
void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
bool fromPerThreadAlloc) {
guarantee(m->object() == NULL, "invariant");
guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
+ m->set_allocation_state(ObjectMonitor::Free);
// Remove from omInUseList
if (MonitorInUseLists && fromPerThreadAlloc) {
ObjectMonitor* cur_mid_in_use = NULL;
bool extracted = false;
for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
@@ -1355,22 +1390,25 @@
// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
markOop mark = obj->mark();
if (mark->has_monitor()) {
assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
- assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
+ markOop dmw = mark->monitor()->header();
+ assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "monitor must record a good object header");
+ if (dmw->is_neutral()) {
return mark->monitor();
}
+ }
return ObjectSynchronizer::inflate(Thread::current(),
obj,
inflate_cause_vm_internal);
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
oop object,
const InflateCause cause) {
-
+ Retry:
// Inflate mutates the heap ...
// Relaxing assertion for bug 6320749.
assert(Universe::verify_in_progress() ||
!SafepointSynchronize::is_at_safepoint(), "invariant");
@@ -1388,11 +1426,15 @@
// * BIASED - Illegal. We should never see this
// CASE: inflated
if (mark->has_monitor()) {
ObjectMonitor * inf = mark->monitor();
- assert(inf->header()->is_neutral(), "invariant");
+ markOop dmw = inf->header();
+ assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "invariant");
+ if (dmw->is_marked()) {
+ goto Retry;
+ }
assert(inf->object() == object, "invariant");
assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
return inf;
}
@@ -1595,10 +1637,118 @@
enum ManifestConstants {
ClearResponsibleAtSTW = 0
};
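+// Safepoint work: splice the monitors scavenged by the previous concurrent
+// deflation pass onto gFreeList, then request a new pass by notifying the
+// Service_lock waiter (assumed here to be the service thread).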
+void ObjectSynchronizer::do_safepoint_work() {
+ if (MonitorInUseLists || !AsyncDeflateIdleMonitors) {
+ ObjectSynchronizer::deflate_idle_monitors();
+ return;
+ }
+ assert(AsyncDeflateIdleMonitors, "oops");
+ if (gFreeListNextSafepoint != NULL) {
+#ifdef ASSERT
+ for (ObjectMonitor* monitor = gFreeListNextSafepoint; monitor != NULL; monitor = monitor->FreeNext) {
+ guarantee(monitor->owner() == NULL, "invariant");
+ guarantee(monitor->waiters() == 0, "invariant");
+ guarantee(monitor->recursions() == 0, "invariant");
+ guarantee(monitor->object() != NULL, "invariant");
+ guarantee(monitor->header() != 0, "invariant");
+ guarantee(monitor->is_free(), "invariant");
+ }
+ guarantee(gFreeListNextSafepointTail != NULL, "invariant");
+#endif // def ASSERT
+
+ Thread::muxAcquire(&gListLock, "do_safepoint_work");
+ gFreeListNextSafepointTail->FreeNext = gFreeList;
+ gFreeList = gFreeListNextSafepoint;
+ gMonitorFreeCount += gMonitorFreeCountNextSafepoint;
+ Thread::muxRelease(&gListLock);
+
+ gFreeListNextSafepoint = NULL;
+ gFreeListNextSafepointTail = NULL;
+ gMonitorFreeCountNextSafepoint = 0;
+ }
+ set_should_deflate_idle_monitors_conc();
+ MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ Service_lock->notify_all();
+}
+
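+// Stage the scavenged list [head, tail] (nScavenged entries) for hand-off at
+// the next safepoint. No lock is taken: between safepoints the concurrent
+// deflater is assumed to be the only writer of these variables.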
+void ObjectSynchronizer::append_to_freelist_for_after_safepoint(int nScavenged, ObjectMonitor* const head, ObjectMonitor* const tail) {
+#ifdef ASSERT
+ int count = 0;
+ for (ObjectMonitor* m = head; m != NULL; m = m->FreeNext) { count++; }
+ guarantee(count == nScavenged, "invariant");
+#endif // def ASSERT
+ if (head != NULL) {
+ assert(tail->FreeNext == NULL, "invariant");
+ tail->FreeNext = gFreeListNextSafepoint;
+ gFreeListNextSafepoint = head;
+ }
+ if (gFreeListNextSafepointTail == NULL) {
+ gFreeListNextSafepointTail = tail;
+ }
+ gMonitorFreeCountNextSafepoint += nScavenged;
+ OM_PERFDATA_OP(Deflations, inc(nScavenged));
+}
+
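+// Walk all monitor blocks outside a safepoint and move idle monitors onto a
+// staging list that is published at the next safepoint (see
+// append_to_freelist_for_after_safepoint()).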
+void ObjectSynchronizer::deflate_idle_monitors_conc() {
+ assert(Thread::current()->is_Java_thread(), "precondition");
+ _should_deflate_idle_monitors_conc = false;
+ if (MonitorInUseLists) {
+ return; // Don't want to run over the thread list for now.
+ }
+
+ ObjectMonitor* freeHeadp = NULL;
+ ObjectMonitor* freeTailp = NULL;
+ int nScavenged = 0;
+ int nInuse = 0;
+ int nInCirculation = 0;
+
+ PaddedEnd<ObjectMonitor> * block =
+ (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
+ for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
+ // Iterate over all extant monitors - Scavenge all idle monitors.
+ assert(block->object() == CHAINMARKER, "must be a block header");
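+ // If a safepoint has been requested, park what has been scavenged so far
+ // and block in ThreadBlockInVM so the scan does not delay the safepoint.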
+ if (SafepointSynchronize::is_synchronizing()) {
+ append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
+ nScavenged = 0;
+ freeHeadp = NULL;
+ freeTailp = NULL;
+ JavaThread* const jt = (JavaThread*) Thread::current();
+ ThreadBlockInVM blocker(jt);
+ }
+ nInCirculation += _BLOCKSIZE;
+ for (int i = 1; i < _BLOCKSIZE; i++) {
+ ObjectMonitor* mid = (ObjectMonitor*)&block[i];
+ if (!mid->is_old()) {
+ // Skip deflating newly allocated or free monitors.
+ if (mid->is_new()) {
+ // Mark mid as "old".
+ mid->set_allocation_state(ObjectMonitor::Old);
+ }
+ continue;
+ }
+
+ oop obj = (oop)mid->object();
+ assert(obj != NULL, "invariant");
+
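+ // try_disable_monitor() is expected to restore obj's header and mark the
+ // monitor's displaced header as deflated; if it fails, the monitor is
+ // still in use and stays active.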
+ if (mid->try_disable_monitor()) {
+ mid->FreeNext = NULL;
+ if (freeHeadp == NULL) { freeHeadp = mid; }
+ if (freeTailp != NULL) { freeTailp->FreeNext = mid; }
+ freeTailp = mid;
+ nScavenged++;
+ } else {
+ nInuse++;
+ }
+ }
+ }
+ append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
+ OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
+}
+
// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
ObjectMonitor** freeHeadp,
ObjectMonitor** freeTailp) {
@@ -1629,10 +1779,11 @@
// Restore the header back to obj
obj->release_set_mark(mid->header());
mid->clear();
assert(mid->object() == NULL, "invariant");
+ assert(mid->is_free(), "invariant");
// Move the object to the working free list defined by freeHeadp, freeTailp
if (*freeHeadp == NULL) *freeHeadp = mid;
if (*freeTailp != NULL) {
ObjectMonitor * prevtail = *freeTailp;