
src/hotspot/share/runtime/synchronizer.cpp

rev 54415 : 8222295: more baseline cleanups from Async Monitor Deflation project
rev 54416 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54417 : imported patch dcubed.monitor_deflate_conc.v2.01

*** 746,756 ****
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           Self->is_Java_thread() , "invariant");
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  
! Retry:
    ObjectMonitor* monitor = NULL;
    markOop temp, test;
    intptr_t hash;
    markOop mark = ReadStableMark(obj);
--- 746,756 ----
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           Self->is_Java_thread() , "invariant");
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  
! while (true) {
    ObjectMonitor* monitor = NULL;
    markOop temp, test;
    intptr_t hash;
    markOop mark = ReadStableMark(obj);

*** 775,785 ****
    } else if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
!       goto Retry;
      }
      monitor = omh.om_ptr();
      temp = monitor->header();
      assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
      hash = temp->hash();
--- 775,785 ----
    } else if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
      }
      monitor = omh.om_ptr();
      temp = monitor->header();
      assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
      hash = temp->hash();

*** 816,835 ****
        hash = get_next_hash(Self, obj);
        temp = mark->copy_set_hash(hash); // merge hash code into header
        assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
        test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
        if (test != mark) {
!         // The only update to the ObjectMonitor's header/dmw field
!         // is to merge in the hash code. If someone adds a new usage
!         // of the header/dmw field, please update this code.
          hash = test->hash();
          assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
          assert(hash != 0, "Trivial unexpected object/monitor header usage.");
        }
      }
      // We finally get the hash
      return hash;
  }
  
  // Deprecated -- use FastHashCode() instead.
  
  intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
--- 816,841 ----
        hash = get_next_hash(Self, obj);
        temp = mark->copy_set_hash(hash); // merge hash code into header
        assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
        test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
        if (test != mark) {
!         // The only non-deflation update to the ObjectMonitor's
!         // header/dmw field is to merge in the hash code. If someone
!         // adds a new usage of the header/dmw field, please update
!         // this code.
!         // ObjectMonitor::install_displaced_markword_in_object()
!         // does mark the header/dmw field as part of async deflation,
!         // but that protocol cannot happen now due to the
!         // ObjectMonitorHandle above.
          hash = test->hash();
          assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
          assert(hash != 0, "Trivial unexpected object/monitor header usage.");
        }
      }
      // We finally get the hash
      return hash;
+   }
  }
  
  // Deprecated -- use FastHashCode() instead.
  
  intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {

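The reworked comment above captures the invariant this hunk relies on: outside of async deflation, the only concurrent update to the ObjectMonitor's header/dmw word is another thread merging in an identity hash, so losing the cmpxchg just means adopting the winner's hash. A minimal standalone sketch of that idea, not HotSpot code: the field layout, constants, and names below are illustrative assumptions, modeled with std::atomic.

// Standalone sketch (not HotSpot code) of the "merge hash via CAS, else
// adopt the winner's hash" idea from the hunk above. The header layout,
// names, and constants here are hypothetical.
#include <atomic>
#include <cstdint>
#include <cstdio>

static const uintptr_t kHashShift = 8;
static const uintptr_t kHashMask  = 0x7FFFFF;   // assumed layout, illustration only

static uintptr_t hash_of(uintptr_t header) {
  return (header >> kHashShift) & kHashMask;
}

// Returns the hash that ends up in the header, whether ours or a racer's.
static uintptr_t merge_hash(std::atomic<uintptr_t>* header, uintptr_t new_hash) {
  uintptr_t mark = header->load(std::memory_order_relaxed);
  if (hash_of(mark) != 0) {
    return hash_of(mark);                       // someone already installed a hash
  }
  uintptr_t temp = mark | ((new_hash & kHashMask) << kHashShift);
  uintptr_t expected = mark;
  if (!header->compare_exchange_strong(expected, temp)) {
    // Lost the race: the only legal concurrent update in this model is
    // another thread merging in a hash, so the winner's header carries it.
    return hash_of(expected);
  }
  return new_hash & kHashMask;
}

int main() {
  std::atomic<uintptr_t> header(0);
  printf("hash=%lu\n", (unsigned long) merge_hash(&header, 12345));
  printf("hash=%lu\n", (unsigned long) merge_hash(&header, 999));  // adopts the first hash
  return 0;
}
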
*** 1163,1172 ****
--- 1169,1180 ----
      gMonitorFreeCount--;
      ObjectMonitor * take = gFreeList;
      gFreeList = take->FreeNext;
      guarantee(take->object() == NULL, "invariant");
      if (AsyncDeflateIdleMonitors) {
+       // Clear any values we allowed to linger during async deflation.
+       take->_header = NULL;
        take->set_owner(NULL);
        take->_contentions = 0;
      }
      guarantee(!take->is_busy(), "invariant");
      take->Recycle();

*** 1342,1352 ****
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
    }
    guarantee(tail != NULL, "invariant");
!   guarantee(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }
  
  ObjectMonitor * inUseList = Self->omInUseList;
--- 1350,1360 ----
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
    }
    guarantee(tail != NULL, "invariant");
!   ADIM_guarantee(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }
  
  ObjectMonitor * inUseList = Self->omInUseList;

*** 1359,1372 ****
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
!     guarantee(cur_om->is_active(), "invariant");
    }
    guarantee(inUseTail != NULL, "invariant");
!   guarantee(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }
  
  Thread::muxAcquire(&gListLock, "omFlush");
--- 1367,1380 ----
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
!     ADIM_guarantee(cur_om->is_active(), "invariant");
    }
    guarantee(inUseTail != NULL, "invariant");
!   ADIM_guarantee(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }
  
  Thread::muxAcquire(&gListLock, "omFlush");

*** 1557,1567 ****
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
!     assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
  
      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);
  
      // Optimization: if the mark->locker stack address is associated
--- 1565,1575 ----
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
!     ADIM_guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
  
      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);
  
      // Optimization: if the mark->locker stack address is associated
--- 1565,1575 ----

*** 1571,1580 ****
--- 1579,1592 ----
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.
  
+     omh_p->set_om_ptr(m);
+     assert(m->is_new(), "freshly allocated monitor must be new");
+     m->set_allocation_state(ObjectMonitor::Old);
+ 
      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

*** 1588,1599 ****
                    p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     assert(!m->is_free(), "post-condition");
!     omh_p->set_om_ptr(m);
      return;
    }
  
    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
--- 1600,1610 ----
                    p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
      return;
    }
  
    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.

*** 1605,1615 ****
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
  
    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
!   assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
    ObjectMonitor * m;
    if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
      // If !AsyncDeflateIdleMonitors or if an internal inflation, then
      // we won't stop for a potential safepoint in omAlloc.
      m = omAlloc(Self, cause);
--- 1616,1626 ----
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
  
    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
!   ADIM_guarantee(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
    ObjectMonitor * m;
    if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
      // If !AsyncDeflateIdleMonitors or if an internal inflation, then
      // we won't stop for a potential safepoint in omAlloc.
      m = omAlloc(Self, cause);

*** 1627,1640 ****
--- 1638,1657 ----
      m->set_object(object);
      m->_recursions   = 0;
      m->_Responsible  = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
  
+     omh_p->set_om_ptr(m);
+     assert(m->is_new(), "freshly allocated monitor must be new");
+     m->set_allocation_state(ObjectMonitor::Old);
+ 
      if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
        m->set_header(NULL);
        m->set_object(NULL);
        m->Recycle();
+       omh_p->set_om_ptr(NULL);
+       // omRelease() will reset the allocation state
        omRelease(Self, m, true);
        m = NULL;
        continue;
        // interference - the markword changed - just retry.
        // The state-transitions are one-way, so there's no chance of

*** 1651,1661 ****
                    p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     omh_p->set_om_ptr(m);
      return;
    }
  }
--- 1668,1678 ----
                    p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
      return;
    }
  }

*** 1679,1709 ****
  // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
  // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
  // This is an unfortunate aspect of this design.
  
  void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
!   if (!AsyncDeflateIdleMonitors) {
!     // Use the older mechanism for the global in-use list.
      ObjectSynchronizer::deflate_idle_monitors(_counters);
      return;
    }
  
-   assert(_counters == NULL, "not used with AsyncDeflateIdleMonitors");
- 
    log_debug(monitorinflation)("requesting deflation of idle monitors.");
    // Request deflation of global idle monitors by the ServiceThread:
    _gOmShouldDeflateIdleMonitors = true;
    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
    Service_lock->notify_all();
- 
-   // Request deflation of per-thread idle monitors by each JavaThread:
-   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
-     if (jt->omInUseCount > 0) {
-       // This JavaThread is using monitors so check it.
-       jt->omShouldDeflateIdleMonitors = true;
-     }
-   }
  }
  
  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
--- 1696,1722 ----
  // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
  // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
  // This is an unfortunate aspect of this design.
  
  void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
!   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
! 
!   // The per-thread in-use lists are handled in
!   // ParallelSPCleanupThreadClosure::do_thread().
! 
!   if (!AsyncDeflateIdleMonitors || is_cleanup_requested()) {
!     // Use the older mechanism for the global in-use list or
!     // if a special cleanup has been requested.
      ObjectSynchronizer::deflate_idle_monitors(_counters);
      return;
    }
  
    log_debug(monitorinflation)("requesting deflation of idle monitors.");
    // Request deflation of global idle monitors by the ServiceThread:
    _gOmShouldDeflateIdleMonitors = true;
    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
    Service_lock->notify_all();
  }
  
  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,

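With this change, do_safepoint_work() no longer walks the JavaThreads at the safepoint; it records a request and wakes the ServiceThread, which performs the global deflation pass later. A minimal standalone model of that flag-plus-notify handoff, not HotSpot code: Service_lock and the ServiceThread are approximated here with std::mutex, std::condition_variable, and std::thread, and all names are illustrative.

// Standalone sketch (not HotSpot code) of the request/notify handoff shown
// above: the safepoint sets a flag and pokes a service thread, which does
// the actual deflation work later. Names and types are illustrative.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex              service_lock;
static std::condition_variable service_cv;
static std::atomic<bool>       should_deflate_idle_monitors(false);
static std::atomic<bool>       terminate_service_thread(false);

static void request_deflation() {            // models do_safepoint_work()
  should_deflate_idle_monitors.store(true);
  std::lock_guard<std::mutex> lg(service_lock);
  service_cv.notify_all();
}

static void service_thread_loop() {          // models the ServiceThread
  std::unique_lock<std::mutex> ul(service_lock);
  while (!terminate_service_thread.load()) {
    service_cv.wait(ul, [] {
      return should_deflate_idle_monitors.load() || terminate_service_thread.load();
    });
    if (should_deflate_idle_monitors.exchange(false)) {
      printf("service thread: deflating global idle monitors\n");
    }
  }
}

int main() {
  std::thread service(service_thread_loop);
  request_deflation();
  {
    std::lock_guard<std::mutex> lg(service_lock);
    terminate_service_thread.store(true);
    service_cv.notify_all();
  }
  service.join();
  return 0;
}
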
*** 1757,1844 ****
  }
  
  // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
  // Returns true if it was deflated and false otherwise.
  //
! // The async deflation protocol sets _owner to DEFLATER_MARKER and
! // makes _contentions negative as signals to contending threads that
  // an async deflation is in progress. There are a number of checks
  // as part of the protocol to make sure that the calling thread has
  // not lost the race to a contending thread.
  //
  // The ObjectMonitor has been successfully async deflated when:
! // (_owner == DEFLATER_MARKER && _contentions < 0). Contending threads
  // that see those values know to retry their operation.
  //
  bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                    ObjectMonitor** freeHeadp,
                                                    ObjectMonitor** freeTailp) {
    assert(AsyncDeflateIdleMonitors, "sanity check");
    assert(Thread::current()->is_Java_thread(), "precondition");
    // A newly allocated ObjectMonitor should not be seen here so we
    // avoid an endless inflate/deflate cycle.
!   assert(mid->is_old(), "precondition");
  
    if (mid->is_busy() || mid->ref_count() != 0) {
      // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
      // is in use so no deflation.
      return false;
    }
  
!   if (Atomic::cmpxchg(DEFLATER_MARKER, &mid->_owner, (void*)NULL) == NULL) {
      // ObjectMonitor is not owned by another thread. Our setting
!     // _owner to DEFLATER_MARKER forces any contending thread through
      // the slow path. This is just the first part of the async
      // deflation dance.
  
      if (mid->_waiters != 0 || mid->ref_count() != 0) {
        // Another thread has raced to enter the ObjectMonitor after
        // mid->is_busy() above and has already waited on it which
        // makes it busy so no deflation. Or the ObjectMonitor* is
        // in use for some other operation like inflate(). Restore
!       // _owner to NULL if it is still DEFLATER_MARKER.
        Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
        return false;
      }
  
      if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
!       // Make _contentions negative to force racing threads to retry.
        // This is the second part of the async deflation dance.
  
!       if (mid->_owner == DEFLATER_MARKER) {
!         // If _owner is still DEFLATER_MARKER, then we have successfully
          // signaled any racing threads to retry. If it is not, then we
!         // have lost the race to another thread and the ObjectMonitor is
!         // now busy. This is the third and final part of the async
          // deflation dance.
!         // Note: This _owner check solves the ABA problem with _contentions
          // where another thread acquired the ObjectMonitor, finished
!         // using it and restored the _contentions to zero.
  
          // Sanity checks for the races:
!         guarantee(mid->_waiters == 0, "should be no waiters");
!         guarantee(mid->_cxq == NULL, "should be no contending threads");
!         guarantee(mid->_EntryList == NULL, "should be no entering threads");
  
          if (log_is_enabled(Trace, monitorinflation)) {
-           oop obj = (oop) mid->object();
-           assert(obj != NULL, "sanity check");
-           if (obj->is_instance()) {
              ResourceMark rm;
              log_trace(monitorinflation)("deflate_monitor_using_JT: "
!                                         "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                          p2i(obj), p2i(obj->mark()),
                                          obj->klass()->external_name());
            }
-         }
  
          // Install the old mark word if nobody else has already done it.
!         mid->install_displaced_markword_in_object();
          mid->clear_using_JT();
  
!         assert(mid->object() == NULL, "invariant");
!         assert(mid->is_free(), "invariant");
  
          // Move the deflated ObjectMonitor to the working free list
          // defined by freeHeadp and freeTailp.
          if (*freeHeadp == NULL) {
            // First one on the list.
--- 1770,1865 ----
  }
  
  // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
  // Returns true if it was deflated and false otherwise.
  //
! // The async deflation protocol sets owner to DEFLATER_MARKER and
! // makes contentions negative as signals to contending threads that
  // an async deflation is in progress. There are a number of checks
  // as part of the protocol to make sure that the calling thread has
  // not lost the race to a contending thread.
  //
  // The ObjectMonitor has been successfully async deflated when:
! // (owner == DEFLATER_MARKER && contentions < 0). Contending threads
  // that see those values know to retry their operation.
  //
  bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                    ObjectMonitor** freeHeadp,
                                                    ObjectMonitor** freeTailp) {
    assert(AsyncDeflateIdleMonitors, "sanity check");
    assert(Thread::current()->is_Java_thread(), "precondition");
    // A newly allocated ObjectMonitor should not be seen here so we
    // avoid an endless inflate/deflate cycle.
!   assert(mid->is_old(), "must be old: allocation_state=%d",
!          (int) mid->allocation_state());
  
    if (mid->is_busy() || mid->ref_count() != 0) {
      // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
      // is in use so no deflation.
      return false;
    }
  
!   if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
      // ObjectMonitor is not owned by another thread. Our setting
!     // owner to DEFLATER_MARKER forces any contending thread through
      // the slow path. This is just the first part of the async
      // deflation dance.
  
      if (mid->_waiters != 0 || mid->ref_count() != 0) {
        // Another thread has raced to enter the ObjectMonitor after
        // mid->is_busy() above and has already waited on it which
        // makes it busy so no deflation. Or the ObjectMonitor* is
        // in use for some other operation like inflate(). Restore
!       // owner to NULL if it is still DEFLATER_MARKER.
        Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
        return false;
      }
  
      if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
!       // Make contentions negative to force racing threads to retry.
        // This is the second part of the async deflation dance.
  
!       if (mid->_owner == DEFLATER_MARKER && mid->ref_count() == 0) {
!         // If owner is still DEFLATER_MARKER, then we have successfully
          // signaled any racing threads to retry. If it is not, then we
!         // have lost the race to an entering thread and the ObjectMonitor
!         // is now busy. If the ObjectMonitor* is in use, then we have
!         // lost that race. This is the third and final part of the async
          // deflation dance.
!         // Note: This owner check solves the ABA problem with contentions
          // where another thread acquired the ObjectMonitor, finished
!         // using it and restored the contentions to zero.
!         // Note: This ref_count check solves the race with save_om_ptr()
!         // where its ref_count increment happens after the first ref_count
!         // check in this function and before contentions is made negative.
  
          // Sanity checks for the races:
!         guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
!         guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
!                   INTPTR_FORMAT, p2i(mid->_cxq));
!         guarantee(mid->_EntryList == NULL,
!                   "must be no entering threads: EntryList=" INTPTR_FORMAT,
!                   p2i(mid->_EntryList));
  
+         const oop obj = (oop) mid->object();
          if (log_is_enabled(Trace, monitorinflation)) {
            ResourceMark rm;
            log_trace(monitorinflation)("deflate_monitor_using_JT: "
!                                       "object=" INTPTR_FORMAT ", mark="
!                                       INTPTR_FORMAT ", type='%s'",
                                        p2i(obj), p2i(obj->mark()),
                                        obj->klass()->external_name());
          }
  
          // Install the old mark word if nobody else has already done it.
!         mid->install_displaced_markword_in_object(obj);
          mid->clear_using_JT();
  
!         assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
!                p2i(mid->object()));
!         assert(mid->is_free(), "must be free: allocation_state=%d",
!                (int) mid->allocation_state());
  
          // Move the deflated ObjectMonitor to the working free list
          // defined by freeHeadp and freeTailp.
          if (*freeHeadp == NULL) {
            // First one on the list.

*** 1846,1885 ****
            }
            if (*freeTailp != NULL) {
              // We append to the list so the caller can use mid->FreeNext
              // to fix the linkages in its context.
              ObjectMonitor * prevtail = *freeTailp;
!             assert(prevtail->FreeNext == NULL, "not cleaned up by the caller");
              prevtail->FreeNext = mid;
            }
            *freeTailp = mid;
  
            // At this point, mid->FreeNext still refers to its current
            // value and another ObjectMonitor's FreeNext field still
            // refers to this ObjectMonitor. Those linkages have to be
            // cleaned up by the caller who has the complete context.
  
!           // We leave _owner == DEFLATER_MARKER and _contentions < 0
            // to force any racing threads to retry.
            return true;  // Success, ObjectMonitor has been deflated.
          }
  
!         // The _owner was changed from DEFLATER_MARKER so we lost the
!         // race since the ObjectMonitor is now busy. Add back max_jint
!         // to restore the _contentions field to its proper value (which
!         // may not be what we saw above).
          Atomic::add(max_jint, &mid->_contentions);
  
!         assert(mid->_contentions >= 0, "_contentions should not be negative");
        }
  
!       // The _contentions was no longer 0 so we lost the race since the
        // ObjectMonitor is now busy.
!       assert(mid->_owner != DEFLATER_MARKER, "should no longer be set");
      }
  
!     // The _owner field is no longer NULL so we lost the race since the
      // ObjectMonitor is now busy.
      return false;
  }
  
  // Walk a given monitor list, and deflate idle monitors
--- 1867,1914 ----
            }
            if (*freeTailp != NULL) {
              // We append to the list so the caller can use mid->FreeNext
              // to fix the linkages in its context.
              ObjectMonitor * prevtail = *freeTailp;
!             // Should have been cleaned up by the caller:
!             assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext="
!                    INTPTR_FORMAT, p2i(prevtail->FreeNext));
              prevtail->FreeNext = mid;
            }
            *freeTailp = mid;
  
            // At this point, mid->FreeNext still refers to its current
            // value and another ObjectMonitor's FreeNext field still
            // refers to this ObjectMonitor. Those linkages have to be
            // cleaned up by the caller who has the complete context.
  
!           // We leave owner == DEFLATER_MARKER and contentions < 0
            // to force any racing threads to retry.
            return true;  // Success, ObjectMonitor has been deflated.
          }
  
!         // The owner was changed from DEFLATER_MARKER or ObjectMonitor*
!         // is in use so we lost the race since the ObjectMonitor is now
!         // busy.
! 
!         // Restore owner to NULL if it is still DEFLATER_MARKER:
!         Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
! 
!         // Add back max_jint to restore the contentions field to its
!         // proper value (which may not be what we saw above):
          Atomic::add(max_jint, &mid->_contentions);
  
!         assert(mid->_contentions >= 0, "must not be negative: contentions=%d",
!                mid->_contentions);
        }
  
!       // The contentions was no longer 0 so we lost the race since the
        // ObjectMonitor is now busy.
!       assert(mid->_owner != DEFLATER_MARKER, "must not be DEFLATER_MARKER");
      }
  
!     // The owner field is no longer NULL so we lost the race since the
      // ObjectMonitor is now busy.
      return false;
  }
  
  // Walk a given monitor list, and deflate idle monitors

*** 1972,1993 ****
        mid = next;
        deflated_count++;
      } else {
        // mid is considered in-use if it does not have an associated
        // Java object or mid is not old or deflation did not succeed.
!       // A mid->is_new() node can be seen here when it is freshly returned
!       // by omAlloc() (and skips the deflation code path).
        // A mid->is_old() node can be seen here when deflation failed.
        // A mid->is_free() node can be seen here when a fresh node from
        // omAlloc() is released by omRelease() due to losing the race
        // in inflate().
-       if (mid->object() != NULL && mid->is_new()) {
-         // mid has an associated Java object and has now been seen
-         // as newly allocated so mark it as "old".
-         mid->set_allocation_state(ObjectMonitor::Old);
-       }
        cur_mid_in_use = mid;
        mid = mid->FreeNext;
  
        if (SafepointSynchronize::is_synchronizing() &&
            cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {
--- 2001,2017 ----
        mid = next;
        deflated_count++;
      } else {
        // mid is considered in-use if it does not have an associated
        // Java object or mid is not old or deflation did not succeed.
!       // A mid->is_new() node can be seen here when it is freshly
!       // returned by omAlloc() (and skips the deflation code path).
        // A mid->is_old() node can be seen here when deflation failed.
        // A mid->is_free() node can be seen here when a fresh node from
        // omAlloc() is released by omRelease() due to losing the race
        // in inflate().
        cur_mid_in_use = mid;
        mid = mid->FreeNext;
  
        if (SafepointSynchronize::is_synchronizing() &&
            cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {

*** 2013,2024 ****
    counters->perThreadScavenged = 0;    // per-thread scavenge total
    counters->perThreadTimes = 0.0;      // per-thread scavenge times
  }
  
  void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
-   assert(!AsyncDeflateIdleMonitors, "sanity check");
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    bool deflated = false;
  
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
    ObjectMonitor * freeTailp = NULL;
    elapsedTimer timer;
--- 2037,2056 ----
    counters->perThreadScavenged = 0;    // per-thread scavenge total
    counters->perThreadTimes = 0.0;      // per-thread scavenge times
  }
  
  void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ 
+   if (AsyncDeflateIdleMonitors) {
+     // Nothing to do when global idle ObjectMonitors are deflated using
+     // a JavaThread unless a special cleanup has been requested.
+     if (!is_cleanup_requested()) {
+       return;
+     }
+   }
+ 
    bool deflated = false;
  
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
    ObjectMonitor * freeTailp = NULL;
    elapsedTimer timer;

*** 2072,2134 ****
  // Deflate global idle ObjectMonitors using a JavaThread.
  //
  void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
    assert(AsyncDeflateIdleMonitors, "sanity check");
    assert(Thread::current()->is_Java_thread(), "precondition");
!   JavaThread * cur_jt = JavaThread::current();
  
    _gOmShouldDeflateIdleMonitors = false;
  
    int deflated_count = 0;
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
    ObjectMonitor * freeTailp = NULL;
    ObjectMonitor * savedMidInUsep = NULL;
    elapsedTimer timer;
  
    if (log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }
    Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
    OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));
  
    do {
!     int local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
      gOmInUseCount -= local_deflated_count;
      deflated_count += local_deflated_count;
  
      if (freeHeadp != NULL) {
        // Move the scavenged ObjectMonitors to the global free list.
        guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
        assert(freeTailp->FreeNext == NULL, "invariant");
  
        // Constant-time list splice - prepend scavenged segment to gFreeList.
        freeTailp->FreeNext = gFreeList;
        gFreeList = freeHeadp;
  
        gMonitorFreeCount += local_deflated_count;
        OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      }
  
      if (savedMidInUsep != NULL) {
        // deflate_monitor_list_using_JT() detected a safepoint starting.
        Thread::muxRelease(&gListLock);
        timer.stop();
        {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
          assert(SafepointSynchronize::is_synchronizing(), "sanity check");
!         ThreadBlockInVM blocker(cur_jt);
        }
        // Prepare for another loop after the safepoint.
        freeHeadp = NULL;
        freeTailp = NULL;
        if (log_is_enabled(Info, monitorinflation)) {
          timer.start();
        }
!       Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(2)");
      }
    } while (savedMidInUsep != NULL);
    Thread::muxRelease(&gListLock);
    timer.stop();
  
    LogStreamHandle(Debug, monitorinflation) lsh_debug;
    LogStreamHandle(Info, monitorinflation) lsh_info;
    LogStream * ls = NULL;
--- 2104,2211 ----
  // Deflate global idle ObjectMonitors using a JavaThread.
  //
  void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
    assert(AsyncDeflateIdleMonitors, "sanity check");
    assert(Thread::current()->is_Java_thread(), "precondition");
!   JavaThread * self = JavaThread::current();
  
    _gOmShouldDeflateIdleMonitors = false;
  
+   deflate_common_idle_monitors_using_JT(true /* is_global */, self);
+ }
+ 
+ // Deflate per-thread idle ObjectMonitors using a JavaThread.
+ //
+ void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
+   assert(AsyncDeflateIdleMonitors, "sanity check");
+   assert(Thread::current()->is_Java_thread(), "precondition");
+   JavaThread * self = JavaThread::current();
+ 
+   self->omShouldDeflateIdleMonitors = false;
+ 
+   deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
+ }
+ 
+ // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
+ //
+ void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) {
    int deflated_count = 0;
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
    ObjectMonitor * freeTailp = NULL;
    ObjectMonitor * savedMidInUsep = NULL;
    elapsedTimer timer;
  
    if (log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }
+ 
+   if (is_global) {
      Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
      OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));
+   } else {
+     OM_PERFDATA_OP(MonExtant, inc(self->omInUseCount));
+   }
  
    do {
!     int local_deflated_count;
!     if (is_global) {
!       local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
        gOmInUseCount -= local_deflated_count;
+     } else {
+       local_deflated_count = deflate_monitor_list_using_JT(self->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
+       self->omInUseCount -= local_deflated_count;
+     }
      deflated_count += local_deflated_count;
  
      if (freeHeadp != NULL) {
        // Move the scavenged ObjectMonitors to the global free list.
        guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
        assert(freeTailp->FreeNext == NULL, "invariant");
+       if (!is_global) {
+         Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
+       }
  
        // Constant-time list splice - prepend scavenged segment to gFreeList.
        freeTailp->FreeNext = gFreeList;
        gFreeList = freeHeadp;
  
        gMonitorFreeCount += local_deflated_count;
        OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
+       if (!is_global) {
+         Thread::muxRelease(&gListLock);
+       }
      }
  
      if (savedMidInUsep != NULL) {
        // deflate_monitor_list_using_JT() detected a safepoint starting.
+       if (is_global) {
        Thread::muxRelease(&gListLock);
+       }
        timer.stop();
        {
+         if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
+         } else {
+           log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self));
+         }
          assert(SafepointSynchronize::is_synchronizing(), "sanity check");
!         ThreadBlockInVM blocker(self);
        }
        // Prepare for another loop after the safepoint.
        freeHeadp = NULL;
        freeTailp = NULL;
        if (log_is_enabled(Info, monitorinflation)) {
          timer.start();
        }
!       if (is_global) {
!         Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)");
!       }
      }
    } while (savedMidInUsep != NULL);
+   if (is_global) {
    Thread::muxRelease(&gListLock);
+   }
    timer.stop();
  
    LogStreamHandle(Debug, monitorinflation) lsh_debug;
    LogStreamHandle(Info, monitorinflation) lsh_info;
    LogStream * ls = NULL;

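A recurring step in the common helper above is the constant-time splice of a locally scavenged segment (freeHeadp..freeTailp) onto gFreeList, with gListLock held only around the splice in the per-thread case. A minimal standalone sketch of that O(1) splice, not HotSpot code: Node, gListLock, and the counter below are illustrative stand-ins.

// Standalone sketch (not HotSpot code) of the constant-time list splice used
// above: a locally collected segment [head..tail] is prepended to a global
// free list while holding the list lock. Types and names are illustrative.
#include <cassert>
#include <mutex>

struct Node {
  Node* FreeNext = nullptr;
};

static std::mutex gListLock;
static Node*      gFreeList  = nullptr;
static int        gFreeCount = 0;

// Prepend the segment [head..tail] of 'count' nodes to gFreeList in O(1).
static void splice_to_global_free_list(Node* head, Node* tail, int count) {
  assert(head != nullptr && tail != nullptr && count > 0);
  assert(tail->FreeNext == nullptr);            // caller terminates the segment
  std::lock_guard<std::mutex> lg(gListLock);
  tail->FreeNext = gFreeList;                   // hook old list behind the segment
  gFreeList      = head;                        // new head is the segment head
  gFreeCount    += count;
}

int main() {
  Node a, b;                                    // a locally scavenged segment: a -> b
  a.FreeNext = &b;
  splice_to_global_free_list(&a, &b, 2);
  assert(gFreeList == &a && gFreeCount == 2);
  return 0;
}
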
*** 2136,2218 ****
      ls = &lsh_debug;
    } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
      ls = &lsh_info;
    }
    if (ls != NULL) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    }
- }
- 
- // Deflate per-thread idle ObjectMonitors using a JavaThread.
- //
- void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
-   assert(AsyncDeflateIdleMonitors, "sanity check");
-   assert(Thread::current()->is_Java_thread(), "precondition");
-   JavaThread * cur_jt = JavaThread::current();
- 
-   cur_jt->omShouldDeflateIdleMonitors = false;
- 
-   int deflated_count = 0;
-   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
-   ObjectMonitor * freeTailp = NULL;
-   ObjectMonitor * savedMidInUsep = NULL;
-   elapsedTimer timer;
- 
-   if (log_is_enabled(Info, monitorinflation)) {
-     timer.start();
-   }
- 
-   OM_PERFDATA_OP(MonExtant, inc(cur_jt->omInUseCount));
-   do {
-     int local_deflated_count = deflate_monitor_list_using_JT(cur_jt->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
-     cur_jt->omInUseCount -= local_deflated_count;
-     deflated_count += local_deflated_count;
- 
-     if (freeHeadp != NULL) {
-       // Move the scavenged ObjectMonitors to the global free list.
-       Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT");
-       guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
-       assert(freeTailp->FreeNext == NULL, "invariant");
- 
-       // Constant-time list splice - prepend scavenged segment to gFreeList.
-       freeTailp->FreeNext = gFreeList;
-       gFreeList = freeHeadp;
- 
-       gMonitorFreeCount += local_deflated_count;
-       OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
-       Thread::muxRelease(&gListLock);
-       // Prepare for another loop on the current JavaThread.
-       freeHeadp = NULL;
-       freeTailp = NULL;
-     }
-     timer.stop();
- 
-     if (savedMidInUsep != NULL) {
-       // deflate_monitor_list_using_JT() detected a safepoint starting.
-       {
-         log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(cur_jt));
-         assert(SafepointSynchronize::is_synchronizing(), "sanity check");
-         ThreadBlockInVM blocker(cur_jt);
-       }
-       // Prepare for another loop on the current JavaThread after
-       // the safepoint.
-       if (log_is_enabled(Info, monitorinflation)) {
-         timer.start();
-       }
-     }
-   } while (savedMidInUsep != NULL);
- 
-   LogStreamHandle(Debug, monitorinflation) lsh_debug;
-   LogStreamHandle(Info, monitorinflation) lsh_info;
-   LogStream * ls = NULL;
-   if (log_is_enabled(Debug, monitorinflation)) {
-     ls = &lsh_debug;
-   } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
-     ls = &lsh_info;
-   }
-   if (ls != NULL) {
-     ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(cur_jt), timer.seconds(), deflated_count);
    }
  }
  
  void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
    // Report the cumulative time for deflating each thread's idle
--- 2213,2227 ----
      ls = &lsh_debug;
    } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
      ls = &lsh_info;
    }
    if (ls != NULL) {
+     if (is_global) {
        ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
+     } else {
+       ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
+     }
    }
  }
  
  void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
    // Report the cumulative time for deflating each thread's idle

*** 2256,2268 ****
  void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  
    if (AsyncDeflateIdleMonitors) {
-     // Nothing to do when idle ObjectMonitors are deflated using a
-     // JavaThread unless a special cleanup has been requested.
      if (!is_cleanup_requested()) {
        return;
      }
    }
  
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
--- 2265,2281 ----
  void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  
    if (AsyncDeflateIdleMonitors) {
      if (!is_cleanup_requested()) {
+       // Mark the JavaThread for idle monitor cleanup if a special
+       // cleanup has NOT been requested.
+       if (thread->omInUseCount > 0) {
+         // This JavaThread is using monitors so mark it.
+         thread->omShouldDeflateIdleMonitors = true;
+       }
        return;
      }
    }
  
    ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors

*** 2489,2505 ****
      if (jt != NULL) {
        out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                      ": free per-thread monitor must have NULL _header "
                      "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                      p2i(n->header()));
!     } else {
        out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                      "must have NULL _header field: _header=" INTPTR_FORMAT,
                      p2i(n), p2i(n->header()));
-     }
      *error_cnt_p = *error_cnt_p + 1;
    }
    if (n->object() != NULL) {
      if (jt != NULL) {
        out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                      ": free per-thread monitor must have NULL _object "
                      "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
--- 2502,2519 ----
      if (jt != NULL) {
        out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                      ": free per-thread monitor must have NULL _header "
                      "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                      p2i(n->header()));
!       *error_cnt_p = *error_cnt_p + 1;
!     } else if (!AsyncDeflateIdleMonitors) {
        out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                      "must have NULL _header field: _header=" INTPTR_FORMAT,
                      p2i(n), p2i(n->header()));
        *error_cnt_p = *error_cnt_p + 1;
      }
+   }
    if (n->object() != NULL) {
      if (jt != NULL) {
        out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                      ": free per-thread monitor must have NULL _object "
                      "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
