--- old/src/hotspot/share/runtime/synchronizer.cpp 2019-10-17 17:31:46.000000000 -0400 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2019-10-17 17:31:46.000000000 -0400 @@ -37,6 +37,7 @@ #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/handshake.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" @@ -128,10 +129,17 @@ // Global ObjectMonitor in-use list. When a JavaThread is exiting, // ObjectMonitors on its per-thread in-use list are prepended here. static ObjectMonitor* volatile g_om_in_use_list = NULL; +// Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors +// is true, deflated ObjectMonitors wait on this list until after a +// handshake or a safepoint for platforms that don't support handshakes. +// After the handshake or safepoint, the deflated ObjectMonitors are +// prepended to g_free_list. +static ObjectMonitor* volatile g_wait_list = NULL; static volatile int g_om_free_count = 0; // # on g_free_list static volatile int g_om_in_use_count = 0; // # on g_om_in_use_list static volatile int g_om_population = 0; // # Extant -- in circulation +static volatile int g_om_wait_count = 0; // # on g_wait_list #define CHAINMARKER (cast_to_oop(-1)) @@ -212,86 +220,12 @@ return (ObjectMonitor*)((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1); } -#if 0 -// XXX - this is unused -// Unmark the next field in an ObjectMonitor. Requires that the next -// field be marked. -static void unmark_next(ObjectMonitor* om) { - ADIM_guarantee(is_next_marked(om), "next field must be marked: next=" INTPTR_FORMAT, p2i(om->_next_om)); - - ObjectMonitor* next = unmarked_next(om); - set_next(om, next); -} -#endif - -volatile int visit_counter = 42; -static void chk_for_list_loop(ObjectMonitor* list, int count) { - if (!CheckMonitorLists) { - return; - } - int l_visit_counter = Atomic::add(1, &visit_counter); - int l_count = 0; - ObjectMonitor* prev = NULL; - for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) { - if (mid->visit_marker == l_visit_counter) { - log_error(monitorinflation)("ERROR: prev=" INTPTR_FORMAT ", l_count=%d" - " refers to an ObjectMonitor that has" - " already been visited: mid=" INTPTR_FORMAT, - p2i(prev), l_count, p2i(mid)); - fatal("list=" INTPTR_FORMAT " of %d items has a loop.", p2i(list), count); - } - mid->visit_marker = l_visit_counter; - prev = mid; - if (++l_count > count + 1024 * 1024) { - fatal("list=" INTPTR_FORMAT " of %d items may have a loop; l_count=%d", - p2i(list), count, l_count); - } - } -} - -static void chk_om_not_on_list(ObjectMonitor* om, ObjectMonitor* list, int count) { - if (!CheckMonitorLists) { - return; - } - guarantee(list != om, "ERROR: om=" INTPTR_FORMAT " must not be head of the " - "list=" INTPTR_FORMAT ", count=%d", p2i(om), p2i(list), count); - int l_count = 0; - for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) { - if (unmarked_next(mid) == om) { - log_error(monitorinflation)("ERROR: mid=" INTPTR_FORMAT ", l_count=%d" - " next_om refers to om=" INTPTR_FORMAT, - p2i(mid), l_count, p2i(om)); - fatal("list=" INTPTR_FORMAT " of %d items has bad next_om value.", - p2i(list), count); - } - if (++l_count > count + 1024 * 1024) { - fatal("list=" INTPTR_FORMAT " of %d items may have a loop; l_count=%d", - p2i(list), count, l_count); - } - } -} - -static void chk_om_elems_not_on_list(ObjectMonitor* elems, int elems_count, - ObjectMonitor* list, int 
list_count) { - if (!CheckMonitorLists) { - return; - } - chk_for_list_loop(elems, elems_count); - for (ObjectMonitor* mid = elems; mid != NULL; mid = unmarked_next(mid)) { - chk_om_not_on_list(mid, list, list_count); - } -} - // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is // the last ObjectMonitor in the list and there are 'count' on the list. // Also updates the specified *count_p. static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail, int count, ObjectMonitor* volatile* list_p, volatile int* count_p) { - chk_for_list_loop(OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - chk_om_elems_not_on_list(list, count, OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); while (true) { ObjectMonitor* cur = OrderAccess::load_acquire(list_p); // Prepend list to *list_p. @@ -332,10 +266,10 @@ void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) { // First we handle g_block_list: while (true) { - PaddedObjectMonitor* cur = OrderAccess::load_acquire(&g_block_list); + PaddedObjectMonitor* cur = g_block_list; // Prepend new_blk to g_block_list. The first ObjectMonitor in // a block is reserved for use as linkage to the next block. - OrderAccess::release_store(&new_blk[0]._next_om, cur); + new_blk[0]._next_om = cur; if (Atomic::cmpxchg(new_blk, &g_block_list, cur) == cur) { // Successfully switched g_block_list to the new_blk value. Atomic::add(_BLOCKSIZE - 1, &g_om_population); @@ -357,6 +291,15 @@ prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count); } +// Prepend a list of ObjectMonitors to g_wait_list. 'tail' is the last +// ObjectMonitor in the list and there are 'count' on the list. Also +// updates g_om_wait_count. +static void prepend_list_to_g_wait_list(ObjectMonitor* list, + ObjectMonitor* tail, int count) { + assert(HandshakeAfterDeflateIdleMonitors, "sanity check"); + prepend_list_to_common(list, tail, count, &g_wait_list, &g_om_wait_count); +} + // Prepend a list of ObjectMonitors to g_om_in_use_list. 'tail' is the last // ObjectMonitor in the list and there are 'count' on the list. Also // updates g_om_in_use_list. @@ -369,43 +312,28 @@ // the specified counter. static void prepend_to_common(ObjectMonitor* m, ObjectMonitor* volatile * list_p, int volatile * count_p) { - chk_for_list_loop(OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - chk_om_not_on_list(m, OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - while (true) { - ObjectMonitor* cur = OrderAccess::load_acquire(list_p); - // Prepend ObjectMonitor to *list_p. + (void)mark_next_loop(m); // mark m so we can safely update its next field + ObjectMonitor* cur = NULL; ObjectMonitor* next = NULL; - if (!mark_next(m, &next)) { - continue; // failed to mark next field so try it all again + // Mark the list head to guard against A-B-A race: + if (mark_list_head(list_p, &cur, &next)) { + // List head is now marked so we can safely switch it. + set_next(m, cur); // m now points to cur (and unmarks m) + OrderAccess::release_store(list_p, m); // Switch list head to unmarked m. + set_next(cur, next); // Unmark the previous list head. + break; } - set_next(m, cur); // m now points to cur (and unmarks m) - if (cur == NULL) { - // No potential race with other prependers since *list_p is empty. - if (Atomic::cmpxchg(m, list_p, cur) == cur) { - // Successfully switched *list_p to 'm'. 
- Atomic::inc(count_p); - break; - } - // Implied else: try it all again - } else { - // Try to mark next field to guard against races: - if (!mark_next(cur, &next)) { - continue; // failed to mark next field so try it all again - } - // We marked the next field so try to switch *list_p to 'm'. - if (Atomic::cmpxchg(m, list_p, cur) != cur) { - // The list head has changed so unmark the next field and try again: - set_next(cur, next); - continue; - } - Atomic::inc(count_p); - set_next(cur, next); // unmark next field + // The list is empty so try to set the list head. + assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur)); + set_next(m, cur); // m now points to NULL (and unmarks m) + if (Atomic::cmpxchg(m, list_p, cur) == cur) { + // List head is now unmarked m. break; } + // Implied else: try it all again } + Atomic::inc(count_p); } // Prepend an ObjectMonitor to a per-thread om_free_list. @@ -424,9 +352,6 @@ // decrements the specified counter. Returns NULL if none are available. static ObjectMonitor* take_from_start_of_common(ObjectMonitor* volatile * list_p, int volatile * count_p) { - chk_for_list_loop(OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - ObjectMonitor* next = NULL; ObjectMonitor* take = NULL; // Mark the list head to guard against A-B-A race: @@ -572,13 +497,13 @@ // and last are the inflated Java Monitor (ObjectMonitor) checks. lock->set_displaced_header(markWord::unused_mark()); - if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) { + if (owner == NULL && m->try_set_owner_from(self, NULL) == NULL) { assert(m->_recursions == 0, "invariant"); return true; } if (AsyncDeflateIdleMonitors && - Atomic::cmpxchg(self, &m->_owner, DEFLATER_MARKER) == DEFLATER_MARKER) { + m->try_set_owner_from(self, DEFLATER_MARKER) == DEFLATER_MARKER) { // The deflation protocol finished the first part (setting owner), // but it failed the second part (making ref_count negative) and // bailed. Or the ObjectMonitor was async deflated and reused. @@ -1319,6 +1244,9 @@ if (MonitorUsedDeflationThreshold > 0) { int monitors_used = OrderAccess::load_acquire(&g_om_population) - OrderAccess::load_acquire(&g_om_free_count); + if (HandshakeAfterDeflateIdleMonitors) { + monitors_used -= OrderAccess::load_acquire(&g_om_wait_count); + } int monitor_usage = (monitors_used * 100LL) / OrderAccess::load_acquire(&g_om_population); return monitor_usage > MonitorUsedDeflationThreshold; @@ -1351,8 +1279,12 @@ _last_async_deflation_time_ns = os::javaTimeNanos(); return true; } - if (is_MonitorBound_exceeded(OrderAccess::load_acquire(&g_om_population) - - OrderAccess::load_acquire(&g_om_free_count))) { + int monitors_used = OrderAccess::load_acquire(&g_om_population) - + OrderAccess::load_acquire(&g_om_free_count); + if (HandshakeAfterDeflateIdleMonitors) { + monitors_used -= OrderAccess::load_acquire(&g_om_wait_count); + } + if (is_MonitorBound_exceeded(monitors_used)) { // Not enough ObjectMonitors on the global free list. return true; } @@ -1397,7 +1329,6 @@ void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - chk_for_list_loop(list, count); // The oops_do() phase does not overlap with monitor deflation // so no need to update the ObjectMonitor's ref_count for this // ObjectMonitor* use. @@ -1539,6 +1470,9 @@ } } take->Recycle(); + // Since we're taking from the global free-list, take must be Free. 
+ // om_release() also sets the allocation state to Free because it + // is called from other code paths. assert(take->is_free(), "invariant"); om_release(self, take, false); } @@ -1638,24 +1572,15 @@ while (true) { if (m == mid) { // We found 'm' on the per-thread in-use list so try to extract it. - // First try the list head: - if (Atomic::cmpxchg(next, &self->om_in_use_list, mid) != mid) { - // We could not switch the list head to next. - ObjectMonitor* marked_mid = mark_om_ptr(mid); - // Switch cur_mid_in_use's next field to next (which also - // unmarks cur_mid_in_use): - ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL"); - if (Atomic::cmpxchg(next, &cur_mid_in_use->_next_om, marked_mid) - != marked_mid) { - // We could not switch cur_mid_in_use's next field. This - // should not be possible since it was marked so we: - fatal("mid=" INTPTR_FORMAT " must be referred to by the list " - "head: &om_in_use_list=" INTPTR_FORMAT " or by " - "cur_mid_in_use's next field: cur_mid_in_use=" INTPTR_FORMAT - ", next_om=" INTPTR_FORMAT, p2i(mid), - p2i((ObjectMonitor**)&self->om_in_use_list), - p2i(cur_mid_in_use), p2i(cur_mid_in_use->_next_om)); - } + if (cur_mid_in_use == NULL) { + // mid is the list head and it is marked. Switch the list head + // to next which unmarks the list head, but leaves mid marked: + OrderAccess::release_store(&self->om_in_use_list, next); + } else { + // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's + // next field to next which unmarks cur_mid_in_use, but leaves + // mid marked: + OrderAccess::release_store(&cur_mid_in_use->_next_om, next); } extracted = true; Atomic::dec(&self->om_in_use_count); @@ -1720,7 +1645,6 @@ // is exiting, but if it has made it past that check before we // started exiting, then it is racing to get to the in-use list. if (mark_list_head(&self->om_in_use_list, &in_use_list, &next)) { - chk_for_list_loop(in_use_list, OrderAccess::load_acquire(&self->om_in_use_count)); // At this point, we have marked the in-use list head so an // async deflation thread cannot come in after us. If an async // deflation thread is ahead of us, then we'll detect that and @@ -1776,7 +1700,6 @@ ObjectMonitor* free_list = OrderAccess::load_acquire(&self->om_free_list); ObjectMonitor* free_tail = NULL; if (free_list != NULL) { - chk_for_list_loop(free_list, OrderAccess::load_acquire(&self->om_free_count)); // The thread is going away. Set 'free_tail' to the last per-thread free // monitor which will be linked to g_free_list below. stringStream ss; @@ -1929,6 +1852,7 @@ markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark); if (cmp != mark) { + // om_release() will reset the allocation state from New to Free. om_release(self, m, true); continue; // Interference -- just retry } @@ -1976,19 +1900,26 @@ // Note that a thread can inflate an object // that it has stack-locked -- as might happen in wait() -- directly // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. - m->set_owner(mark.locker()); + if (AsyncDeflateIdleMonitors) { + m->set_owner_from(mark.locker(), NULL, DEFLATER_MARKER); + } else { + m->set_owner_from(mark.locker(), NULL); + } m->set_object(object); // TODO-FIXME: assert BasicLock->dhw != 0. omh_p->set_om_ptr(m); - assert(m->is_new(), "freshly allocated monitor must be new"); - m->set_allocation_state(ObjectMonitor::Old); // Must preserve store ordering. The monitor state must // be stable at the time of publishing the monitor address. 
guarantee(object->mark() == markWord::INFLATING(), "invariant"); object->release_set_mark(markWord::encode(m)); + // Once ObjectMonitor is configured and the object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + assert(m->is_new(), "freshly allocated monitor must be new"); + m->set_allocation_state(ObjectMonitor::Old); + // Hopefully the performance counters are allocated on distinct cache lines // to avoid false sharing on MP systems ... OM_PERFDATA_OP(Inflations, inc()); @@ -2029,19 +1960,13 @@ m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class omh_p->set_om_ptr(m); - assert(m->is_new(), "freshly allocated monitor must be new"); - m->set_allocation_state(ObjectMonitor::Old); if (object->cas_set_mark(markWord::encode(m), mark) != mark) { - guarantee(!m->owner_is_DEFLATER_MARKER() || m->ref_count() >= 0, - "race between deflation and om_release() with m=" INTPTR_FORMAT - ", _owner=" INTPTR_FORMAT ", ref_count=%d", p2i(m), - p2i(m->_owner), m->ref_count()); m->set_header(markWord::zero()); m->set_object(NULL); m->Recycle(); omh_p->set_om_ptr(NULL); - // om_release() will reset the allocation state + // om_release() will reset the allocation state from New to Free. om_release(self, m, true); m = NULL; continue; @@ -2050,6 +1975,11 @@ // live-lock -- "Inflated" is an absorbing state. } + // Once the ObjectMonitor is configured and object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + assert(m->is_new(), "freshly allocated monitor must be new"); + m->set_allocation_state(ObjectMonitor::Old); + // Hopefully the performance counters are allocated on distinct // cache lines to avoid false sharing on MP systems ... OM_PERFDATA_OP(Inflations, inc()); @@ -2153,8 +2083,9 @@ if (AsyncDeflateIdleMonitors) { // clear() expects the owner field to be NULL and we won't race // with the simple C2 ObjectMonitor enter optimization since - // we're at a safepoint. - mid->set_owner(NULL); + // we're at a safepoint. DEFLATER_MARKER is the only non-NULL + // value we should see here. + mid->try_set_owner_from(NULL, DEFLATER_MARKER); } mid->clear(); @@ -2219,7 +2150,7 @@ return false; } - if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) { + if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) == NULL) { // ObjectMonitor is not owned by another thread. Our setting // owner to DEFLATER_MARKER forces any contending thread through // the slow path. This is just the first part of the async @@ -2230,7 +2161,7 @@ // mid->is_busy() above or has already entered and waited on // it which makes it busy so no deflation. Restore owner to // NULL if it is still DEFLATER_MARKER. - Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); + mid->try_set_owner_from(NULL, DEFLATER_MARKER); return false; } @@ -2322,7 +2253,7 @@ // The ref_count was no longer 0 so we lost the race since the // ObjectMonitor is now busy or the ObjectMonitor* is now is use. // Restore owner to NULL if it is still DEFLATER_MARKER: - Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER); + mid->try_set_owner_from(NULL, DEFLATER_MARKER); } // The owner field is no longer NULL so we lost the race since the @@ -2363,28 +2294,20 @@ // Deflation succeeded and already updated free_head_p and // free_tail_p as needed. Finish the move to the local free list // by unlinking mid from the global or per-thread in-use list. - if (Atomic::cmpxchg(next, list_p, mid) != mid) { - // We could not switch the list head to next. 
- ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL"); - if (Atomic::cmpxchg(next, &cur_mid_in_use->_next_om, mid) != mid) { - // deflate_monitor_list() is called at a safepoint so the - // global or per-thread in-use list should not be modified - // in parallel so we: - fatal("mid=" INTPTR_FORMAT " must be referred to by the list head: " - "list_p=" INTPTR_FORMAT " or by cur_mid_in_use's next field: " - "cur_mid_in_use=" INTPTR_FORMAT ", next_om=" INTPTR_FORMAT, - p2i(mid), p2i((ObjectMonitor**)list_p), p2i(cur_mid_in_use), - p2i(cur_mid_in_use->_next_om)); - } + if (cur_mid_in_use == NULL) { + // mid is the list head and it is marked. Switch the list head + // to next which unmarks the list head, but leaves mid marked: + OrderAccess::release_store(list_p, next); + } else { + // mid is marked. Switch cur_mid_in_use's next field to next + // which is safe because we have no parallel list deletions, + // but we leave mid marked: + OrderAccess::release_store(&cur_mid_in_use->_next_om, next); } // At this point mid is disconnected from the in-use list so // its marked next field no longer has any effects. deflated_count++; Atomic::dec(count_p); - chk_for_list_loop(OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - chk_om_not_on_list(mid, OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); // mid is current tail in the free_head_p list so NULL terminate it // (which also unmarks it): set_next(mid, NULL); @@ -2458,7 +2381,7 @@ // a cur_mid_in_use, then its next field is also marked at this point. if (next != NULL) { - // We mark the next -> next field so that an om_flush() + // We mark next's next field so that an om_flush() // thread that is behind us cannot pass us when we // unmark the current mid's next field. next_next = mark_next_loop(next); @@ -2471,31 +2394,21 @@ // Deflation succeeded and already updated free_head_p and // free_tail_p as needed. Finish the move to the local free list // by unlinking mid from the global or per-thread in-use list. - if (Atomic::cmpxchg(next, list_p, mid) != mid) { - // We could not switch the list head to next. - ObjectMonitor* marked_mid = mark_om_ptr(mid); + if (cur_mid_in_use == NULL) { + // mid is the list head and it is marked. Switch the list head + // to next which is also marked (if not NULL) and also leave + // mid marked: + OrderAccess::release_store(list_p, next); + } else { ObjectMonitor* marked_next = mark_om_ptr(next); - // Switch cur_mid_in_use's next field to marked next: - ADIM_guarantee(cur_mid_in_use != NULL, "must not be NULL"); - if (Atomic::cmpxchg(marked_next, &cur_mid_in_use->_next_om, - marked_mid) != marked_mid) { - // We could not switch cur_mid_in_use's next field. This - // should not be possible since it was marked so we: - fatal("mid=" INTPTR_FORMAT " must be referred to by the list head: " - "&list_p=" INTPTR_FORMAT " or by cur_mid_in_use's next field: " - "cur_mid_in_use=" INTPTR_FORMAT ", next_om=" INTPTR_FORMAT, - p2i(mid), p2i((ObjectMonitor**)list_p), p2i(cur_mid_in_use), - p2i(cur_mid_in_use->_next_om)); - } + // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's + // next field to marked_next and also leave mid marked: + OrderAccess::release_store(&cur_mid_in_use->_next_om, marked_next); } // At this point mid is disconnected from the in-use list so // its marked next field no longer has any effects. 
deflated_count++; Atomic::dec(count_p); - chk_for_list_loop(OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); - chk_om_not_on_list(mid, OrderAccess::load_acquire(list_p), - OrderAccess::load_acquire(count_p)); // mid is current tail in the free_head_p list so NULL terminate it // (which also unmarks it): set_next(mid, NULL); @@ -2620,6 +2533,67 @@ } } +class HandshakeForDeflation : public ThreadClosure { + public: + void do_thread(Thread* thread) { + log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" + INTPTR_FORMAT, p2i(thread)); + } +}; + +void ObjectSynchronizer::deflate_idle_monitors_using_JT() { + assert(AsyncDeflateIdleMonitors, "sanity check"); + + // Deflate any global idle monitors. + deflate_global_idle_monitors_using_JT(); + + int count = 0; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { + if (jt->om_in_use_count > 0 && !jt->is_exiting()) { + // This JavaThread is using ObjectMonitors so deflate any that + // are idle unless this JavaThread is exiting; do not race with + // ObjectSynchronizer::om_flush(). + deflate_per_thread_idle_monitors_using_JT(jt); + count++; + } + } + if (count > 0) { + log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); + } + // The ServiceThread's async deflation request has been processed. + set_is_async_deflation_requested(false); + + if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) { + // There are deflated ObjectMonitors waiting for a handshake + // (or a safepoint) for safety. + + // g_wait_list and g_om_wait_count are only updated by the calling + // thread so no need for load_acquire() or release_store(). + ObjectMonitor* list = g_wait_list; + ADIM_guarantee(list != NULL, "g_wait_list must not be NULL"); + int count = g_om_wait_count; + g_wait_list = NULL; + g_om_wait_count = 0; + + // Find the tail for prepend_list_to_common(). + int l_count = 0; + ObjectMonitor* tail = NULL; + for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) { + tail = n; + l_count++; + } + ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count); + + // Will execute a safepoint if !ThreadLocalHandshakes: + HandshakeForDeflation hfd_tc; + Handshake::execute(&hfd_tc); + + prepend_list_to_common(list, tail, count, &g_free_list, &g_om_free_count); + + log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); + } +} + // Deflate global idle ObjectMonitors using a JavaThread. 
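// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot code) of the wait-list idea used
// above when HandshakeAfterDeflateIdleMonitors is true: deflated
// ObjectMonitors are parked on g_wait_list and are only recycled onto
// g_free_list after a handshake (or a safepoint), because a racing thread
// may still hold an ObjectMonitor* it fetched just before deflation. The
// sketch abstracts the grace period as a caller-supplied callback standing
// in for Handshake::execute(); all names here (Monitor, DeferredRecycler,
// grace_period, ...) are hypothetical and the memory orderings are
// simplified.
#include <atomic>
#include <functional>

struct Monitor { Monitor* next = nullptr; };

class DeferredRecycler {
 public:
  // Called by the deflating thread for each monitor it has just deflated.
  void defer(Monitor* m) {
    m->next = _wait_list;   // only the deflating thread touches _wait_list
    _wait_list = m;
    _wait_count++;
  }

  // Called by the deflating thread once a batch is complete. 'grace_period'
  // returns only after every other thread has reached a point where it can
  // no longer hold a pre-deflation reference.
  void flush(const std::function<void()>& grace_period,
             std::atomic<Monitor*>& free_list, std::atomic<int>& free_count) {
    if (_wait_list == nullptr) return;
    Monitor* head = _wait_list;
    Monitor* tail = head;
    while (tail->next != nullptr) tail = tail->next;  // find the tail
    int count = _wait_count;
    _wait_list = nullptr;
    _wait_count = 0;

    grace_period();  // after this, no thread can still be using the batch

    // Prepend the whole batch onto the lock-free global free list.
    Monitor* cur = free_list.load(std::memory_order_acquire);
    do {
      tail->next = cur;
    } while (!free_list.compare_exchange_weak(cur, head,
                                              std::memory_order_release,
                                              std::memory_order_acquire));
    free_count.fetch_add(count, std::memory_order_relaxed);
  }

 private:
  Monitor* _wait_list = nullptr;  // single writer: the deflating thread
  int _wait_count = 0;
};
// ---------------------------------------------------------------------------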
// void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { @@ -2684,7 +2658,11 @@ assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(unmarked_next(free_tail_p))); - prepend_list_to_g_free_list(free_head_p, free_tail_p, local_deflated_count); + if (HandshakeAfterDeflateIdleMonitors) { + prepend_list_to_g_wait_list(free_head_p, free_tail_p, local_deflated_count); + } else { + prepend_list_to_g_free_list(free_head_p, free_tail_p, local_deflated_count); + } OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); } @@ -2754,10 +2732,11 @@ ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); } else if (log_is_enabled(Info, monitorinflation)) { log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, " - "g_om_free_count=%d", + "g_om_free_count=%d, g_om_wait_count=%d", OrderAccess::load_acquire(&g_om_population), OrderAccess::load_acquire(&g_om_in_use_count), - OrderAccess::load_acquire(&g_om_free_count)); + OrderAccess::load_acquire(&g_om_free_count), + OrderAccess::load_acquire(&g_om_wait_count)); } ForceMonitorScavenge = 0; // Reset @@ -2924,11 +2903,14 @@ OrderAccess::load_acquire(&g_om_population), chk_om_population); } else { - ls->print_cr("ERROR: g_om_population=%d is not equal to " + // With lock free access to the monitor lists, it is possible for + // log_monitor_list_counts() to return a value that doesn't match + // g_om_population. So far a higher value has been seen in testing + // so something is being double counted by log_monitor_list_counts(). + ls->print_cr("WARNING: g_om_population=%d is not equal to " "chk_om_population=%d", OrderAccess::load_acquire(&g_om_population), chk_om_population); - error_cnt++; } // Check g_om_in_use_list and g_om_in_use_count: @@ -2937,6 +2919,11 @@ // Check g_free_list and g_om_free_count: chk_global_free_list_and_count(ls, &error_cnt); + if (HandshakeAfterDeflateIdleMonitors) { + // Check g_wait_list and g_om_wait_count: + chk_global_wait_list_and_count(ls, &error_cnt); + } + ls->print_cr("Checking per-thread lists:"); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { @@ -3034,6 +3021,28 @@ } } +// Check the global wait list and count; log the results of the checks. +void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out, + int *error_cnt_p) { + int chk_om_wait_count = 0; + for (ObjectMonitor* n = OrderAccess::load_acquire(&g_wait_list); n != NULL; n = unmarked_next(n)) { + // Rules for g_wait_list are the same as of g_free_list: + chk_free_entry(NULL /* jt */, n, out, error_cnt_p); + chk_om_wait_count++; + } + if (OrderAccess::load_acquire(&g_om_wait_count) == chk_om_wait_count) { + out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d", + OrderAccess::load_acquire(&g_om_wait_count), + chk_om_wait_count); + } else { + out->print_cr("ERROR: g_om_wait_count=%d is not equal to " + "chk_om_wait_count=%d", + OrderAccess::load_acquire(&g_om_wait_count), + chk_om_wait_count); + *error_cnt_p = *error_cnt_p + 1; + } +} + // Check the global in-use list and count; log the results of the checks. 
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out, int *error_cnt_p) { @@ -3047,10 +3056,12 @@ OrderAccess::load_acquire(&g_om_in_use_count), chk_om_in_use_count); } else { - out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d", + // With lock free access to the monitor lists, it is possible for + // an exiting JavaThread to put its in-use ObjectMonitors on the + // global in-use list after chk_om_in_use_count is calculated above. + out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d", OrderAccess::load_acquire(&g_om_in_use_count), chk_om_in_use_count); - *error_cnt_p = *error_cnt_p + 1; } } @@ -3215,15 +3226,19 @@ // the population count. int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) { int pop_count = 0; - out->print_cr("%18s %10s %10s %10s", - "Global Lists:", "InUse", "Free", "Total"); - out->print_cr("================== ========== ========== =========="); - out->print_cr("%18s %10d %10d %10d", "", + out->print_cr("%18s %10s %10s %10s %10s", + "Global Lists:", "InUse", "Free", "Wait", "Total"); + out->print_cr("================== ========== ========== ========== =========="); + out->print_cr("%18s %10d %10d %10d %10d", "", OrderAccess::load_acquire(&g_om_in_use_count), OrderAccess::load_acquire(&g_om_free_count), + OrderAccess::load_acquire(&g_om_wait_count), OrderAccess::load_acquire(&g_om_population)); pop_count += OrderAccess::load_acquire(&g_om_in_use_count) + OrderAccess::load_acquire(&g_om_free_count); + if (HandshakeAfterDeflateIdleMonitors) { + pop_count += OrderAccess::load_acquire(&g_om_wait_count); + } out->print_cr("%18s %10s %10s %10s", "Per-Thread Lists:", "InUse", "Free", "Provision");
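The list manipulations throughout this change build on one technique: a node's next pointer is "marked" by setting its low bit, which locks that single link so the list head can be updated without A-B-A hazards while the rest of the list stays lock free. A minimal standalone sketch of that prepend/take pattern, assuming std::atomic and hypothetical names (Node, mark_list_head, prepend, take_from_head) with simplified memory orderings, might look like this:

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};
  int payload{0};
};

static Node* set_mark(Node* p)   { return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(p) | 1u); }
static Node* clear_mark(Node* p) { return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1)); }
static bool  is_marked(Node* p)  { return (reinterpret_cast<uintptr_t>(p) & 1u) != 0; }

// Try to mark n->next, i.e. lock the link out of n. On success, *next_out
// holds the unmarked successor.
static bool try_mark_next(Node* n, Node** next_out) {
  Node* old_next = n->next.load(std::memory_order_acquire);
  if (is_marked(old_next)) return false;                 // someone else owns the link
  if (n->next.compare_exchange_strong(old_next, set_mark(old_next))) {
    *next_out = clear_mark(old_next);
    return true;
  }
  return false;
}

// Mark the first node on the list, i.e. lock the list head for update.
// Returns false only if the list is empty.
static bool mark_list_head(std::atomic<Node*>& head, Node** cur_out, Node** next_out) {
  while (true) {
    Node* cur = head.load(std::memory_order_acquire);
    if (cur == nullptr) return false;
    Node* next = nullptr;
    if (!try_mark_next(cur, &next)) continue;            // contended; retry
    if (head.load(std::memory_order_acquire) == cur) {
      *cur_out = cur;
      *next_out = next;
      return true;                                       // head is now "marked"
    }
    cur->next.store(next, std::memory_order_release);    // head moved; unmark and retry
  }
}

// Prepend a privately owned node; the marked head guards against A-B-A.
static void prepend(std::atomic<Node*>& head, Node* m, std::atomic<int>& count) {
  while (true) {
    Node* cur = nullptr;
    Node* next = nullptr;
    if (mark_list_head(head, &cur, &next)) {
      m->next.store(cur, std::memory_order_relaxed);     // link m -> cur
      head.store(m, std::memory_order_release);          // publish new head
      cur->next.store(next, std::memory_order_release);  // unmark old head
      break;
    }
    // List was empty: try to install m as the only node.
    m->next.store(nullptr, std::memory_order_relaxed);
    Node* expected = nullptr;
    if (head.compare_exchange_strong(expected, m)) break;
    // Lost the race with another prepender; try it all again.
  }
  count.fetch_add(1, std::memory_order_relaxed);
}

// Take the first node, or nullptr if the list is empty.
static Node* take_from_head(std::atomic<Node*>& head, std::atomic<int>& count) {
  Node* take = nullptr;
  Node* next = nullptr;
  if (!mark_list_head(head, &take, &next)) return nullptr;
  head.store(next, std::memory_order_release);           // unlink take
  take->next.store(nullptr, std::memory_order_release);  // unmark take
  count.fetch_sub(1, std::memory_order_relaxed);
  return take;
}

The patch itself does a little more than the sketch: prepend_to_common() first marks the incoming node's own next field (mark_next_loop(m)) because ObjectMonitors can be observed by other threads, and the deflation paths NULL-terminate or re-mark nodes as they move them between lists. The sketch keeps only the head-marking step that provides the A-B-A guard.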