--- old/src/hotspot/share/runtime/synchronizer.cpp 2020-01-27 15:15:53.000000000 -0500
+++ new/src/hotspot/share/runtime/synchronizer.cpp 2020-01-27 15:15:53.000000000 -0500
@@ -163,16 +163,21 @@
 }
 
 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
-// Note: the om parameter may or may not have been marked originally.
 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
   return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
 }
 
+// Return the unmarked next field in an ObjectMonitor. Note: the next
+// field may or may not have been marked with OM_LOCK_BIT originally.
+static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
+  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+}
+
 // Try to lock an ObjectMonitor. Returns true if locking was successful.
 // Otherwise returns false.
 static bool try_om_lock(ObjectMonitor* om) {
   // Get current next field without any OM_LOCK_BIT value.
-  ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+  ObjectMonitor* next = unmarked_next(om);
   if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
     return false;  // Cannot lock the ObjectMonitor.
   }
@@ -208,7 +213,7 @@
     }
     if (try_om_lock(mid)) {
       if (Atomic::load(list_p) != mid) {
-        // The list head changed so we have to retry.
+        // The list head changed before we could lock it so we have to retry.
         om_unlock(mid);
         continue;
       }
@@ -217,12 +222,6 @@
   }
 }
 
-// Return the unmarked next field in an ObjectMonitor. Note: the next
-// field may or may not have been marked with OM_LOCK_BIT originally.
-static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
-  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
-}
-
 #undef OM_LOCK_BIT
 
 
@@ -1152,18 +1151,19 @@
       }
     }
     // unmarked_next() is not needed with g_block_list (no locking
-    // used with with block linkage _next_om fields).
+    // used with block linkage _next_om fields).
     block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
 }
 static bool monitors_used_above_threshold() {
-  if (Atomic::load(&LVars.population) == 0) {
+  int population = Atomic::load(&LVars.population);
+  if (population == 0) {
     return false;
   }
   if (MonitorUsedDeflationThreshold > 0) {
-    int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
-    int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
+    int monitors_used = population - Atomic::load(&LVars.free_count);
+    int monitor_usage = (monitors_used * 100LL) / population;
     return monitor_usage > MonitorUsedDeflationThreshold;
   }
   return false;
 }
@@ -1278,6 +1278,7 @@
   // scavenge costs. As usual, we lean toward time in space-time
   // tradeoffs.
   const int MAXPRIVATE = 1024;
+  NoSafepointVerifier nsv;
   stringStream ss;
 
   for (;;) {
@@ -1385,6 +1386,8 @@
                                    bool from_per_thread_alloc) {
   guarantee(m->header().value() == 0, "invariant");
   guarantee(m->object() == NULL, "invariant");
+  NoSafepointVerifier nsv;
+
   stringStream ss;
   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
             "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
@@ -1393,55 +1396,74 @@
   // we have to remove 'm' from the in-use list first (as needed).
   if (from_per_thread_alloc) {
     // Need to remove 'm' from om_in_use_list.
-    ObjectMonitor* cur_mid_in_use = NULL;
     ObjectMonitor* mid = NULL;
     ObjectMonitor* next = NULL;
-    bool extracted = false;
-    // We use the simpler lock-mid-as-we-go protocol to prevent races
-    // with a list walker thread since there are no parallel list
-    // deletions (deflations happen at a safepoint).
+    // This list walk can only race with another list walker since
+    // deflation can only happen at a safepoint so we don't have to
+    // worry about an ObjectMonitor being removed from this list
+    // while we are walking it.
+
+    // Lock the list head to avoid racing with another list walker.
     if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
       fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
     }
     next = unmarked_next(mid);
-    while (true) {
-      if (m == mid) {
-        // We found 'm' on the per-thread in-use list so try to extract it.
-        if (cur_mid_in_use == NULL) {
-          // mid is the list head and it is locked. Switch the list head
-          // to next which unlocks the list head, but leaves mid locked:
-          Atomic::store(&self->om_in_use_list, next);
-        } else {
-          // mid is locked. Switch cur_mid_in_use's next field to next
-          // which is safe because we have no parallel list deletions,
-          // but we leave mid locked:
-          set_next(cur_mid_in_use, next);
-        }
-        // At this point mid is disconnected from the in-use list so
-        // its lock no longer has any effects on the in-use list.
-        extracted = true;
-        Atomic::dec(&self->om_in_use_count);
-        // Unlock mid, but leave the next value for any lagging list
-        // walkers. It will get cleaned up when mid is prepended to
-        // the thread's free list:
-        om_unlock(mid);
-        break;
-      } else {
-        om_unlock(mid);
-        cur_mid_in_use = mid;
-      }
-      // All the list management is done so move on to the next one:
+    if (m == mid) {
+      // First special case:
+      // 'm' matches mid, is the list head and is locked. Switch the list
+      // head to next which unlocks the list head, but leaves the extracted
+      // mid locked:
+      Atomic::store(&self->om_in_use_list, next);
+    } else if (m == next) {
+      // Second special case:
+      // 'm' matches next after the list head and we already have the list
+      // head locked so set mid to what we are extracting:
       mid = next;
-      if (mid == NULL) {
-        // Reached end of the list and didn't find m so:
-        fatal("must find m=" INTPTR_FORMAT "on om_in_use_list=" INTPTR_FORMAT,
-              p2i(m), p2i(self->om_in_use_list));
-      }
-      // Lock mid so we can possibly extract it:
+      // Lock mid to prevent races with a list walker:
      om_lock(mid);
+      // Update next to what follows mid (if anything):
      next = unmarked_next(mid);
+      // Switch next after the list head to new next which unlocks the
+      // list head, but leaves the extracted mid locked:
+      set_next(self->om_in_use_list, next);
+    } else {
+      // We have to search the list to find 'm'.
+      om_unlock(mid);  // unlock the list head
+      guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
+                " is too short.", p2i(self), p2i(self->om_in_use_list));
+      // Our starting anchor is next after the list head which is the
+      // last ObjectMonitor we checked:
+      ObjectMonitor* anchor = next;
+      while ((mid = unmarked_next(anchor)) != NULL) {
+        if (m == mid) {
+          // We found 'm' on the per-thread in-use list so extract it.
+          om_lock(anchor);  // Lock the anchor so we can safely modify it.
+          // Update next to what follows mid (if anything):
+          next = unmarked_next(mid);
+          // Switch next after the anchor to new next which unlocks the
+          // anchor, but leaves the extracted mid locked:
+          set_next(anchor, next);
+          break;
+        } else {
+          anchor = mid;
+        }
+      }
+    }
+
+    if (mid == NULL) {
+      // Reached end of the list and didn't find 'm' so:
+      fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
+            INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
     }
+
+    // At this point mid is disconnected from the in-use list so
+    // its lock no longer has any effects on the in-use list.
+    Atomic::dec(&self->om_in_use_count);
+    // Unlock mid, but leave the next value for any lagging list
+    // walkers. It will get cleaned up when mid is prepended to
+    // the thread's free list:
+    om_unlock(mid);
   }
 
   prepend_to_om_free_list(self, m);
@@ -1465,6 +1487,7 @@
   int in_use_count = 0;
   ObjectMonitor* in_use_list = NULL;
   ObjectMonitor* in_use_tail = NULL;
+  NoSafepointVerifier nsv;
 
   // This function can race with a list walker thread so we lock the
   // list head to prevent confusion.
@@ -1843,8 +1866,7 @@
                        p2i(mid->object()));
 
     // Move the deflated ObjectMonitor to the working free list
-    // defined by free_head_p and free_tail_p. The working list is
-    // local so no need for a memory barrier.
+    // defined by free_head_p and free_tail_p.
     if (*free_head_p == NULL) *free_head_p = mid;
     if (*free_tail_p != NULL) {
       // We append to the list so the caller can use mid->_next_om
@@ -1891,49 +1913,31 @@
   ObjectMonitor* next = NULL;
   int deflated_count = 0;
 
-  // We use the simpler lock-mid-as-we-go protocol to prevent races
-  // with a list walker thread since this caller is the only one doing
-  // deletions on this list during the safepoint.
-  if ((mid = get_list_head_locked(list_p)) == NULL) {
-    return 0;  // The list is empty so nothing to deflate.
-  }
-  next = unmarked_next(mid);
+  // This list walk executes at a safepoint and does not race with any
+  // other list walkers.
 
-  while (true) {
+  for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
+    next = unmarked_next(mid);
     oop obj = (oop) mid->object();
     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
       // Deflation succeeded and already updated free_head_p and
       // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
-        // mid is the list head and it is locked. Switch the list head
-        // to next which unlocks the list head, but leaves mid locked:
+        // mid is the list head so switch the list head to next:
        Atomic::store(list_p, next);
      } else {
-        // mid is locked. Switch cur_mid_in_use's next field to next
-        // which is safe because we have no parallel list deletions,
-        // but we leave mid locked:
+        // Switch cur_mid_in_use's next field to next:
        set_next(cur_mid_in_use, next);
      }
-      // At this point mid is disconnected from the in-use list so
-      // its lock no longer has any effects on the in-use list.
+      // At this point mid is disconnected from the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it
-      // (which also unlocks it):
+      // mid is current tail in the free_head_p list so NULL terminate it:
      set_next(mid, NULL);
    } else {
-      om_unlock(mid);
      cur_mid_in_use = mid;
    }
-    // All the list management is done so move on to the next one:
-    mid = next;
-    if (mid == NULL) {
-      break;  // Reached end of the list so nothing more to deflate.
-    }
-    // Lock mid so we can possibly deflate it:
-    om_lock(mid);
-    next = unmarked_next(mid);
   }
   return deflated_count;
 }
@@ -1944,7 +1948,6 @@
   counters->n_scavenged = 0;            // reclaimed (global and per-thread)
   counters->per_thread_scavenged = 0;   // per-thread scavenge total
   counters->per_thread_times = 0.0;     // per-thread scavenge times
-  OrderAccess::storestore();            // flush inits for worker threads
 }
 
 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
@@ -1974,7 +1977,6 @@
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    // The working list is local so no need for a memory barrier.
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
 #ifdef ASSERT
     ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
@@ -2044,7 +2046,6 @@
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    // The working list is local so no need for a memory barrier.
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
 #ifdef ASSERT
     ObjectMonitor* l_next_om = Atomic::load(&free_tail_p->_next_om);
@@ -2585,7 +2586,7 @@
       return 1;
     }
     // unmarked_next() is not needed with g_block_list (no locking
-    // used with with block linkage _next_om fields).
+    // used with block linkage _next_om fields).
     block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
   return 0;
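
Note (illustration only, not part of the patch): the list manipulation above relies on using the low-order bit of an ObjectMonitor's _next_om field (OM_LOCK_BIT) as a per-node spinlock, so a node can be locked, its successor read with unmarked_next(), and the list relinked without a global lock. Below is a minimal, self-contained sketch of that idea written against std::atomic rather than HotSpot's Atomic:: wrapper. Node, LOCK_BIT, mark_ptr(), try_lock_node() and unlock_node() are made-up names for this sketch, not HotSpot code.

// Sketch of the low-order-bit "next field as spinlock" technique.
#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};
};

const intptr_t LOCK_BIT = 1;

// Set the lock bit in a pointer value.
static Node* mark_ptr(Node* n) {
  return reinterpret_cast<Node*>(reinterpret_cast<intptr_t>(n) | LOCK_BIT);
}

// Load a node's next pointer with the lock bit stripped off.
static Node* unmarked_next(const Node* n) {
  intptr_t raw = reinterpret_cast<intptr_t>(n->next.load(std::memory_order_acquire));
  return reinterpret_cast<Node*>(raw & ~LOCK_BIT);
}

// Try to lock a node by CASing the lock bit into its next field.
// Fails if the node is already locked (the stored value is marked, so it
// cannot equal the unmarked expected value) or if next changed between
// the load and the CAS.
static bool try_lock_node(Node* n) {
  Node* expected = unmarked_next(n);
  return n->next.compare_exchange_strong(expected, mark_ptr(expected),
                                         std::memory_order_acq_rel);
}

// Unlock a node by storing the unmarked next value back.
static void unlock_node(Node* n) {
  n->next.store(unmarked_next(n), std::memory_order_release);
}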