
src/hotspot/share/runtime/synchronizer.cpp

rev 56775 : imported patch 8230876.patch
rev 56776 : v2.00 -> v2.07 (CR7/v2.07/10-for-jdk14) patches combined into one; merge with 8230876.patch (2019.10.17) and jdk-14+21.
rev 56777 : See CR7-to-CR8-changes.

*** 147,156 ****
--- 147,158 ----
  // =====================> List Management functions

  // Return true if the ObjectMonitor's next field is marked.
  // Otherwise returns false.
  static bool is_next_marked(ObjectMonitor* om) {
+   // Use load_acquire() since _next_om fields are updated with a
+   // release_store().
    return ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & 0x1) != 0;
  }

  // Mark an ObjectMonitor* and return it. Note: the om parameter
  // may or may not have been marked originally.
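Annotation: the marking scheme in this and the following hunks relies on ObjectMonitor alignment leaving bit 0 of every _next_om value free. A minimal standalone sketch of the idea, using std::atomic as a stand-in for HotSpot's OrderAccess/Atomic primitives (Node, mark, unmark are illustrative names, not the patch's):

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-ins: Node for ObjectMonitor, std::atomic for the
    // volatile _next_om field that HotSpot accesses via OrderAccess/Atomic.
    struct Node {
      std::atomic<Node*> next;
    };

    // ObjectMonitors are at least 2-byte aligned, so bit 0 of a pointer
    // value is free to serve as a "this node's next field is locked" flag.
    static bool is_marked(Node* p) {
      return (reinterpret_cast<intptr_t>(p) & 0x1) != 0;
    }
    static Node* mark(Node* p) {
      return reinterpret_cast<Node*>(reinterpret_cast<intptr_t>(p) | 0x1);
    }
    static Node* unmark(Node* p) {
      return reinterpret_cast<Node*>(reinterpret_cast<intptr_t>(p) & ~0x1);
    }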
*** 161,172 ****
  // Mark the next field in an ObjectMonitor. If marking was successful,
  // then the unmarked next field is returned via parameter and true is
  // returned. Otherwise false is returned.
  static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) {
    // Get current next field without any marking value.
!   ObjectMonitor* next = (ObjectMonitor*)
!       ((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
    if (Atomic::cmpxchg(mark_om_ptr(next), &om->_next_om, next) != next) {
      return false;  // Could not mark the next field or it was already marked.
    }
    *next_p = next;
    return true;
--- 163,173 ----
  // Mark the next field in an ObjectMonitor. If marking was successful,
  // then the unmarked next field is returned via parameter and true is
  // returned. Otherwise false is returned.
  static bool mark_next(ObjectMonitor* om, ObjectMonitor** next_p) {
    // Get current next field without any marking value.
!   ObjectMonitor* next = (ObjectMonitor*)((intptr_t)om->_next_om & ~0x1);
    if (Atomic::cmpxchg(mark_om_ptr(next), &om->_next_om, next) != next) {
      return false;  // Could not mark the next field or it was already marked.
    }
    *next_p = next;
    return true;
*** 195,210 ****
  // successful, then the mid and the unmarked next field are returned
  // via parameter and true is returned. Otherwise false is returned.
  static bool mark_list_head(ObjectMonitor* volatile * list_p,
                             ObjectMonitor** mid_p, ObjectMonitor** next_p) {
    while (true) {
!     ObjectMonitor* mid = OrderAccess::load_acquire(list_p);
      if (mid == NULL) {
        return false;  // The list is empty so nothing to mark.
      }
      if (mark_next(mid, next_p)) {
!       if (OrderAccess::load_acquire(list_p) != mid) {
          // The list head changed so we have to retry.
          set_next(mid, *next_p);  // unmark mid
          continue;
        }
        // We marked next field to guard against races.
--- 196,211 ----
  // successful, then the mid and the unmarked next field are returned
  // via parameter and true is returned. Otherwise false is returned.
  static bool mark_list_head(ObjectMonitor* volatile * list_p,
                             ObjectMonitor** mid_p, ObjectMonitor** next_p) {
    while (true) {
!     ObjectMonitor* mid = *list_p;
      if (mid == NULL) {
        return false;  // The list is empty so nothing to mark.
      }
      if (mark_next(mid, next_p)) {
!       if (*list_p != mid) {
          // The list head changed so we have to retry.
          set_next(mid, *next_p);  // unmark mid
          continue;
        }
        // We marked next field to guard against races.
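Annotation: the head-marking protocol is load the head, mark its next field, then re-check that the head did not change before claiming success. A hedged sketch of that retry loop, building on the Node helpers above (mark_head is an illustrative stand-in for mark_list_head; compare_exchange_strong stands in for Atomic::cmpxchg):

    static bool mark_head(std::atomic<Node*>* head_p, Node** mid_p, Node** next_p) {
      while (true) {
        Node* mid = head_p->load();
        if (mid == nullptr) {
          return false;                // Empty list, nothing to mark.
        }
        Node* next = unmark(mid->next.load());
        // Try to set the mark bit in mid's next field (the mark_next() step):
        if (mid->next.compare_exchange_strong(next, mark(next))) {
          if (head_p->load() != mid) {
            mid->next.store(next);     // Head changed underneath us: unmark, retry.
            continue;
          }
          *mid_p = mid;                // Head is stable and its next field is marked.
          *next_p = next;
          return true;
        }
        // Someone else marked mid's next field; retry from the head.
      }
    }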
*** 215,235 ****
  }

  // Return the unmarked next field in an ObjectMonitor. Note: the next
  // field may or may not have been marked originally.
  static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
    return (ObjectMonitor*)((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
  }

  // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
  // the last ObjectMonitor in the list and there are 'count' on the list.
  // Also updates the specified *count_p.
  static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                     int count, ObjectMonitor* volatile* list_p,
                                     volatile int* count_p) {
    while (true) {
!     ObjectMonitor* cur = OrderAccess::load_acquire(list_p);
      // Prepend list to *list_p.
      ObjectMonitor* next = NULL;
      if (!mark_next(tail, &next)) {
        continue;  // failed to mark next field so try it all again
      }
--- 216,255 ----
  }

  // Return the unmarked next field in an ObjectMonitor. Note: the next
  // field may or may not have been marked originally.
  static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
+   // Use load_acquire() since _next_om fields are updated with a
+   // release_store().
    return (ObjectMonitor*)((intptr_t)OrderAccess::load_acquire(&om->_next_om) & ~0x1);
  }

+ // Mark the next ObjectMonitor for traversal. The current ObjectMonitor
+ // is unmarked after the next ObjectMonitor is marked. *cur_p and *next_p
+ // are updated to their next values in the list traversal. *cur_p is set
+ // to NULL when the end of the list is reached.
+ static void mark_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
+   ObjectMonitor* prev = *cur_p;      // Save current for unmarking.
+   if (*next_p == NULL) {             // Reached the end of the list.
+     set_next(prev, NULL);            // Unmark previous.
+     *cur_p = NULL;                   // Tell the caller we are done.
+     return;
+   }
+   (void)mark_next_loop(*next_p);     // Mark next.
+   set_next(prev, *next_p);           // Unmark previous.
+   *cur_p = *next_p;                  // Update current.
+   *next_p = unmarked_next(*cur_p);   // Update next.
+ }
+
  // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
  // the last ObjectMonitor in the list and there are 'count' on the list.
  // Also updates the specified *count_p.
  static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                     int count, ObjectMonitor* volatile* list_p,
                                     volatile int* count_p) {
    while (true) {
!     ObjectMonitor* cur = *list_p;
      // Prepend list to *list_p.
      ObjectMonitor* next = NULL;
      if (!mark_next(tail, &next)) {
        continue;  // failed to mark next field so try it all again
      }
--- 216,255 ----
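Annotation: the new mark_next_for_traversal() enables hand-over-hand list walking: the next node's next field is marked before the current node is unmarked, so a walker never stands on a node that a concurrent extractor can unlink. A sketch of a complete walk in that style, again using the illustrative Node helpers (visit stands in for per-node work such as chk_free_entry()):

    static void walk_list(std::atomic<Node*>* head_p, void (*visit)(Node*)) {
      Node* cur = nullptr;
      Node* next = nullptr;
      if (!mark_head(head_p, &cur, &next)) {
        return;  // Empty list.
      }
      while (true) {
        visit(cur);
        if (next == nullptr) {          // Reached the end of the list.
          cur->next.store(nullptr);     // Unmark cur; we are done.
          return;
        }
        Node* next_next;
        // Mark next's next field, spinning like mark_next_loop():
        while (true) {
          next_next = unmark(next->next.load());
          if (next->next.compare_exchange_strong(next_next, mark(next_next))) {
            break;
          }
        }
        cur->next.store(next);          // Unmark cur (hand-over-hand).
        cur = next;
        next = next_next;
      }
    }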
*** 318,328 ****
    ObjectMonitor* next = NULL;
    // Mark the list head to guard against A-B-A race:
    if (mark_list_head(list_p, &cur, &next)) {
      // List head is now marked so we can safely switch it.
      set_next(m, cur);  // m now points to cur (and unmarks m)
!     OrderAccess::release_store(list_p, m);  // Switch list head to unmarked m.
      set_next(cur, next);  // Unmark the previous list head.
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
--- 338,350 ----
    ObjectMonitor* next = NULL;
    // Mark the list head to guard against A-B-A race:
    if (mark_list_head(list_p, &cur, &next)) {
      // List head is now marked so we can safely switch it.
      set_next(m, cur);  // m now points to cur (and unmarks m)
!     *list_p = m;  // Switch list head to unmarked m.
!     // mark_list_head() used cmpxchg() above, switching list head can be lazier:
!     OrderAccess::storestore();
      set_next(cur, next);  // Unmark the previous list head.
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
*** 358,369 ****
    if (!mark_list_head(list_p, &take, &next)) {
      return NULL;  // None are available.
    }
    // Switch marked list head to next (which unmarks the list head, but
    // leaves take marked):
!   OrderAccess::release_store(list_p, next);
    Atomic::dec(count_p);
    // Unmark take, but leave the next value for any lagging list
    // walkers. It will get cleaned up when take is prepended to
    // the in-use list:
    set_next(take, next);
    return take;
--- 380,393 ----
    if (!mark_list_head(list_p, &take, &next)) {
      return NULL;  // None are available.
    }
    // Switch marked list head to next (which unmarks the list head, but
    // leaves take marked):
!   *list_p = next;
    Atomic::dec(count_p);
+   // mark_list_head() used cmpxchg() above, switching list head can be lazier:
+   OrderAccess::storestore();
    // Unmark take, but leave the next value for any lagging list
    // walkers. It will get cleaned up when take is prepended to
    // the in-use list:
    set_next(take, next);
    return take;
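Annotation: the storestore() substitution in this and the preceding hunk rests on mark_list_head() having just done a successful cmpxchg(), which already orders all earlier stores; publishing the new head then only needs a store-store barrier rather than a full release_store(). A sketch of the shape, reusing the Node/mark_head helpers above, with a C++11 release fence approximating (and slightly over-ordering) OrderAccess::storestore():

    std::atomic<Node*> g_head;  // illustrative stand-in for a global list head

    static Node* take_from_head() {
      Node* take = nullptr;
      Node* next = nullptr;
      if (!mark_head(&g_head, &take, &next)) {
        return nullptr;                               // None are available.
      }
      // mark_head() succeeded via a CAS (a full fence), so earlier stores
      // are already globally ordered; the head switch can be a plain store:
      g_head.store(next, std::memory_order_relaxed);  // *list_p = next;
      // Keep the later unmark from floating above the head switch:
      std::atomic_thread_fence(std::memory_order_release);
      take->next.store(next);   // Unmark take for lagging list walkers.
      return take;
    }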
*** 1209,1256 ****
  }

  // Visitors ...

  void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
!   PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      for (int i = _BLOCKSIZE - 1; i > 0; i--) {
        ObjectMonitor* mid = (ObjectMonitor *)(block + i);
!       if (mid->is_active()) {
!         ObjectMonitorHandle omh(mid);
!
!         if (mid->object() == NULL ||
!             (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
          // Only process with closure if the object is set.
-         // For async deflation, race here if monitor is not owned!
-         // The above ref_count bump (in ObjectMonitorHandle ctr)
-         // will cause subsequent async deflation to skip it.
-         // However, previous or concurrent async deflation is a race
-         // so skip this ObjectMonitor if it is being async deflated.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
!     // unmarked_next() is not needed with g_block_list (no next field marking).
!     block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
  }
  }

  static bool monitors_used_above_threshold() {
!   if (OrderAccess::load_acquire(&g_om_population) == 0) {
      return false;
    }
    if (MonitorUsedDeflationThreshold > 0) {
!     int monitors_used = OrderAccess::load_acquire(&g_om_population) -
!                         OrderAccess::load_acquire(&g_om_free_count);
      if (HandshakeAfterDeflateIdleMonitors) {
!       monitors_used -= OrderAccess::load_acquire(&g_om_wait_count);
      }
!     int monitor_usage = (monitors_used * 100LL) /
!                         OrderAccess::load_acquire(&g_om_population);
      return monitor_usage > MonitorUsedDeflationThreshold;
    }
    return false;
  }
--- 1233,1274 ----
  }

  // Visitors ...

  void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
!   PaddedObjectMonitor* block = g_block_list;
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      for (int i = _BLOCKSIZE - 1; i > 0; i--) {
        ObjectMonitor* mid = (ObjectMonitor *)(block + i);
!       ObjectMonitorHandle omh;
!       if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
!         // The ObjectMonitor* is not free and it has been made safe.
!         if (mid->object() == NULL) {
          // Only process with closure if the object is set.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
!     // unmarked_next() is not needed with g_block_list (no next field
!     // marking) and no load_acquire() needed because _next_om is
!     // updated before g_block_list is changed with cmpxchg().
!     block = (PaddedObjectMonitor*)block->_next_om;
  }
  }

  static bool monitors_used_above_threshold() {
!   if (g_om_population == 0) {
      return false;
    }
    if (MonitorUsedDeflationThreshold > 0) {
!     int monitors_used = g_om_population - g_om_free_count;
      if (HandshakeAfterDeflateIdleMonitors) {
!       monitors_used -= g_om_wait_count;
      }
!     int monitor_usage = (monitors_used * 100LL) / g_om_population;
      return monitor_usage > MonitorUsedDeflationThreshold;
    }
    return false;
  }
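Annotation: a side note on the unchanged arithmetic in monitors_used_above_threshold(): the 100LL literal forces the multiply into 64 bits, since monitors_used * 100 can overflow a 32-bit int for large monitor populations. A tiny self-contained demonstration (the counts are hypothetical):

    #include <cstdio>

    int main() {
      int monitors_used = 30000000;   // hypothetical large monitor counts
      int population    = 40000000;
      // monitors_used * 100 is 3.0e9, which overflows (UB for) a 32-bit int,
      // so promote to 64 bits first, exactly as the 100LL literal does:
      int monitor_usage = (int)((monitors_used * 100LL) / population);
      std::printf("monitor_usage=%d%%\n", monitor_usage);  // prints 75%
      return 0;
    }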
*** 1277,1290 ****
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
! int monitors_used = OrderAccess::load_acquire(&g_om_population) -
!                     OrderAccess::load_acquire(&g_om_free_count);
  if (HandshakeAfterDeflateIdleMonitors) {
!   monitors_used -= OrderAccess::load_acquire(&g_om_wait_count);
  }
  if (is_MonitorBound_exceeded(monitors_used)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
--- 1295,1307 ----
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
! int monitors_used = g_om_population - g_om_free_count;
  if (HandshakeAfterDeflateIdleMonitors) {
!   monitors_used -= g_om_wait_count;
  }
  if (is_MonitorBound_exceeded(monitors_used)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
*** 1317,1339 ****
    global_used_oops_do(f);
  }

  void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
!   list_oops_do(OrderAccess::load_acquire(&g_om_in_use_list),
!                OrderAccess::load_acquire(&g_om_in_use_count), f);
  }

  void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
!   list_oops_do(OrderAccess::load_acquire(&thread->om_in_use_list),
!                OrderAccess::load_acquire(&thread->om_in_use_count), f);
  }

  void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    // The oops_do() phase does not overlap with monitor deflation
    // so no need to update the ObjectMonitor's ref_count for this
!   // ObjectMonitor* use.
    for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
--- 1334,1357 ----
    global_used_oops_do(f);
  }

  void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
!   list_oops_do(g_om_in_use_list, g_om_in_use_count, f);
  }

  void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
!   list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
  }

  void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    // The oops_do() phase does not overlap with monitor deflation
    // so no need to update the ObjectMonitor's ref_count for this
!   // ObjectMonitor* use and no need to mark ObjectMonitors for the
!   // list traversal.
    for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
*** 1441,1451 ****
    // 2: try to allocate from the global g_free_list
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
!   if (OrderAccess::load_acquire(&g_free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_g_free_list();
--- 1459,1469 ----
    // 2: try to allocate from the global g_free_list
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
!   if (g_free_list != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_g_free_list();
*** 1463,1489 ****
        if (take->ref_count() < 0) {
          // Add back max_jint to restore the ref_count field to its
          // proper value.
          Atomic::add(max_jint, &take->_ref_count);
!         assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
!                take->ref_count());
        }
      }
      take->Recycle();
      // Since we're taking from the global free-list, take must be Free.
      // om_release() also sets the allocation state to Free because it
      // is called from other code paths.
      assert(take->is_free(), "invariant");
      om_release(self, take, false);
    }
!   self->om_free_provision += 1 + (self->om_free_provision/2);
    if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

    if (!AsyncDeflateIdleMonitors &&
!       is_MonitorBound_exceeded(OrderAccess::load_acquire(&g_om_population) -
!                                OrderAccess::load_acquire(&g_om_free_count))) {
      // Not enough ObjectMonitors on the global free list.
      // We can't safely induce a STW safepoint from om_alloc() as our thread
      // state may not be appropriate for such activities and callers may hold
      // naked oops, so instead we defer the action.
      InduceScavenge(self, "om_alloc");
--- 1481,1507 ----
        if (take->ref_count() < 0) {
          // Add back max_jint to restore the ref_count field to its
          // proper value.
          Atomic::add(max_jint, &take->_ref_count);
!         DEBUG_ONLY(jint l_ref_count = take->ref_count();)
!         assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
!                l_ref_count, take->ref_count());
        }
      }
      take->Recycle();
      // Since we're taking from the global free-list, take must be Free.
      // om_release() also sets the allocation state to Free because it
      // is called from other code paths.
      assert(take->is_free(), "invariant");
      om_release(self, take, false);
    }
!   self->om_free_provision += 1 + (self->om_free_provision / 2);
    if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

    if (!AsyncDeflateIdleMonitors &&
!       is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
      // Not enough ObjectMonitors on the global free list.
      // We can't safely induce a STW safepoint from om_alloc() as our thread
      // state may not be appropriate for such activities and callers may hold
      // naked oops, so instead we defer the action.
      InduceScavenge(self, "om_alloc");
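Annotation: this hunk and the one at 2243/2272 use the same ref_count convention: subtracting max_jint drives the count negative as a "being deflated" flag, and adding max_jint back restores whatever value accumulated in the meantime. A hedged sketch of one plausible reading of that protocol (the exact claim step in the patch may differ; names are illustrative):

    #include <atomic>
    #include <climits>

    std::atomic<int> ref_count{0};  // stands in for ObjectMonitor::_ref_count

    // Claim an idle monitor for deflation by driving ref_count negative;
    // concurrent users that observe a negative count refuse to use the
    // ObjectMonitor*, which is why om_alloc() above must restore the value
    // when it takes a monitor whose claim was abandoned.
    bool try_claim_for_deflation() {
      if (ref_count.fetch_sub(INT_MAX) == 0) {
        return true;   // ref_count was 0: the claim succeeded.
      }
      // The monitor was (or became) referenced: undo the claim. Note that
      // (x - max_jint) + max_jint == x, so any increments that landed in
      // the meantime are preserved:
      ref_count.fetch_add(INT_MAX);
      return false;
    }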
*** 1514,1529 ****
      // The trick of using the 1st element in the block as g_block_list
      // linkage should be reconsidered. A better implementation would
      // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

      for (int i = 1; i < _BLOCKSIZE; i++) {
!       OrderAccess::release_store(&temp[i]._next_om, (ObjectMonitor*)&temp[i+1]);
        assert(temp[i].is_free(), "invariant");
      }

      // terminate the last monitor as the end of list
!     OrderAccess::release_store(&temp[_BLOCKSIZE - 1]._next_om, (ObjectMonitor*)NULL);

      // Element [0] is reserved for global list linkage
      temp[0].set_object(CHAINMARKER);

      // Consider carving out this thread's current request from the
--- 1532,1547 ----
      // The trick of using the 1st element in the block as g_block_list
      // linkage should be reconsidered. A better implementation would
      // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

      for (int i = 1; i < _BLOCKSIZE; i++) {
!       temp[i]._next_om = (ObjectMonitor*)&temp[i + 1];
        assert(temp[i].is_free(), "invariant");
      }

      // terminate the last monitor as the end of list
!     temp[_BLOCKSIZE - 1]._next_om = (ObjectMonitor*)NULL;

      // Element [0] is reserved for global list linkage
      temp[0].set_object(CHAINMARKER);

      // Consider carving out this thread's current request from the
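Annotation: the block carving above builds a ready-made free list inside one allocation: element [0] carries the block-list linkage (the CHAINMARKER role) and elements [1.._BLOCKSIZE-1] are forward-chained. A simplified sketch (BLOCKSIZE and Monitor are illustrative; the real code uses cache-line padded PaddedObjectMonitor elements):

    static const int BLOCKSIZE = 128;

    struct Monitor {
      Monitor* next;
    };

    Monitor* carve_new_block() {
      Monitor* temp = new Monitor[BLOCKSIZE]();
      // Forward-link the body of the block; element [0] is reserved for
      // chaining the block itself onto a global block list:
      for (int i = 1; i < BLOCKSIZE - 1; i++) {
        temp[i].next = &temp[i + 1];
      }
      temp[BLOCKSIZE - 1].next = nullptr;  // Terminate the free list.
      return temp;  // Caller prepends temp[1..] to a free list in one shot.
    }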
*** 1573,1583 ****
      if (m == mid) {
        // We found 'm' on the per-thread in-use list so try to extract it.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which unmarks the list head, but leaves mid marked:
!         OrderAccess::release_store(&self->om_in_use_list, next);
        } else {
          // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
          // next field to next which unmarks cur_mid_in_use, but leaves
          // mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
--- 1591,1603 ----
      if (m == mid) {
        // We found 'm' on the per-thread in-use list so try to extract it.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which unmarks the list head, but leaves mid marked:
!         self->om_in_use_list = next;
!         // mark_list_head() used cmpxchg() above, switching list head can be lazier:
!         OrderAccess::storestore();
        } else {
          // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
          // next field to next which unmarks cur_mid_in_use, but leaves
          // mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
*** 1668,1678 ****
      }
      // Refetch the possibly changed next field and try again.
      cur_om = unmarked_next(in_use_tail);
      continue;
    }
!   if (!cur_om->is_active()) {
      // cur_om was deflated and the allocation state was changed
      // to Free while it was marked. We happened to see it just
      // after it was unmarked (and added to the free list).
      // Refetch the possibly changed next field and try again.
      cur_om = unmarked_next(in_use_tail);
--- 1688,1698 ----
      }
      // Refetch the possibly changed next field and try again.
      cur_om = unmarked_next(in_use_tail);
      continue;
    }
!   if (cur_om->is_free()) {
      // cur_om was deflated and the allocation state was changed
      // to Free while it was marked. We happened to see it just
      // after it was unmarked (and added to the free list).
      // Refetch the possibly changed next field and try again.
      cur_om = unmarked_next(in_use_tail);
*** 1681,1705 ****
      in_use_tail = cur_om;
      in_use_count++;
      cur_om = unmarked_next(cur_om);
    }
    guarantee(in_use_tail != NULL, "invariant");
!   int l_om_in_use_count = OrderAccess::load_acquire(&self->om_in_use_count);
    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
                   "match: l_om_in_use_count=%d, in_use_count=%d",
                   l_om_in_use_count, in_use_count);
!   // Clear the in-use count before unmarking the in-use list head
!   // to avoid races:
!   OrderAccess::release_store(&self->om_in_use_count, 0);
    // Clear the in-use list head (which also unmarks it):
!   OrderAccess::release_store(&self->om_in_use_list, (ObjectMonitor*)NULL);
!   // Unmark the disconnected list head:
    set_next(in_use_list, next);
  }

  int free_count = 0;
! ObjectMonitor* free_list = OrderAccess::load_acquire(&self->om_free_list);
  ObjectMonitor* free_tail = NULL;
  if (free_list != NULL) {
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below.
    stringStream ss;
--- 1701,1724 ----
      in_use_tail = cur_om;
      in_use_count++;
      cur_om = unmarked_next(cur_om);
    }
    guarantee(in_use_tail != NULL, "invariant");
!   int l_om_in_use_count = self->om_in_use_count;
    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
                   "match: l_om_in_use_count=%d, in_use_count=%d",
                   l_om_in_use_count, in_use_count);
!   self->om_in_use_count = 0;
    // Clear the in-use list head (which also unmarks it):
!   self->om_in_use_list = (ObjectMonitor*)NULL;
!   // mark_list_head() used cmpxchg() above, clearing the disconnected list head can be lazier:
!   OrderAccess::storestore();
    set_next(in_use_list, next);
  }

  int free_count = 0;
! ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  if (free_list != NULL) {
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below.
    stringStream ss;
*** 1708,1723 ****
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
!   int l_om_free_count = OrderAccess::load_acquire(&self->om_free_count);
    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
                   "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
!   OrderAccess::release_store(&self->om_free_list, (ObjectMonitor*)NULL);
!   OrderAccess::release_store(&self->om_free_count, 0);
  }

  if (free_tail != NULL) {
    prepend_list_to_g_free_list(free_list, free_tail, free_count);
  }
--- 1727,1743 ----
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
!   int l_om_free_count = self->om_free_count;
    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
                   "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
!   self->om_free_count = 0;
!   self->om_free_list = NULL;
!   OrderAccess::storestore();  // Lazier memory is okay for list walkers.
  }

  if (free_tail != NULL) {
    prepend_list_to_g_free_list(free_list, free_tail, free_count);
  }
*** 1899,1911 ****
    // with this thread we could simply set m->_owner = self.
    // Note that a thread can inflate an object
    // that it has stack-locked -- as might happen in wait() -- directly
    // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
    if (AsyncDeflateIdleMonitors) {
!     m->set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
    } else {
!     m->set_owner_from(mark.locker(), NULL);
    }
    m->set_object(object);
    // TODO-FIXME: assert BasicLock->dhw != 0.

    omh_p->set_om_ptr(m);
--- 1919,1931 ----
    // with this thread we could simply set m->_owner = self.
    // Note that a thread can inflate an object
    // that it has stack-locked -- as might happen in wait() -- directly
    // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
    if (AsyncDeflateIdleMonitors) {
!     m->simply_set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
    } else {
!     m->simply_set_owner_from(mark.locker(), NULL);
    }
    m->set_object(object);
    // TODO-FIXME: assert BasicLock->dhw != 0.

    omh_p->set_om_ptr(m);
*** 2041,2050 ****
--- 2061,2079 ----
      log_debug(monitorinflation)("requesting async deflation of idle monitors.");
      // Request deflation of idle monitors by the ServiceThread:
      set_is_async_deflation_requested(true);
      MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
      ml.notify_all();
+
+     if (log_is_enabled(Debug, monitorinflation)) {
+       // exit_globals()'s call to audit_and_print_stats() is done
+       // at the Info level and not at a safepoint.
+       // For safepoint based deflation, audit_and_print_stats() is called
+       // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
+       // Debug level at a safepoint.
+       ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
+     }
  }

  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
*** 2243,2254 ****
        // Add back max_jint to restore the ref_count field to its
        // proper value (which may not be what we saw above):
        Atomic::add(max_jint, &mid->_ref_count);

!       assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
!              mid->ref_count());
        return false;
      }

      // The ref_count was no longer 0 so we lost the race since the
      // ObjectMonitor is now busy or the ObjectMonitor* is now is use.
--- 2272,2284 ----
        // Add back max_jint to restore the ref_count field to its
        // proper value (which may not be what we saw above):
        Atomic::add(max_jint, &mid->_ref_count);

!       DEBUG_ONLY(jint l_ref_count = mid->ref_count();)
!       assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
!              l_ref_count, mid->ref_count());
        return false;
      }

      // The ref_count was no longer 0 so we lost the race since the
      // ObjectMonitor is now busy or the ObjectMonitor* is now is use.
*** 2295,2305 ****
        // free_tail_p as needed. Finish the move to the local free list
        // by unlinking mid from the global or per-thread in-use list.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which unmarks the list head, but leaves mid marked:
!         OrderAccess::release_store(list_p, next);
        } else {
          // mid is marked. Switch cur_mid_in_use's next field to next
          // which is safe because we have no parallel list deletions,
          // but we leave mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
--- 2325,2337 ----
        // free_tail_p as needed. Finish the move to the local free list
        // by unlinking mid from the global or per-thread in-use list.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which unmarks the list head, but leaves mid marked:
!         *list_p = next;
!         // mark_list_head() used cmpxchg() above, switching list head can be lazier:
!         OrderAccess::storestore();
        } else {
          // mid is marked. Switch cur_mid_in_use's next field to next
          // which is safe because we have no parallel list deletions,
          // but we leave mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, next);
*** 2396,2406 ****
        // by unlinking mid from the global or per-thread in-use list.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which is also marked (if not NULL) and also leave
          // mid marked:
!         OrderAccess::release_store(list_p, next);
        } else {
          ObjectMonitor* marked_next = mark_om_ptr(next);
          // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
          // next field to marked_next and also leave mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, marked_next);
--- 2428,2440 ----
        // by unlinking mid from the global or per-thread in-use list.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is marked. Switch the list head
          // to next which is also marked (if not NULL) and also leave
          // mid marked:
!         *list_p = next;
!         // mark_list_head() used cmpxchg() above, switching list head can be lazier:
!         OrderAccess::storestore();
        } else {
          ObjectMonitor* marked_next = mark_om_ptr(next);
          // mid and cur_mid_in_use are marked. Switch cur_mid_in_use's
          // next field to marked_next and also leave mid marked:
          OrderAccess::release_store(&cur_mid_in_use->_next_om, marked_next);
*** 2436,2447 ****
      cur_mid_in_use = mid;
      mid = next;  // mid keeps non-NULL next's marked next field
      next = next_next;

      if (SafepointSynchronize::is_synchronizing() &&
!         cur_mid_in_use != OrderAccess::load_acquire(list_p) &&
!         cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller before blocking.
        *saved_mid_in_use_p = cur_mid_in_use;
        set_next(cur_mid_in_use, mid);  // umark cur_mid_in_use
--- 2470,2480 ----
      cur_mid_in_use = mid;
      mid = next;  // mid keeps non-NULL next's marked next field
      next = next_next;

      if (SafepointSynchronize::is_synchronizing() &&
!         cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller before blocking.
        *saved_mid_in_use_p = cur_mid_in_use;
        set_next(cur_mid_in_use, mid);  // umark cur_mid_in_use
*** 2466,2480 ****
    *saved_mid_in_use_p = NULL;
    return deflated_count;
  }

  void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
!   OrderAccess::release_store(&counters->n_in_use, 0);  // currently associated with objects
!   OrderAccess::release_store(&counters->n_in_circulation, 0);  // extant
!   OrderAccess::release_store(&counters->n_scavenged, 0);  // reclaimed (global and per-thread)
!   OrderAccess::release_store(&counters->per_thread_scavenged, 0);  // per-thread scavenge total
    counters->per_thread_times = 0.0;  // per-thread scavenge times
  }

  void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
--- 2499,2514 ----
    *saved_mid_in_use_p = NULL;
    return deflated_count;
  }

  void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
!   counters->n_in_use = 0;  // currently associated with objects
!   counters->n_in_circulation = 0;  // extant
!   counters->n_scavenged = 0;  // reclaimed (global and per-thread)
!   counters->per_thread_scavenged = 0;  // per-thread scavenge total
    counters->per_thread_times = 0.0;  // per-thread scavenge times
+   OrderAccess::storestore();  // flush inits for worker threads
  }

  void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
*** 2499,2514 ****
    // Note: the thread-local monitors lists get deflated in
    // a separate pass. See deflate_thread_local_monitors().

    // For moribund threads, scan g_om_in_use_list
    int deflated_count = 0;
!   if (OrderAccess::load_acquire(&g_om_in_use_list) != NULL) {
      // Update n_in_circulation before g_om_in_use_count is updated by deflation.
!     Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_circulation);

      deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
!     Atomic::add(OrderAccess::load_acquire(&g_om_in_use_count), &counters->n_in_use);
    }

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors back to the global free list.
      // No races on the working free list so no need for load_acquire().
--- 2533,2548 ----
    // Note: the thread-local monitors lists get deflated in
    // a separate pass. See deflate_thread_local_monitors().

    // For moribund threads, scan g_om_in_use_list
    int deflated_count = 0;
!   if (g_om_in_use_list != NULL) {
      // Update n_in_circulation before g_om_in_use_count is updated by deflation.
!     Atomic::add(g_om_in_use_count, &counters->n_in_circulation);

      deflated_count = deflate_monitor_list(&g_om_in_use_list, &g_om_in_use_count, &free_head_p, &free_tail_p);
!     Atomic::add(g_om_in_use_count, &counters->n_in_use);
    }

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors back to the global free list.
      // No races on the working free list so no need for load_acquire().
*** 2558,2567 ****
--- 2592,2607 ----
      }
    }
    if (count > 0) {
      log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
    }
+
+   log_info(monitorinflation)("async g_om_population=%d, g_om_in_use_count=%d, "
+                              "g_om_free_count=%d, g_om_wait_count=%d",
+                              g_om_population, g_om_in_use_count,
+                              g_om_free_count, g_om_wait_count);
+
    // The ServiceThread's async deflation request has been processed.
    set_is_async_deflation_requested(false);

    if (HandshakeAfterDeflateIdleMonitors && g_om_wait_count > 0) {
      // There are deflated ObjectMonitors waiting for a handshake
*** 2570,2583 ****
    // g_wait_list and g_om_wait_count are only updated by the calling
    // thread so no need for load_acquire() or release_store().
    ObjectMonitor* list = g_wait_list;
    ADIM_guarantee(list != NULL, "g_wait_list must not be NULL");
    int count = g_om_wait_count;
-   g_wait_list = NULL;
    g_om_wait_count = 0;

!   // Find the tail for prepend_list_to_common().
    int l_count = 0;
    ObjectMonitor* tail = NULL;
    for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
      tail = n;
      l_count++;
--- 2610,2626 ----
    // g_wait_list and g_om_wait_count are only updated by the calling
    // thread so no need for load_acquire() or release_store().
    ObjectMonitor* list = g_wait_list;
    ADIM_guarantee(list != NULL, "g_wait_list must not be NULL");
    int count = g_om_wait_count;
    g_om_wait_count = 0;
+   g_wait_list = NULL;
+   OrderAccess::storestore();  // Lazier memory sync is okay for list walkers.

!   // Find the tail for prepend_list_to_common(). No need to mark
!   // ObjectMonitors for this list walk since only the deflater
!   // thread manages the wait list.
    int l_count = 0;
    ObjectMonitor* tail = NULL;
    for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
      tail = n;
      l_count++;
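Annotation: since only the deflater thread updates g_wait_list, the detach step can use plain stores, zeroing the count before clearing the head, and the detached list can then be walked unmarked to find its tail. A sketch of that step, reusing the Node helpers from the earlier sketches (WaitList, head, and count are illustrative stand-ins for g_wait_list and g_om_wait_count):

    struct WaitList {
      Node* head;
      int count;
    };

    Node* detach_and_find_tail(WaitList* wl, Node** list_p, int* count_p) {
      *list_p = wl->head;
      *count_p = wl->count;
      wl->count = 0;        // Zero the count before clearing the head so a
      wl->head = nullptr;   // racing reader does not pair a live head with a
                            // stale count (the storestore() publishes both).
      Node* tail = nullptr;
      for (Node* n = *list_p; n != nullptr; n = unmark(n->next.load())) {
        tail = n;           // No marking needed: only the deflater walks here.
      }
      return tail;          // Caller prepends [*list_p, tail] to the free list.
    }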
*** 2627,2639 ****
    if (log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }

    if (is_global) {
!     OM_PERFDATA_OP(MonExtant, set_value(OrderAccess::load_acquire(&g_om_in_use_count)));
    } else {
!     OM_PERFDATA_OP(MonExtant, inc(OrderAccess::load_acquire(&target->om_in_use_count)));
    }

    do {
      int local_deflated_count;
      if (is_global) {
--- 2670,2682 ----
    if (log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }

    if (is_global) {
!     OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
    } else {
!     OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
    }

    do {
      int local_deflated_count;
      if (is_global) {
*** 2709,2749 ****
  void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
    // Report the cumulative time for deflating each thread's idle
    // monitors. Note: if the work is split among more than one
    // worker thread, then the reported time will likely be more
    // than a beginning to end measurement of the phase.
!   // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
!   // monitors at a safepoint when a special deflation has been requested.
!   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d",
!                                counters->per_thread_times,
!                                OrderAccess::load_acquire(&counters->per_thread_scavenged));

    bool needs_special_deflation = is_special_deflation_requested();
!   if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
!     // AsyncDeflateIdleMonitors does not use these counters unless
!     // there is a special deflation request.
!
!     OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
!     OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
    }

    if (log_is_enabled(Debug, monitorinflation)) {
      // exit_globals()'s call to audit_and_print_stats() is done
!     // at the Info level.
      ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
    } else if (log_is_enabled(Info, monitorinflation)) {
      log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
                                 "g_om_free_count=%d, g_om_wait_count=%d",
!                                OrderAccess::load_acquire(&g_om_population),
!                                OrderAccess::load_acquire(&g_om_in_use_count),
!                                OrderAccess::load_acquire(&g_om_free_count),
!                                OrderAccess::load_acquire(&g_om_wait_count));
    }

    ForceMonitorScavenge = 0;    // Reset
    GVars.stw_random = os::random();
    GVars.stw_cycle++;

    if (needs_special_deflation) {
      set_is_special_deflation_requested(false);  // special deflation is done
    }
  }
--- 2752,2792 ----
  void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
    // Report the cumulative time for deflating each thread's idle
    // monitors. Note: if the work is split among more than one
    // worker thread, then the reported time will likely be more
    // than a beginning to end measurement of the phase.
!   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

    bool needs_special_deflation = is_special_deflation_requested();
!   if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
!     // Nothing to do when idle ObjectMonitors are deflated using
!     // a JavaThread unless a special deflation has been requested.
!     return;
    }

    if (log_is_enabled(Debug, monitorinflation)) {
      // exit_globals()'s call to audit_and_print_stats() is done
!     // at the Info level and not at a safepoint.
!     // For async deflation, audit_and_print_stats() is called in
!     // ObjectSynchronizer::do_safepoint_work() at the Debug level
!     // at a safepoint.
      ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
    } else if (log_is_enabled(Info, monitorinflation)) {
      log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
                                 "g_om_free_count=%d, g_om_wait_count=%d",
!                                g_om_population, g_om_in_use_count,
!                                g_om_free_count, g_om_wait_count);
    }

    ForceMonitorScavenge = 0;    // Reset
+
+   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
+   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
+
    GVars.stw_random = os::random();
    GVars.stw_cycle++;
+
    if (needs_special_deflation) {
      set_is_special_deflation_requested(false);  // special deflation is done
    }
  }
*** 2763,2776 ****
        log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }

    // Update n_in_circulation before om_in_use_count is updated by deflation.
!   Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_circulation);

    int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
!   Atomic::add(OrderAccess::load_acquire(&thread->om_in_use_count), &counters->n_in_use);

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors back to the global free list.
      // No races on the working list so no need for load_acquire().
      guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
--- 2806,2819 ----
        log_is_enabled(Info, monitorinflation)) {
      timer.start();
    }

    // Update n_in_circulation before om_in_use_count is updated by deflation.
!   Atomic::add(thread->om_in_use_count, &counters->n_in_circulation);

    int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
!   Atomic::add(thread->om_in_use_count, &counters->n_in_use);

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors back to the global free list.
      // No races on the working list so no need for load_acquire().
      guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
*** 2896,2918 ****
    int error_cnt = 0;

    ls->print_cr("Checking global lists:");

    // Check g_om_population:
!   if (OrderAccess::load_acquire(&g_om_population) == chk_om_population) {
      ls->print_cr("g_om_population=%d equals chk_om_population=%d",
!                  OrderAccess::load_acquire(&g_om_population),
!                  chk_om_population);
    } else {
      // With lock free access to the monitor lists, it is possible for
      // log_monitor_list_counts() to return a value that doesn't match
      // g_om_population. So far a higher value has been seen in testing
      // so something is being double counted by log_monitor_list_counts().
      ls->print_cr("WARNING: g_om_population=%d is not equal to "
!                  "chk_om_population=%d",
!                  OrderAccess::load_acquire(&g_om_population),
!                  chk_om_population);
    }

    // Check g_om_in_use_list and g_om_in_use_count:
    chk_global_in_use_list_and_count(ls, &error_cnt);
--- 2939,2958 ----
    int error_cnt = 0;

    ls->print_cr("Checking global lists:");

    // Check g_om_population:
!   if (g_om_population == chk_om_population) {
      ls->print_cr("g_om_population=%d equals chk_om_population=%d",
!                  g_om_population, chk_om_population);
    } else {
      // With lock free access to the monitor lists, it is possible for
      // log_monitor_list_counts() to return a value that doesn't match
      // g_om_population. So far a higher value has been seen in testing
      // so something is being double counted by log_monitor_list_counts().
      ls->print_cr("WARNING: g_om_population=%d is not equal to "
!                  "chk_om_population=%d", g_om_population, chk_om_population);
    }

    // Check g_om_in_use_list and g_om_in_use_count:
    chk_global_in_use_list_and_count(ls, &error_cnt);
*** 2999,3069 ****
  // Check the global free list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
    int chk_om_free_count = 0;
!   for (ObjectMonitor* n = OrderAccess::load_acquire(&g_free_list); n != NULL; n = unmarked_next(n)) {
!     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
      chk_om_free_count++;
    }
!   if (OrderAccess::load_acquire(&g_om_free_count) == chk_om_free_count) {
      out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
!                   OrderAccess::load_acquire(&g_om_free_count),
!                   chk_om_free_count);
    } else {
      // With lock free access to g_free_list, it is possible for an
      // ObjectMonitor to be prepended to g_free_list after we started
      // calculating chk_om_free_count so g_om_free_count may not
      // match anymore.
      out->print_cr("WARNING: g_om_free_count=%d is not equal to "
!                   "chk_om_free_count=%d",
!                   OrderAccess::load_acquire(&g_om_free_count),
!                   chk_om_free_count);
    }
  }

  // Check the global wait list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
    int chk_om_wait_count = 0;
!   for (ObjectMonitor* n = OrderAccess::load_acquire(&g_wait_list); n != NULL; n = unmarked_next(n)) {
      // Rules for g_wait_list are the same as of g_free_list:
!     chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
      chk_om_wait_count++;
    }
!   if (OrderAccess::load_acquire(&g_om_wait_count) == chk_om_wait_count) {
      out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d",
!                   OrderAccess::load_acquire(&g_om_wait_count),
!                   chk_om_wait_count);
    } else {
      out->print_cr("ERROR: g_om_wait_count=%d is not equal to "
!                   "chk_om_wait_count=%d",
!                   OrderAccess::load_acquire(&g_om_wait_count),
!                   chk_om_wait_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Check the global in-use list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                            int *error_cnt_p) {
    int chk_om_in_use_count = 0;
!   for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
!     chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
      chk_om_in_use_count++;
    }
!   if (OrderAccess::load_acquire(&g_om_in_use_count) == chk_om_in_use_count) {
      out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
!                   OrderAccess::load_acquire(&g_om_in_use_count),
!                   chk_om_in_use_count);
    } else {
      // With lock free access to the monitor lists, it is possible for
      // an exiting JavaThread to put its in-use ObjectMonitors on the
      // global in-use list after chk_om_in_use_count is calculated above.
      out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
!                   OrderAccess::load_acquire(&g_om_in_use_count),
!                   chk_om_in_use_count);
    }
  }

  // Check an in-use monitor entry; log any errors.
  void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
--- 3039,3131 ----
  // Check the global free list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
    int chk_om_free_count = 0;
!   ObjectMonitor* cur = NULL;
!   ObjectMonitor* next = NULL;
!   if (mark_list_head(&g_free_list, &cur, &next)) {
!     // Marked the global free list head so process the list.
!     while (true) {
!       chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
        chk_om_free_count++;
+
+       mark_next_for_traversal(&cur, &next);
+       if (cur == NULL) {
+         break;
+       }
      }
!   }
!   if (g_om_free_count == chk_om_free_count) {
      out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
!                   g_om_free_count, chk_om_free_count);
    } else {
      // With lock free access to g_free_list, it is possible for an
      // ObjectMonitor to be prepended to g_free_list after we started
      // calculating chk_om_free_count so g_om_free_count may not
      // match anymore.
      out->print_cr("WARNING: g_om_free_count=%d is not equal to "
!                   "chk_om_free_count=%d", g_om_free_count, chk_om_free_count);
    }
  }

  // Check the global wait list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
    int chk_om_wait_count = 0;
!   ObjectMonitor* cur = NULL;
!   ObjectMonitor* next = NULL;
!   if (mark_list_head(&g_wait_list, &cur, &next)) {
!     // Marked the global wait list head so process the list.
!     while (true) {
        // Rules for g_wait_list are the same as of g_free_list:
!       chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
        chk_om_wait_count++;
+
+       mark_next_for_traversal(&cur, &next);
+       if (cur == NULL) {
+         break;
+       }
      }
!   }
!   if (g_om_wait_count == chk_om_wait_count) {
      out->print_cr("g_om_wait_count=%d equals chk_om_wait_count=%d",
!                   g_om_wait_count, chk_om_wait_count);
    } else {
      out->print_cr("ERROR: g_om_wait_count=%d is not equal to "
!                   "chk_om_wait_count=%d", g_om_wait_count, chk_om_wait_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Check the global in-use list and count; log the results of the checks.
  void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                            int *error_cnt_p) {
    int chk_om_in_use_count = 0;
!   ObjectMonitor* cur = NULL;
!   ObjectMonitor* next = NULL;
!   if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
!     // Marked the global in-use list head so process the list.
!     while (true) {
!       chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
        chk_om_in_use_count++;
+
+       mark_next_for_traversal(&cur, &next);
+       if (cur == NULL) {
+         break;
+       }
      }
!   }
!   if (g_om_in_use_count == chk_om_in_use_count) {
      out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d",
!                   g_om_in_use_count, chk_om_in_use_count);
    } else {
      // With lock free access to the monitor lists, it is possible for
      // an exiting JavaThread to put its in-use ObjectMonitors on the
      // global in-use list after chk_om_in_use_count is calculated above.
      out->print_cr("WARNING: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
!                   g_om_in_use_count, chk_om_in_use_count);
    }
  }

  // Check an in-use monitor entry; log any errors.
  void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
*** 3127,3223 ****
  // Check the thread's free list and count; log the results of the checks.
  void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
    int chk_om_free_count = 0;
!   for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_free_list); n != NULL; n = unmarked_next(n)) {
!     chk_free_entry(jt, n, out, error_cnt_p);
      chk_om_free_count++;
    }
!   if (OrderAccess::load_acquire(&jt->om_free_count) == chk_om_free_count) {
      out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
!                   "chk_om_free_count=%d", p2i(jt),
!                   OrderAccess::load_acquire(&jt->om_free_count), chk_om_free_count);
    } else {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
!                   "equal to chk_om_free_count=%d", p2i(jt),
!                   OrderAccess::load_acquire(&jt->om_free_count), chk_om_free_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Check the thread's in-use list and count; log the results of the checks.
  void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                                outputStream * out,
                                                                int *error_cnt_p) {
    int chk_om_in_use_count = 0;
!   for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
!     chk_in_use_entry(jt, n, out, error_cnt_p);
      chk_om_in_use_count++;
    }
!   if (OrderAccess::load_acquire(&jt->om_in_use_count) == chk_om_in_use_count) {
      out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                    "chk_om_in_use_count=%d", p2i(jt),
!                   OrderAccess::load_acquire(&jt->om_in_use_count),
!                   chk_om_in_use_count);
    } else {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                    "equal to chk_om_in_use_count=%d", p2i(jt),
!                   OrderAccess::load_acquire(&jt->om_in_use_count),
!                   chk_om_in_use_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  // flags indicate why the entry is in-use, 'object' and 'object type'
  // indicate the associated object and its type.
  void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
    stringStream ss;
!   if (OrderAccess::load_acquire(&g_om_in_use_count) > 0) {
      out->print_cr("In-use global monitor info:");
      out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
      out->print_cr("%18s  %s  %7s  %18s  %18s",
                    "monitor", "BHL", "ref_cnt", "object", "object type");
      out->print_cr("==================  ===  =======  ==================  ==================");
!     for (ObjectMonitor* n = OrderAccess::load_acquire(&g_om_in_use_list); n != NULL; n = unmarked_next(n)) {
!       const oop obj = (oop) n->object();
!       const markWord mark = n->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT "  %d%d%d  %7d  " INTPTR_FORMAT "  %s",
!                  p2i(n), n->is_busy() != 0, mark.hash() != 0,
!                  n->owner() != NULL, (int)n->ref_count(), p2i(obj),
                   obj->klass()->external_name());
!       if (n->is_busy() != 0) {
!         out->print(" (%s)", n->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    }

    out->print_cr("In-use per-thread monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s  %18s  %s  %7s  %18s  %18s",
                  "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("==================  ==================  ===  =======  ==================  ==================");
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
!     for (ObjectMonitor* n = OrderAccess::load_acquire(&jt->om_in_use_list); n != NULL; n = unmarked_next(n)) {
!       const oop obj = (oop) n->object();
!       const markWord mark = n->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  %7d  "
!                  INTPTR_FORMAT "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
!                  mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(),
                   p2i(obj), obj->klass()->external_name());
!       if (n->is_busy() != 0) {
!         out->print(" (%s)", n->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    }

    out->flush();
  }
--- 3189,3321 ----
  // Check the thread's free list and count; log the results of the checks.
  void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
    int chk_om_free_count = 0;
!   ObjectMonitor* cur = NULL;
!   ObjectMonitor* next = NULL;
!   if (mark_list_head(&jt->om_free_list, &cur, &next)) {
!     // Marked the per-thread free list head so process the list.
!     while (true) {
!       chk_free_entry(jt, cur, out, error_cnt_p);
        chk_om_free_count++;
+
+       mark_next_for_traversal(&cur, &next);
+       if (cur == NULL) {
+         break;
+       }
      }
!   }
!   if (jt->om_free_count == chk_om_free_count) {
      out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
!                   "chk_om_free_count=%d", p2i(jt), jt->om_free_count,
                    chk_om_free_count);
    } else {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
!                   "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
                    chk_om_free_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Check the thread's in-use list and count; log the results of the checks.
  void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                                outputStream * out,
                                                                int *error_cnt_p) {
    int chk_om_in_use_count = 0;
!   ObjectMonitor* cur = NULL;
!   ObjectMonitor* next = NULL;
!   if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
!     // Marked the per-thread in-use list head so process the list.
!     while (true) {
!       chk_in_use_entry(jt, cur, out, error_cnt_p);
        chk_om_in_use_count++;
+
+       mark_next_for_traversal(&cur, &next);
+       if (cur == NULL) {
+         break;
+       }
      }
!   }
!   if (jt->om_in_use_count == chk_om_in_use_count) {
      out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                    "chk_om_in_use_count=%d", p2i(jt),
!                   jt->om_in_use_count, chk_om_in_use_count);
    } else {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                    "equal to chk_om_in_use_count=%d", p2i(jt),
!                   jt->om_in_use_count, chk_om_in_use_count);
      *error_cnt_p = *error_cnt_p + 1;
    }
  }

  // Log details about ObjectMonitors on the in-use lists. The 'BHL'
  // flags indicate why the entry is in-use, 'object' and 'object type'
  // indicate the associated object and its type.
  void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
    stringStream ss;
!   if (g_om_in_use_count > 0) {
      out->print_cr("In-use global monitor info:");
      out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
      out->print_cr("%18s  %s  %7s  %18s  %18s",
                    "monitor", "BHL", "ref_cnt", "object", "object type");
      out->print_cr("==================  ===  =======  ==================  ==================");
!     ObjectMonitor* cur = NULL;
!     ObjectMonitor* next = NULL;
!     if (mark_list_head(&g_om_in_use_list, &cur, &next)) {
!       // Marked the global in-use list head so process the list.
!       while (true) {
!         const oop obj = (oop) cur->object();
!         const markWord mark = cur->header();
          ResourceMark rm;
          out->print(INTPTR_FORMAT "  %d%d%d  %7d  " INTPTR_FORMAT "  %s",
!                    p2i(cur), cur->is_busy() != 0, mark.hash() != 0,
!                    cur->owner() != NULL, (int)cur->ref_count(), p2i(obj),
                     obj->klass()->external_name());
!         if (cur->is_busy() != 0) {
!           out->print(" (%s)", cur->is_busy_to_string(&ss));
            ss.reset();
          }
          out->cr();
+
+         mark_next_for_traversal(&cur, &next);
+         if (cur == NULL) {
+           break;
+         }
        }
      }
    }

    out->print_cr("In-use per-thread monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s  %18s  %s  %7s  %18s  %18s",
                  "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("==================  ==================  ===  =======  ==================  ==================");
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
!     ObjectMonitor* cur = NULL;
!     ObjectMonitor* next = NULL;
!     if (mark_list_head(&jt->om_in_use_list, &cur, &next)) {
!       // Marked the global in-use list head so process the list.
!       while (true) {
!         const oop obj = (oop) cur->object();
!         const markWord mark = cur->header();
          ResourceMark rm;
          out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  %7d  "
!                    INTPTR_FORMAT "  %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
!                    mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
                     p2i(obj), obj->klass()->external_name());
!         if (cur->is_busy() != 0) {
!           out->print(" (%s)", cur->is_busy_to_string(&ss));
            ss.reset();
          }
          out->cr();
+
+         mark_next_for_traversal(&cur, &next);
+         if (cur == NULL) {
+           break;
+         }
        }
      }
    }

    out->flush();
  }
*** 3227,3258 ****
  int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
    int pop_count = 0;
    out->print_cr("%18s  %10s  %10s  %10s  %10s",
                  "Global Lists:", "InUse", "Free", "Wait", "Total");
    out->print_cr("==================  ==========  ==========  ==========  ==========");
!   out->print_cr("%18s  %10d  %10d  %10d  %10d", "",
!                 OrderAccess::load_acquire(&g_om_in_use_count),
!                 OrderAccess::load_acquire(&g_om_free_count),
!                 OrderAccess::load_acquire(&g_om_wait_count),
!                 OrderAccess::load_acquire(&g_om_population));
!   pop_count += OrderAccess::load_acquire(&g_om_in_use_count) +
!                OrderAccess::load_acquire(&g_om_free_count);
    if (HandshakeAfterDeflateIdleMonitors) {
!     pop_count += OrderAccess::load_acquire(&g_om_wait_count);
    }

    out->print_cr("%18s  %10s  %10s  %10s",
                  "Per-Thread Lists:", "InUse", "Free", "Provision");
    out->print_cr("==================  ==========  ==========  ==========");
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
      out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
!                   OrderAccess::load_acquire(&jt->om_in_use_count),
!                   OrderAccess::load_acquire(&jt->om_free_count),
!                   jt->om_free_provision);
!     pop_count += OrderAccess::load_acquire(&jt->om_in_use_count) +
!                  OrderAccess::load_acquire(&jt->om_free_count);
    }
    return pop_count;
  }

  #ifndef PRODUCT
--- 3325,3349 ----
  int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
    int pop_count = 0;
    out->print_cr("%18s  %10s  %10s  %10s  %10s",
                  "Global Lists:", "InUse", "Free", "Wait", "Total");
    out->print_cr("==================  ==========  ==========  ==========  ==========");
!   out->print_cr("%18s  %10d  %10d  %10d  %10d", "", g_om_in_use_count,
!                 g_om_free_count, g_om_wait_count, g_om_population);
!   pop_count += g_om_in_use_count + g_om_free_count;
    if (HandshakeAfterDeflateIdleMonitors) {
!     pop_count += g_om_wait_count;
    }

    out->print_cr("%18s  %10s  %10s  %10s",
                  "Per-Thread Lists:", "InUse", "Free", "Provision");
    out->print_cr("==================  ==========  ==========  ==========");
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
      out->print_cr(INTPTR_FORMAT "  %10d  %10d  %10d", p2i(jt),
!                   jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
!     pop_count += jt->om_in_use_count + jt->om_free_count;
    }
    return pop_count;
  }

  #ifndef PRODUCT
*** 3260,3281 ****
  // Check if monitor belongs to the monitor cache
  // The list is grow-only so it's *relatively* safe to traverse
  // the list of extant blocks without taking a lock.
  int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
!   PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
        address mon = (address)monitor;
        address blk = (address)block;
        size_t diff = mon - blk;
        assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
        return 1;
      }
!     // unmarked_next() is not needed with g_block_list (no next field marking).
!     block = (PaddedObjectMonitor*)OrderAccess::load_acquire(&block->_next_om);
    }
    return 0;
  }
  #endif
--- 3351,3374 ----
  // Check if monitor belongs to the monitor cache
  // The list is grow-only so it's *relatively* safe to traverse
  // the list of extant blocks without taking a lock.
  int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
!   PaddedObjectMonitor* block = g_block_list;
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
        address mon = (address)monitor;
        address blk = (address)block;
        size_t diff = mon - blk;
        assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
        return 1;
      }
!     // unmarked_next() is not needed with g_block_list (no next field
!     // marking) and no load_acquire() needed because _next_om is
!     // updated before g_block_list is changed with cmpxchg().
      block = (PaddedObjectMonitor*)block->_next_om;
    }
    return 0;
  }
  #endif