
src/hotspot/share/runtime/synchronizer.cpp

rev 56044 : imported patch 8230184.patch
rev 56046 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.

*** 123,132 ****
--- 123,135 ----
  ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
  int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list
+ bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
+ bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
+ jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

  static volatile intptr_t gListLock = 0;      // protects global monitor lists
  static volatile int g_om_free_count = 0;     // # on g_free_list
  static volatile int g_om_population = 0;     // # Extant -- in circulation
*** 209,222 ****
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
!   ObjectMonitor* const m = mark.monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
--- 212,233 ----
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
+
+ while (true) {
    const markWord mark = obj->mark();

    if (mark.has_monitor()) {
!     ObjectMonitorHandle omh;
!     if (!omh.save_om_ptr(obj, mark)) {
!       // Lost a race with async deflation so try again.
!       assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
!     }
!     ObjectMonitor* const m = omh.om_ptr();
      assert(oopDesc::equals((oop) m->object(), obj), "invariant");
      Thread* const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
*** 242,251 ****
--- 253,274 ----
      if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
        assert(m->_recursions == 0, "invariant");
        return true;
      }
+
+     if (AsyncDeflateIdleMonitors &&
+         Atomic::cmpxchg(self, &m->_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+       // The deflation protocol finished the first part (setting owner),
+       // but it failed the second part (making ref_count negative) and
+       // bailed. Or the ObjectMonitor was async deflated and reused.
+       // Acquired the monitor.
+       assert(m->_recursions == 0, "invariant");
+       return true;
+     }
+   }
+   break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
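To make the two fast-path acquisition attempts above easier to follow, here is a minimal standalone model in standard C++ (not HotSpot code; the names MonitorModel and quick_enter_model are invented for illustration, and DEFLATER_MARKER is modeled as a sentinel pointer). It shows the NULL-to-self CAS followed by the DEFLATER_MARKER-to-self CAS that reclaims a monitor from a bailed deflater:

#include <atomic>
#include <cstdio>

// Sentinel pointer standing in for HotSpot's DEFLATER_MARKER.
static int DEFLATER_MARKER_TOKEN;
static void* const DEFLATER_MARKER = &DEFLATER_MARKER_TOKEN;

struct MonitorModel {
  std::atomic<void*> owner{nullptr};  // models ObjectMonitor::_owner
};

// Returns true if 'self' acquired the monitor on a fast path.
static bool quick_enter_model(MonitorModel* m, void* self) {
  // Attempt 1: the monitor is unowned; replace NULL with self.
  void* expected = nullptr;
  if (m->owner.compare_exchange_strong(expected, self)) {
    return true;
  }
  // Attempt 2: a deflater set owner to DEFLATER_MARKER but bailed
  // before making ref_count negative; stealing the marker acquires
  // the monitor.
  expected = DEFLATER_MARKER;
  if (m->owner.compare_exchange_strong(expected, self)) {
    return true;
  }
  return false;  // genuinely contended; take the slow path
}

int main() {
  MonitorModel m;
  int me;
  std::printf("NULL owner:    acquired=%d\n", quick_enter_model(&m, &me));
  m.owner.store(DEFLATER_MARKER);
  std::printf("marker owner:  acquired=%d\n", quick_enter_model(&m, &me));
}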
*** 325,335 ****
        return;
      }
    }

    // We have to take the slow-path of possible inflation and then exit.
!   inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
  }

  // -----------------------------------------------------------------------------
  // Interpreter/Compiler Slow Case
  // This routine is used to handle interpreter/compiler slow case
--- 348,360 ----
        return;
      }
    }

    // We have to take the slow-path of possible inflation and then exit.
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, object, inflate_cause_vm_internal);
!   omh.om_ptr()->exit(true, THREAD);
  }

  // -----------------------------------------------------------------------------
  // Interpreter/Compiler Slow Case
  // This routine is used to handle interpreter/compiler slow case
*** 358,368 ****
    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markWord::unused_mark());
!   inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
  }

  // This routine is used to handle interpreter/compiler slow case
  // We don't need to use fast path here, because it must have
  // failed in the interpreter/compiler code. Simply use the heavy
--- 383,395 ----
    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markWord::unused_mark());
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
!   omh.om_ptr()->enter(THREAD);
  }

  // This routine is used to handle interpreter/compiler slow case
  // We don't need to use fast path here, because it must have
  // failed in the interpreter/compiler code. Simply use the heavy
*** 387,411 ****
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }

!   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
!
!   return monitor->complete_exit(THREAD);
  }

  // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
  void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }

!   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
!
!   monitor->reenter(recursion, THREAD);
  }
  // -----------------------------------------------------------------------------
  // JNI locks on java objects
  // NOTE: must use heavy weight monitor to handle jni monitor enter
  void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
--- 414,439 ----
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }

!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
!   intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
!   return ret_code;
  }

  // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
  void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }

!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
!   omh.om_ptr()->reenter(recursion, THREAD);
  }
  // -----------------------------------------------------------------------------
  // JNI locks on java objects
  // NOTE: must use heavy weight monitor to handle jni monitor enter
  void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
*** 413,423 ****
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    THREAD->set_current_pending_monitor_is_from_java(false);
!   inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
    THREAD->set_current_pending_monitor_is_from_java(true);
  }

  // NOTE: must use heavy weight monitor to handle jni monitor exit
  void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
--- 441,453 ----
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    THREAD->set_current_pending_monitor_is_from_java(false);
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
!   omh.om_ptr()->enter(THREAD);
    THREAD->set_current_pending_monitor_is_from_java(true);
  }

  // NOTE: must use heavy weight monitor to handle jni monitor exit
  void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
*** 426,436 ****
      BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
      obj = h_obj();
    }
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

!   ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
    // If this thread has locked the object, exit the monitor. We
    // intentionally do not use CHECK here because we must exit the
    // monitor even if an exception is pending.
    if (monitor->check_owner(THREAD)) {
      monitor->exit(true, THREAD);
--- 456,468 ----
      BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
      obj = h_obj();
    }
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
!   ObjectMonitor* monitor = omh.om_ptr();
    // If this thread has locked the object, exit the monitor. We
    // intentionally do not use CHECK here because we must exit the
    // monitor even if an exception is pending.
    if (monitor->check_owner(THREAD)) {
      monitor->exit(true, THREAD);
*** 467,497 ****
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    if (millis < 0) {
      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
    }
!   ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

    DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
    monitor->wait(millis, true, THREAD);

    // This dummy call is in place to get around dtrace bug 6254741.  Once
    // that's fixed we can uncomment the following line, remove the call
    // and change this function back into a "void" func.
    // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
!   return dtrace_waited_probe(monitor, obj, THREAD);
  }

  void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    if (millis < 0) {
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
    }
!   inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
  }

  void ObjectSynchronizer::notify(Handle obj, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
--- 499,534 ----
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    if (millis < 0) {
      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
    }
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_wait);
!   ObjectMonitor* monitor = omh.om_ptr();

    DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
    monitor->wait(millis, true, THREAD);

    // This dummy call is in place to get around dtrace bug 6254741.  Once
    // that's fixed we can uncomment the following line, remove the call
    // and change this function back into a "void" func.
    // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
!   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
!   return ret_code;
  }

  void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
    if (millis < 0) {
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
    }
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_wait);
!   omh.om_ptr()->wait(millis, false, THREAD);
  }

  void ObjectSynchronizer::notify(Handle obj, TRAPS) {
    if (UseBiasedLocking) {
      BiasedLocking::revoke_and_rebias(obj, false, THREAD);
*** 500,510 ****

    markWord mark = obj->mark();
    if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
      return;
    }
!   inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
  }

  // NOTE: see comment of notify()
  void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
    if (UseBiasedLocking) {
--- 537,549 ----

    markWord mark = obj->mark();
    if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
      return;
    }
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_notify);
!   omh.om_ptr()->notify(THREAD);
  }

  // NOTE: see comment of notify()
  void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
    if (UseBiasedLocking) {
*** 514,524 ****

    markWord mark = obj->mark();
    if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
      return;
    }
!   inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
  }

  // -----------------------------------------------------------------------------
  // Hash Code handling
  //
--- 553,565 ----

    markWord mark = obj->mark();
    if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
      return;
    }
!   ObjectMonitorHandle omh;
!   inflate(&omh, THREAD, obj(), inflate_cause_notify);
!   omh.om_ptr()->notifyAll(THREAD);
  }

  // -----------------------------------------------------------------------------
  // Hash Code handling
  //
*** 708,717 ****
--- 749,759 ----
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           self->is_Java_thread() , "invariant");
    assert(Universe::verify_in_progress() || DumpSharedSpaces ||
           ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

+   while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);
*** 732,742 ****
      }
      // If atomic operation failed, we must inflate the header
      // into heavy weight monitor. We could add more code here
      // for fast path, but it does not worth the complexity.
    } else if (mark.has_monitor()) {
!     monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        return hash;
--- 774,790 ----
      }
      // If atomic operation failed, we must inflate the header
      // into heavy weight monitor. We could add more code here
      // for fast path, but it does not worth the complexity.
    } else if (mark.has_monitor()) {
!     ObjectMonitorHandle omh;
!     if (!omh.save_om_ptr(obj, mark)) {
!       // Lost a race with async deflation so try again.
!       assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
!     }
!     monitor = omh.om_ptr();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        return hash;
*** 758,768 ****
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set hash code
!   monitor = inflate(self, obj, inflate_cause_hash_code);
    // Load displaced header and check it has hash code
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {
--- 806,818 ----
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set hash code
!   ObjectMonitorHandle omh;
!   inflate(&omh, self, obj, inflate_cause_hash_code);
!   monitor = omh.om_ptr();
    // Load displaced header and check it has hash code
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {
*** 774,790 ****
--- 824,845 ----
        if (test != mark) {
          // The only non-deflation update to the ObjectMonitor's
          // header/dmw field is to merge in the hash code. If someone
          // adds a new usage of the header/dmw field, please update
          // this code.
+         // ObjectMonitor::install_displaced_markword_in_object()
+         // does mark the header/dmw field as part of async deflation,
+         // but that protocol cannot happen now due to the
+         // ObjectMonitorHandle above.
          hash = test.hash();
          assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
          assert(hash != 0, "Trivial unexpected object/monitor header usage.");
        }
      }
      // We finally get the hash
      return hash;
+   }
  }

  // Deprecated -- use FastHashCode() instead.

  intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
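The retry loops added through FastHashCode() all hinge on ObjectMonitorHandle::save_om_ptr() pinning the monitor against async deflation. Below is a minimal standalone sketch of that idiom in standard C++ (illustrative only; the names MonitorModel and save_om_ptr_model are invented, and the real protocol also re-checks the object's mark word after bumping ref_count). A negative ref_count models "deflation won":

#include <atomic>
#include <climits>
#include <cstdio>

struct MonitorModel {
  std::atomic<int> ref_count{0};  // < 0 means async deflation won
};

// Try to pin the monitor by incrementing ref_count; back out and
// report failure if the deflater has already made it negative.
static bool save_om_ptr_model(MonitorModel* m) {
  int rc = m->ref_count.fetch_add(1) + 1;
  if (rc <= 0) {
    m->ref_count.fetch_sub(1);  // undo; monitor is being deflated
    return false;               // caller re-reads the mark and retries
  }
  return true;                  // monitor stays pinned until released
}

int main() {
  MonitorModel m;
  std::printf("pin live monitor:     %s\n",
              save_om_ptr_model(&m) ? "ok" : "retry");
  m.ref_count.store(INT_MIN / 2);  // deflater made ref_count negative
  std::printf("pin deflated monitor: %s\n",
              save_om_ptr_model(&m) ? "ok" : "retry");
}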
*** 800,823 ****
    }
    assert(thread == JavaThread::current(), "Can only be called on current thread");
    oop obj = h_obj();

    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      return thread->is_lock_owned((address)mark.locker());
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark.has_monitor()) {
!     ObjectMonitor* monitor = mark.monitor();
!     return monitor->is_entered(thread) != 0;
    }

    // Unlocked case, header in place
    assert(mark.is_neutral(), "sanity check");
    return false;
  }

  // Be aware of this method could revoke bias of the lock object.
  // This method queries the ownership of the lock handle specified by 'h_obj'.
  // If the current thread owns the lock, it returns owner_self. If no
--- 855,886 ----
    }
    assert(thread == JavaThread::current(), "Can only be called on current thread");
    oop obj = h_obj();

+   while (true) {
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      return thread->is_lock_owned((address)mark.locker());
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark.has_monitor()) {
!     ObjectMonitorHandle omh;
!     if (!omh.save_om_ptr(obj, mark)) {
!       // Lost a race with async deflation so try again.
!       assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
!     }
!     bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
!     return ret_code;
    }

    // Unlocked case, header in place
    assert(mark.is_neutral(), "sanity check");
    return false;
+   }
  }

  // Be aware of this method could revoke bias of the lock object.
  // This method queries the ownership of the lock handle specified by 'h_obj'.
  // If the current thread owns the lock, it returns owner_self. If no
*** 839,869 ****
           "biases should be revoked by now");
    }

    assert(self == JavaThread::current(), "Can only be called on current thread");
    oop obj = h_obj();

    markWord mark = read_stable_mark(obj);

    // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
    if (mark.has_locker()) {
      return self->is_lock_owned((address)mark.locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
!   // not at a safepoint.
    if (mark.has_monitor()) {
!     void* owner = mark.monitor()->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark.is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
  }

  // FIXME: jvmti should call this
  JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
    if (UseBiasedLocking) {
--- 902,942 ----
           "biases should be revoked by now");
    }

    assert(self == JavaThread::current(), "Can only be called on current thread");
    oop obj = h_obj();
+
+   while (true) {
    markWord mark = read_stable_mark(obj);

    // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
    if (mark.has_locker()) {
      return self->is_lock_owned((address)mark.locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
!   // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark.has_monitor()) {
!     ObjectMonitorHandle omh;
!     if (!omh.save_om_ptr(obj, mark)) {
!       // Lost a race with async deflation so try again.
!       assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
!     }
!     ObjectMonitor* monitor = omh.om_ptr();
!     void* owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark.is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
+   }
  }

  // FIXME: jvmti should call this
  JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
    if (UseBiasedLocking) {
*** 874,895 ****
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
- address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
!   ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
--- 947,975 ----
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
+ while (true) {
+ address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
!   ObjectMonitorHandle omh;
!   if (!omh.save_om_ptr(obj, mark)) {
!     // Lost a race with async deflation so try again.
!     assert(AsyncDeflateIdleMonitors, "sanity check");
!     continue;
!   }
!   ObjectMonitor* monitor = omh.om_ptr();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
*** 901,923 ****
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark.is_neutral(), "sanity check");

    return NULL;
  }

  // Visitors ...

  void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
    PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      for (int i = _BLOCKSIZE - 1; i > 0; i--) {
        ObjectMonitor* mid = (ObjectMonitor *)(block + i);
!       oop object = (oop)mid->object();
!       if (object != NULL) {
          // Only process with closure if the object is set.
          closure->do_monitor(mid);
        }
      }
      block = (PaddedObjectMonitor*)block->_next_om;
    }
--- 981,1013 ----
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark.is_neutral(), "sanity check");

    return NULL;
+   }
  }

  // Visitors ...

  void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
    PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
    while (block != NULL) {
      assert(block->object() == CHAINMARKER, "must be a block header");
      for (int i = _BLOCKSIZE - 1; i > 0; i--) {
        ObjectMonitor* mid = (ObjectMonitor *)(block + i);
!       if (mid->is_active()) {
!         ObjectMonitorHandle omh(mid);
!
!         if (mid->object() == NULL ||
!             (AsyncDeflateIdleMonitors && mid->_owner == DEFLATER_MARKER)) {
            // Only process with closure if the object is set.
+           // For async deflation, race here if monitor is not owned!
+           // The above ref_count bump (in ObjectMonitorHandle ctr)
+           // will cause subsequent async deflation to skip it.
+           // However, previous or concurrent async deflation is a race.
+           continue;
+         }
          closure->do_monitor(mid);
        }
      }
      block = (PaddedObjectMonitor*)block->_next_om;
    }
*** 925,946 ****
  static bool monitors_used_above_threshold() {
    if (g_om_population == 0) {
      return false;
    }
    int monitors_used = g_om_population - g_om_free_count;
    int monitor_usage = (monitors_used * 100LL) / g_om_population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }

! bool ObjectSynchronizer::is_cleanup_needed() {
!   if (MonitorUsedDeflationThreshold > 0) {
!     return monitors_used_above_threshold();
    }
    return false;
  }

  void ObjectSynchronizer::oops_do(OopClosure* f) {
    // We only scan the global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  }
--- 1015,1084 ----
  static bool monitors_used_above_threshold() {
    if (g_om_population == 0) {
      return false;
    }
+   if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = g_om_population - g_om_free_count;
    int monitor_usage = (monitors_used * 100LL) / g_om_population;
    return monitor_usage > MonitorUsedDeflationThreshold;
+   }
+   return false;
  }

! // Returns true if MonitorBound is set (> 0) and if the specified
! // cnt is > MonitorBound. Otherwise returns false.
! static bool is_MonitorBound_exceeded(const int cnt) {
!   const int mx = MonitorBound;
!   return mx > 0 && cnt > mx;
! }
!
! bool ObjectSynchronizer::is_async_deflation_needed() {
!   if (!AsyncDeflateIdleMonitors) {
!     return false;
!   }
!   if (is_async_deflation_requested()) {
!     // Async deflation request.
!     return true;
!   }
!   if (AsyncDeflationInterval > 0 &&
!       time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
!       monitors_used_above_threshold()) {
!     // It's been longer than our specified deflate interval and there
!     // are too many monitors in use. We don't deflate more frequently
!     // than AsyncDeflationInterval (unless is_async_deflation_requested)
!     // in order to not swamp the ServiceThread.
!     _last_async_deflation_time_ns = os::javaTimeNanos();
!     return true;
!   }
!   if (is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
!     // Not enough ObjectMonitors on the global free list.
!     return true;
    }
    return false;
  }

+ bool ObjectSynchronizer::is_safepoint_deflation_needed() {
+   if (!AsyncDeflateIdleMonitors) {
+     if (monitors_used_above_threshold()) {
+       // Too many monitors in use.
+       return true;
+     }
+     return false;
+   }
+   if (is_special_deflation_requested()) {
+     // For AsyncDeflateIdleMonitors only do a safepoint deflation
+     // if there is a special deflation request.
+     return true;
+   }
+   return false;
+ }
+
+ jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
+   return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
+ }
+
  void ObjectSynchronizer::oops_do(OopClosure* f) {
    // We only scan the global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  }
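For readers checking the policy above, here is a tiny standalone recomputation of the usage-ratio check that feeds both is_async_deflation_needed() and the safepoint path (the concrete values are made up for illustration):

#include <cstdio>

int main() {
  const int g_om_population = 1000;   // extant ObjectMonitors
  const int g_om_free_count = 50;     // on the global free list
  const int MonitorUsedDeflationThreshold = 90;  // percent

  int monitors_used = g_om_population - g_om_free_count;           // 950
  long long monitor_usage = (monitors_used * 100LL) / g_om_population;  // 95
  bool above = monitor_usage > MonitorUsedDeflationThreshold;      // true

  std::printf("usage=%lld%% above_threshold=%s\n",
              monitor_usage, above ? "yes" : "no");
}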
*** 988,997 ****
--- 1126,1136 ----
  // Constraining monitor pool growth via MonitorBound ...
  //
  // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
  //
+ // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
  // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
  // the rate of scavenging is driven primarily by GC.  As such,  we can find
  // an inordinate number of monitors in circulation.
  // To avoid that scenario we can artificially induce a STW safepoint
  // if the pool appears to be growing past some reasonable bound.
*** 1002,1018 ****
  // we'll incur more safepoints, which are harmful to performance.
  // See also: GuaranteedSafepointInterval
  //
  // The current implementation uses asynchronous VM operations.
  //
! // If MonitorBound is set, the boundry applies to
  //     (g_om_population - g_om_free_count)
  // i.e., if there are not enough ObjectMonitors on the global free list,
  // then a safepoint deflation is induced. Picking a good MonitorBound value
  // is non-trivial.

  static void InduceScavenge(Thread* self, const char * Whence) {
    // Induce STW safepoint to trim monitors
    // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
    // More precisely, trigger an asynchronous STW safepoint as the number
    // of active monitors passes the specified threshold.
    // TODO: assert thread state is reasonable
--- 1141,1170 ----
  // we'll incur more safepoints, which are harmful to performance.
  // See also: GuaranteedSafepointInterval
  //
  // The current implementation uses asynchronous VM operations.
  //
! // When safepoint deflation is being used and MonitorBound is set, the
! // boundry applies to
  //     (g_om_population - g_om_free_count)
  // i.e., if there are not enough ObjectMonitors on the global free list,
  // then a safepoint deflation is induced. Picking a good MonitorBound value
  // is non-trivial.
+ //
+ // When async deflation is being used:
+ // The monitor pool is still grow-only. Async deflation is requested
+ // by a safepoint's cleanup phase or by the ServiceThread at periodic
+ // intervals when is_async_deflation_needed() returns true. In
+ // addition to other policies that are checked, if there are not
+ // enough ObjectMonitors on the global free list, then
+ // is_async_deflation_needed() will return true. The ServiceThread
+ // calls deflate_global_idle_monitors_using_JT() and also sets the
+ // per-thread om_request_deflation flag as needed.

  static void InduceScavenge(Thread* self, const char * Whence) {
+   assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
+
    // Induce STW safepoint to trim monitors
    // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
    // More precisely, trigger an asynchronous STW safepoint as the number
    // of active monitors passes the specified threshold.
    // TODO: assert thread state is reasonable
*** 1024,1040 ****
      // The VMThread will delete the op when completed.
      VMThread::execute(new VM_ScavengeMonitors());
    }
  }

! ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of ObjectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024;

    stringStream ss;
    for (;;) {
      ObjectMonitor* m;

      // 1: try to allocate from the thread's local om_free_list.
--- 1176,1210 ----
      // The VMThread will delete the op when completed.
      VMThread::execute(new VM_ScavengeMonitors());
    }
  }

! ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
!                                             const InflateCause cause) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of ObjectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024;
+
+   if (AsyncDeflateIdleMonitors) {
+     JavaThread* jt = (JavaThread *)self;
+     if (jt->om_request_deflation && jt->om_in_use_count > 0 &&
+         cause != inflate_cause_vm_internal) {
+       // Deflate any per-thread idle monitors for this JavaThread if
+       // this is not an internal inflation; internal inflations can
+       // occur in places where it is not safe to pause for a safepoint.
+       // Clean up your own mess (Gibbs Rule 45). Otherwise, skip this
+       // deflation. deflate_global_idle_monitors_using_JT() is called
+       // by the ServiceThread. Per-thread async deflation is triggered
+       // by the ServiceThread via om_request_deflation.
+       debug_only(jt->check_for_valid_safepoint_state(false);)
+       ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
+     }
+   }

    stringStream ss;
    for (;;) {
      ObjectMonitor* m;

      // 1: try to allocate from the thread's local om_free_list.
*** 1046,1055 ****
--- 1216,1226 ----
      m = self->om_free_list;
      if (m != NULL) {
        self->om_free_list = m->_next_om;
        self->om_free_count--;
        guarantee(m->object() == NULL, "invariant");
+       m->set_allocation_state(ObjectMonitor::New);
        m->_next_om = self->om_in_use_list;
        self->om_in_use_list = m;
        self->om_in_use_count++;
        return m;
      }
*** 1067,1085 ****
        for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
          g_om_free_count--;
          ObjectMonitor* take = g_free_list;
          g_free_list = take->_next_om;
          guarantee(take->object() == NULL, "invariant");
          take->Recycle();
          om_release(self, take, false);
        }
        Thread::muxRelease(&gListLock);
        self->om_free_provision += 1 + (self->om_free_provision/2);
        if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

!       const int mx = MonitorBound;
!       if (mx > 0 && (g_om_population-g_om_free_count) > mx) {
          // Not enough ObjectMonitors on the global free list.
          // We can't safely induce a STW safepoint from om_alloc() as our thread
          // state may not be appropriate for such activities and callers may hold
          // naked oops, so instead we defer the action.
          InduceScavenge(self, "om_alloc");
--- 1238,1273 ----
        for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
          g_om_free_count--;
          ObjectMonitor* take = g_free_list;
          g_free_list = take->_next_om;
          guarantee(take->object() == NULL, "invariant");
+         if (AsyncDeflateIdleMonitors) {
+           // We allowed 3 field values to linger during async deflation.
+           // We clear header and restore ref_count here, but we leave
+           // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
+           // enter optimization can no longer race with async deflation
+           // and reuse.
+           take->set_header(markWord::zero());
+           if (take->ref_count() < 0) {
+             // Add back max_jint to restore the ref_count field to its
+             // proper value.
+             Atomic::add(max_jint, &take->_ref_count);
+
+             assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
+                    take->ref_count());
+           }
+         }
          take->Recycle();
+         assert(take->is_free(), "invariant");
          om_release(self, take, false);
        }
        Thread::muxRelease(&gListLock);
        self->om_free_provision += 1 + (self->om_free_provision/2);
        if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

!       if (!AsyncDeflateIdleMonitors &&
!           is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
          // Not enough ObjectMonitors on the global free list.
          // We can't safely induce a STW safepoint from om_alloc() as our thread
          // state may not be appropriate for such activities and callers may hold
          // naked oops, so instead we defer the action.
          InduceScavenge(self, "om_alloc");
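The Atomic::add(max_jint, ...) above is subtle: the deflater parks ref_count at -max_jint, but a racing handle may have bumped it by the time the monitor is reused, so the restore must be an addition rather than a store. A standalone standard-C++ model (illustrative; INT_MAX stands in for max_jint, and the "racing handle" increment is simulated):

#include <atomic>
#include <climits>
#include <cstdio>

int main() {
  std::atomic<int> ref_count{0};

  // Deflation: swing ref_count from 0 to -INT_MAX so handle users
  // observe a negative value and retry.
  int expected = 0;
  bool deflated = ref_count.compare_exchange_strong(expected, -INT_MAX);

  // A thread that raced in may have left a residual +1, so the
  // observed value can be -INT_MAX + k; adding INT_MAX back restores
  // exactly that residual count (a store of 0 would lose it).
  ref_count.fetch_add(1);          // simulated racing handle
  ref_count.fetch_add(INT_MAX);    // restore on reuse from the free list

  std::printf("deflated=%d restored ref_count=%d\n",
              deflated, ref_count.load());  // prints restored ref_count=1
}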
*** 1120,1129 ****
--- 1308,1318 ----
      // linkage should be reconsidered.  A better implementation would
      // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

      for (int i = 1; i < _BLOCKSIZE; i++) {
        temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
+       assert(temp[i].is_free(), "invariant");
      }

      // terminate the last monitor as the end of list
      temp[_BLOCKSIZE - 1]._next_om = NULL;
*** 1162,1182 ****
  // a CAS attempt failed.  This doesn't allow unbounded #s of monitors to
  // accumulate on a thread's free list.
  //
  // Key constraint: all ObjectMonitors on a thread's free list and the global
  // free list must have their object field set to null. This prevents the
! // scavenger -- deflate_monitor_list() -- from reclaiming them while we
! // are trying to release them.

  void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                      bool from_per_thread_alloc) {
    guarantee(m->header().value() == 0, "invariant");
    guarantee(m->object() == NULL, "invariant");
    stringStream ss;
    guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
              "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
              m->_recursions);
    // _next_om is used for both per-thread in-use and free lists so
    // we have to remove 'm' from the in-use list first (as needed).
    if (from_per_thread_alloc) {
      // Need to remove 'm' from om_in_use_list.
      ObjectMonitor* cur_mid_in_use = NULL;
--- 1351,1372 ----
  // a CAS attempt failed.  This doesn't allow unbounded #s of monitors to
  // accumulate on a thread's free list.
  //
  // Key constraint: all ObjectMonitors on a thread's free list and the global
  // free list must have their object field set to null. This prevents the
! // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
! // -- from reclaiming them while we are trying to release them.

  void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                      bool from_per_thread_alloc) {
    guarantee(m->header().value() == 0, "invariant");
    guarantee(m->object() == NULL, "invariant");
    stringStream ss;
    guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
              "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
              m->_recursions);
+   m->set_allocation_state(ObjectMonitor::Free);
    // _next_om is used for both per-thread in-use and free lists so
    // we have to remove 'm' from the in-use list first (as needed).
    if (from_per_thread_alloc) {
      // Need to remove 'm' from om_in_use_list.
      ObjectMonitor* cur_mid_in_use = NULL;
*** 1196,1205 ****
--- 1386,1396 ----
      }
      assert(extracted, "Should have extracted from in-use list");
    }

    m->_next_om = self->om_free_list;
+   guarantee(m->is_free(), "invariant");
    self->om_free_list = m;
    self->om_free_count++;
  }

  // Return ObjectMonitors on a moribund thread's free and in-use
*** 1212,1221 ****
--- 1403,1416 ----
  // a safepoint and interleave with deflate_idle_monitors(). In
  // particular, this ensures that the thread's in-use monitors are
  // scanned by a GC safepoint, either via Thread::oops_do() (before
  // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
  // om_flush() is called).
+ //
+ // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
+ // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
+ // run at the same time as om_flush() so we have to be careful.

  void ObjectSynchronizer::om_flush(Thread* self) {
    ObjectMonitor* free_list = self->om_free_list;
    ObjectMonitor* free_tail = NULL;
    int free_count = 0;
*** 1229,1239 ****
        free_tail = s;
        guarantee(s->object() == NULL, "invariant");
        guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
      }
      guarantee(free_tail != NULL, "invariant");
!     assert(self->om_free_count == free_count, "free-count off");
      self->om_free_list = NULL;
      self->om_free_count = 0;
    }

    ObjectMonitor* in_use_list = self->om_in_use_list;
--- 1424,1434 ----
        free_tail = s;
        guarantee(s->object() == NULL, "invariant");
        guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
      }
      guarantee(free_tail != NULL, "invariant");
!     ADIM_guarantee(self->om_free_count == free_count, "free-count off");
      self->om_free_list = NULL;
      self->om_free_count = 0;
    }

    ObjectMonitor* in_use_list = self->om_in_use_list;
*** 1246,1258 ****
      // in-use list g_om_in_use_list below, under the gListLock.
      ObjectMonitor *cur_om;
      for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
        in_use_tail = cur_om;
        in_use_count++;
      }
      guarantee(in_use_tail != NULL, "invariant");
!     assert(self->om_in_use_count == in_use_count, "in-use count off");
      self->om_in_use_list = NULL;
      self->om_in_use_count = 0;
    }

    Thread::muxAcquire(&gListLock, "om_flush");
--- 1441,1454 ----
      // in-use list g_om_in_use_list below, under the gListLock.
      ObjectMonitor *cur_om;
      for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
        in_use_tail = cur_om;
        in_use_count++;
+       ADIM_guarantee(cur_om->is_active(), "invariant");
      }
      guarantee(in_use_tail != NULL, "invariant");
!     ADIM_guarantee(self->om_in_use_count == in_use_count, "in-use count off");
      self->om_in_use_list = NULL;
      self->om_in_use_count = 0;
    }

    Thread::muxAcquire(&gListLock, "om_flush");
*** 1296,1318 ****
    event->set_cause((u1)cause);
    event->commit();
  }

  // Fast path code shared by multiple functions
! void ObjectSynchronizer::inflate_helper(oop obj) {
    markWord mark = obj->mark();
    if (mark.has_monitor()) {
!     assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
!     assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
      return;
    }
-   inflate(Thread::current(), obj, inflate_cause_vm_internal);
  }

! ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
!                                            oop object,
!                                            const InflateCause cause) {
    // Inflate mutates the heap ...
    // Relaxing assertion for bug 6320749.
    assert(Universe::verify_in_progress() ||
           !SafepointSynchronize::is_at_safepoint(), "invariant");
--- 1492,1523 ----
    event->set_cause((u1)cause);
    event->commit();
  }

  // Fast path code shared by multiple functions
! void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
!   while (true) {
      markWord mark = obj->mark();
      if (mark.has_monitor()) {
!       if (!omh_p->save_om_ptr(obj, mark)) {
!         // Lost a race with async deflation so try again.
!         assert(AsyncDeflateIdleMonitors, "sanity check");
!         continue;
!       }
!       ObjectMonitor* monitor = omh_p->om_ptr();
!       assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
!       markWord dmw = monitor->header();
!       assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
        return;
      }
!     inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
      return;
    }
  }

! void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
!                                  oop object, const InflateCause cause) {
    // Inflate mutates the heap ...
    // Relaxing assertion for bug 6320749.
    assert(Universe::verify_in_progress() ||
           !SafepointSynchronize::is_at_safepoint(), "invariant");
*** 1329,1344 ****
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
!     ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
!     return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
--- 1534,1554 ----
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
!     if (!omh_p->save_om_ptr(object, mark)) {
!       // Lost a race with async deflation so try again.
!       assert(AsyncDeflateIdleMonitors, "sanity check");
!       continue;
!     }
!     ObjectMonitor* inf = omh_p->om_ptr();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
!     return;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
*** 1370,1380 ****
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
!     ObjectMonitor* m = om_alloc(self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
--- 1580,1601 ----
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
!     ObjectMonitor* m;
!     if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
!       // If !AsyncDeflateIdleMonitors or if an internal inflation, then
!       // we won't stop for a potential safepoint in om_alloc.
!       m = om_alloc(self, cause);
!     } else {
!       // If AsyncDeflateIdleMonitors and not an internal inflation, then
!       // we may stop for a safepoint in om_alloc() so protect object.
!       Handle h_obj(self, object);
!       m = om_alloc(self, cause);
!       object = h_obj();  // Refresh object.
!     }
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
*** 1417,1427 ****
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
!     assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
--- 1638,1648 ----
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
!     ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
*** 1431,1440 ****
--- 1652,1665 ----
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark.locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

+     omh_p->set_om_ptr(m);
+     assert(m->is_new(), "freshly allocated monitor must be new");
+     m->set_allocation_state(ObjectMonitor::Old);
+
      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));
*** 1448,1458 ****
                                    object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
--- 1673,1684 ----
                                    object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
!     ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
!     return;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
*** 1463,1485 ****
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
!   assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
!   ObjectMonitor* m = om_alloc(self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_object(object);
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
--- 1689,1731 ----
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
!   ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
!   ObjectMonitor* m;
!   if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
!     // If !AsyncDeflateIdleMonitors or if an internal inflation, then
!     // we won't stop for a potential safepoint in om_alloc.
!     m = om_alloc(self, cause);
!   } else {
!     // If AsyncDeflateIdleMonitors and not an internal inflation, then
!     // we may stop for a safepoint in om_alloc() so protect object.
!     Handle h_obj(self, object);
!     m = om_alloc(self, cause);
!     object = h_obj();  // Refresh object.
!   }
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
+   // If we leave _owner == DEFLATER_MARKER here, then the simple C2
+   // ObjectMonitor enter optimization can no longer race with async
+   // deflation and reuse.
    m->set_object(object);
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

+   omh_p->set_om_ptr(m);
+   assert(m->is_new(), "freshly allocated monitor must be new");
+   m->set_allocation_state(ObjectMonitor::Old);
+
    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
+     omh_p->set_om_ptr(NULL);
+     // om_release() will reset the allocation state
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
*** 1496,1512 ****
                                  object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
!   return m;
  }
  }

  // We maintain a list of in-use monitors for each thread.
  //
  // deflate_thread_local_monitors() scans a single thread's in-use list, while
  // deflate_idle_monitors() scans only a global list of in-use monitors which
  // is populated only as a thread dies (see om_flush()).
  //
  // These operations are called at all safepoints, immediately after mutators
--- 1742,1760 ----
                                  object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
!   ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
!   return;
  }
  }

  // We maintain a list of in-use monitors for each thread.
  //
+ // For safepoint based deflation:
  // deflate_thread_local_monitors() scans a single thread's in-use list, while
  // deflate_idle_monitors() scans only a global list of in-use monitors which
  // is populated only as a thread dies (see om_flush()).
  //
  // These operations are called at all safepoints, immediately after mutators
*** 1521,1530 ****
--- 1769,1803 ----
  //
  // Perversely, the heap size -- and thus the STW safepoint rate --
  // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
  // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
  // This is an unfortunate aspect of this design.
+ //
+ // For async deflation:
+ // If a special deflation request is made, then the safepoint based
+ // deflation mechanism is used. Otherwise, an async deflation request
+ // is registered with the ServiceThread and it is notified.

+ void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
+   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+   // The per-thread in-use lists are handled in
+   // ParallelSPCleanupThreadClosure::do_thread().
+
+   if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
+     // Use the older mechanism for the global in-use list or if a
+     // special deflation has been requested before the safepoint.
+     ObjectSynchronizer::deflate_idle_monitors(_counters);
+     return;
+   }
+
+   log_debug(monitorinflation)("requesting async deflation of idle monitors.");
+   // Request deflation of idle monitors by the ServiceThread:
+   set_is_async_deflation_requested(true);
+   MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+   ml.notify_all();
+ }

  // Deflate a single monitor if not in-use
  // Return true if deflated, false if in-use
  bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                           ObjectMonitor** free_head_p,
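The hand-off in do_safepoint_work() is a classic flag-plus-notify pattern: the safepoint thread sets a request flag under the service lock and wakes the ServiceThread, which performs the work outside the safepoint. A minimal standalone model in standard C++ (illustrative only; HotSpot uses Service_lock/MonitorLocker, not std::condition_variable):

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex service_lock;
static std::condition_variable service_cv;
static std::atomic<bool> async_deflation_requested{false};

static void service_thread_loop() {
  std::unique_lock<std::mutex> ml(service_lock);
  // Wait until a deflation request has been registered.
  service_cv.wait(ml, [] { return async_deflation_requested.load(); });
  std::printf("service thread: deflating idle monitors\n");
  async_deflation_requested.store(false);
}

int main() {
  std::thread service(service_thread_loop);
  {
    // "Safepoint cleanup": register the request under the lock...
    std::lock_guard<std::mutex> ml(service_lock);
    async_deflation_requested.store(true);
  }
  service_cv.notify_all();  // ...then wake the service thread
  service.join();
}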
*** 1539,1549 ****
    guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
              ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
    const markWord dmw = mid->header();
    guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

!   if (mid->is_busy()) {
      deflated = false;
    } else {
      // Deflate the monitor if it is no longer being used
      // It's idle - scavenge and return to the global free list
      // plain old deflation ...
--- 1812,1824 ----
    guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
              ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
    const markWord dmw = mid->header();
    guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

!   if (mid->is_busy() || mid->ref_count() != 0) {
!     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
!     // is in use so no deflation.
      deflated = false;
    } else {
      // Deflate the monitor if it is no longer being used
      // It's idle - scavenge and return to the global free list
      // plain old deflation ...
*** 1555,1568 ****
--- 1830,1850 ----
                                    mark.value(), obj->klass()->external_name());
      }

      // Restore the header back to obj
      obj->release_set_mark(dmw);
+     if (AsyncDeflateIdleMonitors) {
+       // clear() expects the owner field to be NULL and we won't race
+       // with the simple C2 ObjectMonitor enter optimization since
+       // we're at a safepoint.
+       mid->set_owner(NULL);
+     }
      mid->clear();

      assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
             p2i(mid->object()));
+     assert(mid->is_free(), "invariant");

      // Move the deflated ObjectMonitor to the working free list
      // defined by free_head_p and free_tail_p.
      if (*free_head_p == NULL) *free_head_p = mid;
      if (*free_tail_p != NULL) {
*** 1581,1590 ****
--- 1863,2012 ----
      deflated = true;
    }
    return deflated;
  }

+ // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
+ // Returns true if it was deflated and false otherwise.
+ //
+ // The async deflation protocol sets owner to DEFLATER_MARKER and
+ // makes ref_count negative as signals to contending threads that
+ // an async deflation is in progress. There are a number of checks
+ // as part of the protocol to make sure that the calling thread has
+ // not lost the race to a contending thread or to a thread that just
+ // wants to use the ObjectMonitor*.
+ //
+ // The ObjectMonitor has been successfully async deflated when:
+ // (owner == DEFLATER_MARKER && ref_count < 0)
+ // Contending threads or ObjectMonitor* using threads that see those
+ // values know to retry their operation.
+ //
+ bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
+                                                   ObjectMonitor** free_head_p,
+                                                   ObjectMonitor** free_tail_p) {
+   assert(AsyncDeflateIdleMonitors, "sanity check");
+   assert(Thread::current()->is_Java_thread(), "precondition");
+   // A newly allocated ObjectMonitor should not be seen here so we
+   // avoid an endless inflate/deflate cycle.
+   assert(mid->is_old(), "must be old: allocation_state=%d",
+          (int) mid->allocation_state());
+
+   if (mid->is_busy() || mid->ref_count() != 0) {
+     // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
+     // is in use so no deflation.
+     return false;
+   }
+
+   if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
+     // ObjectMonitor is not owned by another thread. Our setting
+     // owner to DEFLATER_MARKER forces any contending thread through
+     // the slow path. This is just the first part of the async
+     // deflation dance.
+
+     if (mid->_contentions != 0 || mid->_waiters != 0) {
+       // Another thread has raced to enter the ObjectMonitor after
+       // mid->is_busy() above or has already entered and waited on
+       // it which makes it busy so no deflation. Restore owner to
+       // NULL if it is still DEFLATER_MARKER.
+       Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
+       return false;
+     }
+
+     if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
+       // Make ref_count negative to force any contending threads or
+       // ObjectMonitor* using threads to retry. This is the second
+       // part of the async deflation dance.
+
+       if (mid->owner_is_DEFLATER_MARKER()) {
+         // If owner is still DEFLATER_MARKER, then we have successfully
+         // signaled any contending threads to retry. If it is not, then we
+         // have lost the race to an entering thread and the ObjectMonitor
+         // is now busy. This is the third and final part of the async
+         // deflation dance.
+         // Note: This owner check solves the ABA problem with ref_count
+         // where another thread acquired the ObjectMonitor, finished
+         // using it and restored the ref_count to zero.
+
+         // Sanity checks for the races:
+         guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
+                   mid->_contentions);
+         guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
+         guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
+                   INTPTR_FORMAT, p2i(mid->_cxq));
+         guarantee(mid->_EntryList == NULL,
+                   "must be no entering threads: EntryList=" INTPTR_FORMAT,
+                   p2i(mid->_EntryList));
+
+         const oop obj = (oop) mid->object();
+         if (log_is_enabled(Trace, monitorinflation)) {
+           ResourceMark rm;
+           log_trace(monitorinflation)("deflate_monitor_using_JT: "
+                                       "object=" INTPTR_FORMAT ", mark="
+                                       INTPTR_FORMAT ", type='%s'",
+                                       p2i(obj), obj->mark().value(),
+                                       obj->klass()->external_name());
+         }
+
+         // Install the old mark word if nobody else has already done it.
+         mid->install_displaced_markword_in_object(obj);
+         mid->clear_using_JT();
+
+         assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
+                p2i(mid->object()));
+         assert(mid->is_free(), "must be free: allocation_state=%d",
+                (int) mid->allocation_state());
+
+         // Move the deflated ObjectMonitor to the working free list
+         // defined by free_head_p and free_tail_p.
+         if (*free_head_p == NULL) {
+           // First one on the list.
+           *free_head_p = mid;
+         }
+         if (*free_tail_p != NULL) {
+           // We append to the list so the caller can use mid->_next_om
+           // to fix the linkages in its context.
+           ObjectMonitor* prevtail = *free_tail_p;
+           // Should have been cleaned up by the caller:
+           assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
+                  INTPTR_FORMAT, p2i(prevtail->_next_om));
+           prevtail->_next_om = mid;
+         }
+         *free_tail_p = mid;
+
+         // At this point, mid->_next_om still refers to its current
+         // value and another ObjectMonitor's _next_om field still
+         // refers to this ObjectMonitor. Those linkages have to be
+         // cleaned up by the caller who has the complete context.
+
+         // We leave owner == DEFLATER_MARKER and ref_count < 0
+         // to force any racing threads to retry.
+         return true;  // Success, ObjectMonitor has been deflated.
+       }
+
+       // The owner was changed from DEFLATER_MARKER so we lost the
+       // race since the ObjectMonitor is now busy.
+
+       // Add back max_jint to restore the ref_count field to its
+       // proper value (which may not be what we saw above):
+       Atomic::add(max_jint, &mid->_ref_count);
+
+       assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
+              mid->ref_count());
+       return false;
+     }
+
+     // The ref_count was no longer 0 so we lost the race since the
+     // ObjectMonitor is now busy or the ObjectMonitor* is now is use.
+     // Restore owner to NULL if it is still DEFLATER_MARKER:
+     Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
+   }
+
+   // The owner field is no longer NULL so we lost the race since the
+   // ObjectMonitor is now busy.
+   return false;
+ }
+
  // Walk a given monitor list, and deflate idle monitors
  // The given list could be a per-thread list or a global list
  // Caller acquires gListLock as needed.
  //
  // In the case of parallel processing of thread local monitor lists,
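The comment block above describes the three-part "async deflation dance". Here is a compact standalone model of that protocol in standard C++ (illustrative only; MonitorModel and deflate_using_JT_model are invented names, INT_MAX stands in for max_jint, and the contentions/waiters/cxq/EntryList checks are omitted):

#include <atomic>
#include <climits>
#include <cstdio>

static int MARKER_TOKEN;
static void* const DEFLATER_MARKER = &MARKER_TOKEN;

struct MonitorModel {
  std::atomic<void*> owner{nullptr};
  std::atomic<int>   ref_count{0};
};

static bool deflate_using_JT_model(MonitorModel* m) {
  // Part 1: claim the owner field with the deflation marker.
  void* exp_owner = nullptr;
  if (!m->owner.compare_exchange_strong(exp_owner, DEFLATER_MARKER)) {
    return false;  // owned by someone: monitor is busy
  }
  // Part 2: force ref_count negative so handle users must retry.
  int exp_rc = 0;
  if (!m->ref_count.compare_exchange_strong(exp_rc, -INT_MAX)) {
    // A handle user got in first; undo part 1 if still ours.
    exp_owner = DEFLATER_MARKER;
    m->owner.compare_exchange_strong(exp_owner, nullptr);
    return false;
  }
  // Part 3: re-check the owner; an entering thread may have stolen
  // the marker between parts 1 and 2 (this also closes the ABA hole
  // where ref_count was used and restored to zero in between).
  if (m->owner.load() != DEFLATER_MARKER) {
    m->ref_count.fetch_add(INT_MAX);  // restore and bail
    return false;
  }
  // Deflated: owner == DEFLATER_MARKER and ref_count < 0 are left
  // in place so racing threads retry.
  return true;
}

int main() {
  MonitorModel m;
  std::printf("idle monitor deflated: %s\n",
              deflate_using_JT_model(&m) ? "yes" : "no");
}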
*** 1625,1644 **** --- 2047,2151 ---- } } return deflated_count; } + // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using + // a JavaThread. Returns the number of deflated ObjectMonitors. The given + // list could be a per-thread in-use list or the global in-use list. + // Caller acquires gListLock as appropriate. If a safepoint has started, + // then we save state via saved_mid_in_use_p and return to the caller to + // honor the safepoint. + // + int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p, + ObjectMonitor** free_head_p, + ObjectMonitor** free_tail_p, + ObjectMonitor** saved_mid_in_use_p) { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + + ObjectMonitor* mid; + ObjectMonitor* next; + ObjectMonitor* cur_mid_in_use = NULL; + int deflated_count = 0; + + if (*saved_mid_in_use_p == NULL) { + // No saved state so start at the beginning. + mid = *list_p; + } else { + // We're restarting after a safepoint so restore the necessary state + // before we resume. + cur_mid_in_use = *saved_mid_in_use_p; + mid = cur_mid_in_use->_next_om; + } + while (mid != NULL) { + // Only try to deflate if there is an associated Java object and if + // mid is old (is not newly allocated and is not newly freed). + if (mid->object() != NULL && mid->is_old() && + deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) { + // Deflation succeeded so update the in-use list. + if (mid == *list_p) { + *list_p = mid->_next_om; + } else if (cur_mid_in_use != NULL) { + // Maintain the current in-use list. + cur_mid_in_use->_next_om = mid->_next_om; + } + next = mid->_next_om; + mid->_next_om = NULL; + // At this point mid is disconnected from the in-use list + // and is the current tail in the free_head_p list. + mid = next; + deflated_count++; + } else { + // mid is considered in-use if it does not have an associated + // Java object or mid is not old or deflation did not succeed. + // A mid->is_new() node can be seen here when it is freshly + // returned by om_alloc() (and skips the deflation code path). + // A mid->is_old() node can be seen here when deflation failed. + // A mid->is_free() node can be seen here when a fresh node from + // om_alloc() is released by om_release() due to losing the race + // in inflate(). + + cur_mid_in_use = mid; + mid = mid->_next_om; + + if (SafepointSynchronize::is_synchronizing() && + cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) { + // If a safepoint has started and cur_mid_in_use is not the list + // head and is old, then it is safe to use as saved state. Return + // to the caller so gListLock can be dropped as appropriate + // before blocking. + *saved_mid_in_use_p = cur_mid_in_use; + return deflated_count; + } + } + } + // We finished the list without a safepoint starting so there's + // no need to save state. 
+ *saved_mid_in_use_p = NULL; + return deflated_count; + } + void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { counters->n_in_use = 0; // currently associated with objects counters->n_in_circulation = 0; // extant counters->n_scavenged = 0; // reclaimed (global and per-thread) counters->per_thread_scavenged = 0; // per-thread scavenge total counters->per_thread_times = 0.0; // per-thread scavenge times } void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + + if (AsyncDeflateIdleMonitors) { + // Nothing to do when global idle ObjectMonitors are deflated using + // a JavaThread unless a special deflation has been requested. + if (!is_special_deflation_requested()) { + return; + } + } + bool deflated = false; ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors ObjectMonitor* free_tail_p = NULL; elapsedTimer timer;
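deflate_monitor_list_using_JT() combines an ordinary singly-linked-list unlink with a resumable cursor: when a safepoint starts, it returns early and stashes the predecessor node so the next call can continue past it. A sketch of just that pattern follows, with hypothetical Node/sweep/should_pause names (should_pause stands in for SafepointSynchronize::is_synchronizing()) and omitting the patch's extra requirement that the saved node be old:

struct Node {
  Node* next = nullptr;
  bool idle = false;   // stand-in for "old, has an object, and deflatable"
};

static bool should_pause() { return false; }  // always false in this sketch

// Walks *list_p, unlinking idle nodes; records a resume point in *saved_p
// when should_pause() fires so a later call can pick up where it left off.
int sweep(Node** list_p, Node** saved_p) {
  Node* prev = nullptr;
  Node* cur;
  if (*saved_p == nullptr) {
    cur = *list_p;                 // no saved state: start at the head
  } else {
    prev = *saved_p;               // resume just past the saved predecessor
    cur = prev->next;
  }
  int removed = 0;
  while (cur != nullptr) {
    if (cur->idle) {
      if (cur == *list_p) {        // unlink the head...
        *list_p = cur->next;
      } else {                     // ...or splice around cur
        prev->next = cur->next;
      }
      Node* next = cur->next;
      cur->next = nullptr;         // cur moves to the caller's free list
      cur = next;
      removed++;
    } else {
      prev = cur;
      cur = cur->next;
      if (should_pause() && prev != *list_p) {
        *saved_p = prev;           // safe resume point; honor the pause
        return removed;
      }
    }
  }
  *saved_p = nullptr;              // finished the list: nothing to resume
  return removed;
}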
*** 1687,1705 **** --- 2194,2343 ---- if (ls != NULL) { ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); } } + // Deflate global idle ObjectMonitors using a JavaThread. + // + void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + JavaThread* self = JavaThread::current(); + + deflate_common_idle_monitors_using_JT(true /* is_global */, self); + } + + // Deflate per-thread idle ObjectMonitors using a JavaThread. + // + void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() { + assert(AsyncDeflateIdleMonitors, "sanity check"); + assert(Thread::current()->is_Java_thread(), "precondition"); + JavaThread* self = JavaThread::current(); + + self->om_request_deflation = false; + + deflate_common_idle_monitors_using_JT(false /* !is_global */, self); + } + + // Deflate global or per-thread idle ObjectMonitors using a JavaThread. + // + void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* self) { + int deflated_count = 0; + ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors + ObjectMonitor* free_tail_p = NULL; + ObjectMonitor* saved_mid_in_use_p = NULL; + elapsedTimer timer; + + if (log_is_enabled(Info, monitorinflation)) { + timer.start(); + } + + if (is_global) { + Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)"); + OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count)); + } else { + OM_PERFDATA_OP(MonExtant, inc(self->om_in_use_count)); + } + + do { + int local_deflated_count; + if (is_global) { + local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p, &saved_mid_in_use_p); + g_om_in_use_count -= local_deflated_count; + } else { + local_deflated_count = deflate_monitor_list_using_JT(self->om_in_use_list_addr(), &free_head_p, &free_tail_p, &saved_mid_in_use_p); + self->om_in_use_count -= local_deflated_count; + } + deflated_count += local_deflated_count; + + if (free_head_p != NULL) { + // Move the scavenged ObjectMonitors to the global free list. + guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count); + assert(free_tail_p->_next_om == NULL, "invariant"); + + if (!is_global) { + Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)"); + } + // Constant-time list splice - prepend scavenged segment to g_free_list. + free_tail_p->_next_om = g_free_list; + g_free_list = free_head_p; + + g_om_free_count += local_deflated_count; + OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); + if (!is_global) { + Thread::muxRelease(&gListLock); + } + } + + if (saved_mid_in_use_p != NULL) { + // deflate_monitor_list_using_JT() detected a safepoint starting. + if (is_global) { + Thread::muxRelease(&gListLock); + } + timer.stop(); + { + if (is_global) { + log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint."); + } else { + log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self)); + } + assert(SafepointSynchronize::is_synchronizing(), "sanity check"); + ThreadBlockInVM blocker(self); + } + // Prepare for another loop after the safepoint. 
+ free_head_p = NULL; + free_tail_p = NULL; + if (log_is_enabled(Info, monitorinflation)) { + timer.start(); + } + if (is_global) { + Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)"); + } + } + } while (saved_mid_in_use_p != NULL); + if (is_global) { + Thread::muxRelease(&gListLock); + } + timer.stop(); + + LogStreamHandle(Debug, monitorinflation) lsh_debug; + LogStreamHandle(Info, monitorinflation) lsh_info; + LogStream* ls = NULL; + if (log_is_enabled(Debug, monitorinflation)) { + ls = &lsh_debug; + } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { + ls = &lsh_info; + } + if (ls != NULL) { + if (is_global) { + ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); + } else { + ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count); + } + } + } + void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { // Report the cumulative time for deflating each thread's idle // monitors. Note: if the work is split among more than one // worker thread, then the reported time will likely be more // than a beginning to end measurement of the phase. + // Note: AsyncDeflateIdleMonitors only deflates per-thread idle + // monitors at a safepoint when a special deflation has been requested. log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged); + bool needs_special_deflation = is_special_deflation_requested(); + if (!AsyncDeflateIdleMonitors || needs_special_deflation) { + // AsyncDeflateIdleMonitors does not use these counters unless + // there is a special deflation request. + g_om_free_count += counters->n_scavenged; + OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged)); + OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation)); + } + if (log_is_enabled(Debug, monitorinflation)) { // exit_globals()'s call to audit_and_print_stats() is done // at the Info level. ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); } else if (log_is_enabled(Info, monitorinflation)) {
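The "constant-time list splice" in deflate_common_idle_monitors_using_JT() prepends the locally collected segment (free_head_p .. free_tail_p) to g_free_list with exactly two pointer writes, regardless of how many monitors were scavenged. A sketch, reusing the Node type from the sketch above (names are illustrative):

// Prepend the segment [seg_head, seg_tail] onto *global_p in O(1): wire the
// segment's tail to the old global head, then publish the segment's head.
// The patch performs this under gListLock; no lock is modeled here.
void splice_prepend(Node* seg_head, Node* seg_tail, Node** global_p) {
  seg_tail->next = *global_p;
  *global_p = seg_head;
}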
*** 1709,1729 **** g_om_in_use_count, g_om_free_count); Thread::muxRelease(&gListLock); } ForceMonitorScavenge = 0; // Reset - - OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged)); - OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation)); - GVars.stw_random = os::random(); GVars.stw_cycle++; } void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors ObjectMonitor* free_tail_p = NULL; elapsedTimer timer; if (log_is_enabled(Info, safepoint, cleanup) || --- 2347,2378 ---- g_om_in_use_count, g_om_free_count); Thread::muxRelease(&gListLock); } ForceMonitorScavenge = 0; // Reset GVars.stw_random = os::random(); GVars.stw_cycle++; + if (needs_special_deflation) { + set_is_special_deflation_requested(false); // special deflation is done + } } void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + if (AsyncDeflateIdleMonitors) { + if (!is_special_deflation_requested()) { + // Mark the JavaThread for idle monitor deflation if a special + // deflation has NOT been requested. + if (thread->om_in_use_count > 0) { + // This JavaThread is using monitors so mark it. + thread->om_request_deflation = true; + } + return; + } + } + ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors ObjectMonitor* free_tail_p = NULL; elapsedTimer timer; if (log_is_enabled(Info, safepoint, cleanup) ||
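The AsyncDeflateIdleMonitors path in deflate_thread_local_monitors() only marks a thread for later work; the thread itself clears om_request_deflation in deflate_per_thread_idle_monitors_using_JT() before scanning its list. A minimal sketch of that request flag, assuming (as the patch can, since marking happens at a safepoint) that plain non-atomic stores suffice; WorkerState and both function names are hypothetical:

struct WorkerState {
  bool deflation_requested = false;  // models JavaThread::om_request_deflation
  int in_use_count = 0;              // models JavaThread::om_in_use_count
};

// Safepoint side: all Java threads are stopped, so a plain store is enough.
void mark_for_deflation(WorkerState* t) {
  if (t->in_use_count > 0) {
    t->deflation_requested = true;   // this thread has monitors to scan
  }
}

// Thread side: clear the flag first, then run the per-thread pass, mirroring
// deflate_per_thread_idle_monitors_using_JT().
void per_thread_pass(WorkerState* t) {
  t->deflation_requested = false;
  // ... walk t's in-use list and deflate idle monitors here ...
}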
*** 1946,1962 **** if (jt != NULL) { out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT ": free per-thread monitor must have NULL _header " "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n), n->header().value()); ! } else { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " "must have NULL _header field: _header=" INTPTR_FORMAT, p2i(n), n->header().value()); - } *error_cnt_p = *error_cnt_p + 1; } if (n->object() != NULL) { if (jt != NULL) { out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT ": free per-thread monitor must have NULL _object " "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n), --- 2595,2612 ---- if (jt != NULL) { out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT ": free per-thread monitor must have NULL _header " "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n), n->header().value()); ! *error_cnt_p = *error_cnt_p + 1; ! } else if (!AsyncDeflateIdleMonitors) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " "must have NULL _header field: _header=" INTPTR_FORMAT, p2i(n), n->header().value()); *error_cnt_p = *error_cnt_p + 1; } + } if (n->object() != NULL) { if (jt != NULL) { out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT ": free per-thread monitor must have NULL _object " "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
*** 2119,2138 **** stringStream ss; if (g_om_in_use_count > 0) { out->print_cr("In-use global monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); ! out->print_cr("%18s %s %18s %18s", ! "monitor", "BHL", "object", "object type"); ! out->print_cr("================== === ================== =================="); for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) { const oop obj = (oop) n->object(); const markWord mark = n->header(); ResourceMark rm; ! out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n), ! n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL, ! p2i(obj), obj->klass()->external_name()); if (n->is_busy() != 0) { out->print(" (%s)", n->is_busy_to_string(&ss)); ss.reset(); } out->cr(); --- 2769,2789 ---- stringStream ss; if (g_om_in_use_count > 0) { out->print_cr("In-use global monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); ! out->print_cr("%18s %s %7s %18s %18s", ! "monitor", "BHL", "ref_cnt", "object", "object type"); ! out->print_cr("================== === ======= ================== =================="); for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) { const oop obj = (oop) n->object(); const markWord mark = n->header(); ResourceMark rm; ! out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s", ! p2i(n), n->is_busy() != 0, mark.hash() != 0, ! n->owner() != NULL, (int)n->ref_count(), p2i(obj), ! obj->klass()->external_name()); if (n->is_busy() != 0) { out->print(" (%s)", n->is_busy_to_string(&ss)); ss.reset(); } out->cr();
*** 2143,2164 **** Thread::muxRelease(&gListLock); } out->print_cr("In-use per-thread monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); ! out->print_cr("%18s %18s %s %18s %18s", ! "jt", "monitor", "BHL", "object", "object type"); ! out->print_cr("================== ================== === ================== =================="); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) { const oop obj = (oop) n->object(); const markWord mark = n->header(); ResourceMark rm; ! out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT ! " %s", p2i(jt), p2i(n), n->is_busy() != 0, ! mark.hash() != 0, n->owner() != NULL, p2i(obj), ! obj->klass()->external_name()); if (n->is_busy() != 0) { out->print(" (%s)", n->is_busy_to_string(&ss)); ss.reset(); } out->cr(); --- 2794,2815 ---- Thread::muxRelease(&gListLock); } out->print_cr("In-use per-thread monitor info:"); out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); ! out->print_cr("%18s %18s %s %7s %18s %18s", ! "jt", "monitor", "BHL", "ref_cnt", "object", "object type"); ! out->print_cr("================== ================== === ======= ================== =================="); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) { const oop obj = (oop) n->object(); const markWord mark = n->header(); ResourceMark rm; ! out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d " ! INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0, ! mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(), ! p2i(obj), obj->klass()->external_name()); if (n->is_busy() != 0) { out->print(" (%s)", n->is_busy_to_string(&ss)); ss.reset(); } out->cr();