src/hotspot/share/runtime/synchronizer.cpp

rev 57232 : v2.00 -> v2.08 (CR8/v2.08/11-for-jdk14) patches combined into one; merge with jdk-14+25 snapshot; merge with jdk-14+26 snapshot.
rev 57233 : See CR8-to-CR9-changes; merge with 8230876.patch (2019.11.15); merge with jdk-14+25 snapshot; fuzzy merge with jdk-14+26 snapshot.

@@ -35,15 +35,17 @@
 #include "oops/markWord.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"

@@ -115,26 +117,299 @@
 
 #define NINFLATIONLOCKS 256
 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 
 // global list of blocks of monitors
-PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
-// Global ObjectMonitor free list. Newly allocated and deflated
-// ObjectMonitors are prepended here.
-ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
-// Global ObjectMonitor in-use list. When a JavaThread is exiting,
-// ObjectMonitors on its per-thread in-use list are prepended here.
-ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
-int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list
-
-static volatile intptr_t gListLock = 0;   // protects global monitor lists
-static volatile int g_om_free_count = 0;  // # on g_free_list
-static volatile int g_om_population = 0;  // # Extant -- in circulation
+PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
+bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
+bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
+jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
+
+struct ListGlobals {
+  char         _pad_prefix[OM_CACHE_LINE_SIZE];
+  // These are highly shared list-related variables.
+  // To avoid false-sharing they need to be the sole occupants of a cache line.
+
+  // Global ObjectMonitor free list. Newly allocated and deflated
+  // ObjectMonitors are prepended here.
+  ObjectMonitor* free_list;
+  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
+  // ObjectMonitors on its per-thread in-use list are prepended here.
+  ObjectMonitor* in_use_list;
+  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+  // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
+  // is true, deflated ObjectMonitors wait on this list until after a
+  // handshake or a safepoint for platforms that don't support handshakes.
+  // After the handshake or safepoint, the deflated ObjectMonitors are
+  // prepended to free_list.
+  ObjectMonitor* wait_list;
+  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
+
+  int free_count;    // # on free_list
+  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
+
+  int in_use_count;  // # on in_use_list
+  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
+
+  int population;    // # Extant -- in circulation
+  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
+
+  int wait_count;    // # on wait_list
+  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
+};
+static ListGlobals LVars;
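A note on the layout above: DEFINE_PAD_MINUS_SIZE inserts enough filler after each field to push the next field onto its own cache line, so concurrent updates to, say, free_count and in_use_count never contend for the same line. A minimal standalone sketch of the same idea, assuming a 64-byte cache line (the names and alignas value are illustrative, not from this patch):

    #include <atomic>

    struct PaddedCounters {
      alignas(64) std::atomic<int> free_count;    // sole occupant of its cache line
      alignas(64) std::atomic<int> in_use_count;  // cannot falsely share with free_count
    };
    static_assert(sizeof(PaddedCounters) >= 2 * 64, "one full cache line per counter");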
 
 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
 
+// =====================> Spinlock functions
+
+// ObjectMonitors are not lockable outside of this file. We use spinlocks
+// implemented using a bit in the _next_om field instead of the heavier
+// weight locking mechanisms for faster list management.
+
+#define OM_LOCK_BIT 0x1
+
+// Returns true if the ObjectMonitor is locked.
+// Otherwise returns false.
+static bool is_locked(ObjectMonitor* om) {
+  return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
+}
+
+// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
+// Note: the om parameter may or may not have been marked originally.
+static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
+  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
+}
+
+// Try to lock an ObjectMonitor. Returns true if locking was successful.
+// Otherwise returns false.
+static bool try_om_lock(ObjectMonitor* om) {
+  // Get current next field without any OM_LOCK_BIT value.
+  ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+  if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
+    return false;  // Cannot lock the ObjectMonitor.
+  }
+  return true;
+}
+
+// Lock an ObjectMonitor.
+static void om_lock(ObjectMonitor* om) {
+  while (true) {
+    if (try_om_lock(om)) {
+      return;
+    }
+  }
+}
+
+// Unlock an ObjectMonitor.
+static void om_unlock(ObjectMonitor* om) {
+  ObjectMonitor* next = Atomic::load(&om->_next_om);
+  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
+            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
+
+  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
+  Atomic::store(&om->_next_om, next);
+}
+
+// Get the list head after locking it. Returns the list head or NULL
+// if the list is empty.
+static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
+  while (true) {
+    ObjectMonitor* mid = Atomic::load(list_p);
+    if (mid == NULL) {
+      return NULL;  // The list is empty.
+    }
+    if (try_om_lock(mid)) {
+      if (Atomic::load(list_p) != mid) {
+        // The list head changed so we have to retry.
+        om_unlock(mid);
+        continue;
+      }
+      return mid;
+    }
+  }
+}
+
+// Return the unmarked next field in an ObjectMonitor. Note: the next
+// field may or may not have been marked with OM_LOCK_BIT originally.
+static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
+  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
+}
+
+#undef OM_LOCK_BIT
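For illustration, a minimal standalone version of this bit-stealing spinlock, assuming the nodes are at least 2-byte aligned so pointer bit 0 is always free (types and names are illustrative, not this file's):

    #include <atomic>
    #include <cstdint>

    struct Node { std::atomic<Node*> next{nullptr}; };
    constexpr uintptr_t LOCK_BIT = 0x1;

    // Mirrors try_om_lock(): CAS from the unlocked next value to the
    // same value with the lock bit set.
    bool try_lock(Node* n) {
      uintptr_t raw = reinterpret_cast<uintptr_t>(n->next.load()) & ~LOCK_BIT;
      Node* unlocked = reinterpret_cast<Node*>(raw);
      Node* locked = reinterpret_cast<Node*>(raw | LOCK_BIT);
      return n->next.compare_exchange_strong(unlocked, locked);
    }

    // Mirrors om_unlock(): a plain store that clears the lock bit.
    void unlock(Node* n) {
      uintptr_t raw = reinterpret_cast<uintptr_t>(n->next.load());
      n->next.store(reinterpret_cast<Node*>(raw & ~LOCK_BIT));
    }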
+
+
+// =====================> List Management functions
+
+// Set the next field in an ObjectMonitor to the specified value.
+static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
+  Atomic::store(&om->_next_om, value);
+}
+
+// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
+// the last ObjectMonitor in the list and there are 'count' on the list.
+// Also updates the specified *count_p.
+static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
+                                   int count, ObjectMonitor** list_p,
+                                   int* count_p) {
+  while (true) {
+    ObjectMonitor* cur = Atomic::load(list_p);
+    // Prepend list to *list_p.
+    if (!try_om_lock(tail)) {
+      continue;  // failed to lock tail so try it all again
+    }
+    set_next(tail, cur);  // tail now points to cur (and unlocks tail)
+    if (cur == NULL) {
+      // No potential race with takers or other prependers since
+      // *list_p is empty.
+      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
+        // Successfully switched *list_p to the list value.
+        Atomic::add(count_p, count);
+        break;
+      }
+      // Implied else: try it all again
+    } else {
+      if (!try_om_lock(cur)) {
+        continue;  // failed to lock cur so try it all again
+      }
+      // We locked cur so try to switch *list_p to the list value.
+      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
+        // The list head has changed so unlock cur and try again:
+        om_unlock(cur);
+        continue;
+      }
+      Atomic::add(count_p, count);
+      om_unlock(cur);
+      break;
+    }
+  }
+}
+
+// Prepend a newly allocated block of ObjectMonitors to g_block_list and
+// LVars.free_list. Also updates LVars.population and LVars.free_count.
+void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
+  // First we handle g_block_list:
+  while (true) {
+    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
+    // Prepend new_blk to g_block_list. The first ObjectMonitor in
+    // a block is reserved for use as linkage to the next block.
+    new_blk[0]._next_om = cur;
+    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
+      // Successfully switched g_block_list to the new_blk value.
+      Atomic::add(&LVars.population, _BLOCKSIZE - 1);
+      break;
+    }
+    // Implied else: try it all again
+  }
+
+  // Second we handle LVars.free_list:
+  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
+                         &LVars.free_list, &LVars.free_count);
+}
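After om_alloc() (below) has formatted a new block, prepend_block_to_lists() leaves it wired up roughly like this, assuming the usual _BLOCKSIZE of 128:

    new_blk[0]._next_om   --> previous g_block_list head   (object == CHAINMARKER)
    new_blk[1]._next_om   --> new_blk[2] --> ... --> new_blk[127]
    new_blk[127]._next_om --> previous LVars.free_list head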
+
+// Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last
+// ObjectMonitor in the list and there are 'count' on the list. Also
+// updates LVars.free_count.
+static void prepend_list_to_global_free_list(ObjectMonitor* list,
+                                             ObjectMonitor* tail, int count) {
+  prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
+}
+
+// Prepend a list of ObjectMonitors to LVars.wait_list. 'tail' is the last
+// ObjectMonitor in the list and there are 'count' on the list. Also
+// updates LVars.wait_count.
+static void prepend_list_to_global_wait_list(ObjectMonitor* list,
+                                             ObjectMonitor* tail, int count) {
+  assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
+  prepend_list_to_common(list, tail, count, &LVars.wait_list, &LVars.wait_count);
+}
+
+// Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last
+// ObjectMonitor in the list and there are 'count' on the list. Also
+// updates LVars.in_use_count.
+static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
+                                               ObjectMonitor* tail, int count) {
+  prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count);
+}
+
+// Prepend an ObjectMonitor to the specified list. Also updates
+// the specified counter.
+static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
+                              int* count_p) {
+  while (true) {
+    om_lock(m);  // Lock m so we can safely update its next field.
+    ObjectMonitor* cur = NULL;
+    // Lock the list head to guard against A-B-A race:
+    if ((cur = get_list_head_locked(list_p)) != NULL) {
+      // List head is now locked so we can safely switch it.
+      set_next(m, cur);  // m now points to cur (and unlocks m)
+      Atomic::store(list_p, m);  // Switch list head to unlocked m.
+      om_unlock(cur);
+      break;
+    }
+    // The list is empty so try to set the list head.
+    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
+    set_next(m, cur);  // m now points to NULL (and unlocks m)
+    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
+      // List head is now unlocked m.
+      break;
+    }
+    // Implied else: try it all again
+  }
+  Atomic::inc(count_p);
+}
+
+// Prepend an ObjectMonitor to a per-thread om_free_list.
+// Also updates the per-thread om_free_count.
+static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
+  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
+}
+
+// Prepend an ObjectMonitor to a per-thread om_in_use_list.
+// Also updates the per-thread om_in_use_count.
+static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
+  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
+}
+
+// Take an ObjectMonitor from the start of the specified list. Also
+// decrements the specified counter. Returns NULL if none are available.
+static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
+                                                int* count_p) {
+  ObjectMonitor* take = NULL;
+  // Lock the list head to guard against A-B-A race:
+  if ((take = get_list_head_locked(list_p)) == NULL) {
+    return NULL;  // None are available.
+  }
+  ObjectMonitor* next = unmarked_next(take);
+  // Switch locked list head to next (which unlocks the list head, but
+  // leaves take locked):
+  Atomic::store(list_p, next);
+  Atomic::dec(count_p);
+  // Unlock take, but leave the next value for any lagging list
+  // walkers. It will get cleaned up when take is prepended to
+  // the in-use list:
+  om_unlock(take);
+  return take;
+}
+
+// Take an ObjectMonitor from the start of the LVars.free_list. Also
+// updates LVars.free_count. Returns NULL if none are available.
+static ObjectMonitor* take_from_start_of_global_free_list() {
+  return take_from_start_of_common(&LVars.free_list, &LVars.free_count);
+}
+
+// Take an ObjectMonitor from the start of a per-thread free-list.
+// Also updates om_free_count. Returns NULL if none are available.
+static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
+  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
+}
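The take side locks the list head because a bare compare-and-swap pop is exposed to the classic A-B-A problem once nodes can be recycled; pushes alone do not need that guard. For contrast, a minimal unguarded Treiber-style push (illustrative, reusing the Node type from the spinlock sketch above):

    std::atomic<Node*> head{nullptr};

    void push(Node* n) {                 // safe without locking the head
      Node* cur = head.load();
      do {
        n->next.store(cur);              // cur is refreshed on each CAS failure
      } while (!head.compare_exchange_weak(cur, n));
    }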
+
+
 // =====================> Quick functions
 
 // The quick_* forms are special fast-path variants used to improve
 // performance.  In the simplest case, a "quick_*" implementation could
 // simply return false, in which case the caller will perform the necessary

@@ -209,14 +484,22 @@
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(self->is_Java_thread(), "invariant");
   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
   NoSafepointVerifier nsv;
   if (obj == NULL) return false;       // Need to throw NPE
+
+  while (true) {
   const markWord mark = obj->mark();
 
   if (mark.has_monitor()) {
-    ObjectMonitor* const m = mark.monitor();
+      ObjectMonitorHandle omh;
+      if (!omh.save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      ObjectMonitor* const m = omh.om_ptr();
     assert(m->object() == obj, "invariant");
     Thread* const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
     // and observability

@@ -238,14 +521,26 @@
     // stack-locking in the object's header, the third check is for
     // recursive stack-locking in the displaced header in the BasicLock,
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markWord::unused_mark());
 
-    if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
+      if (owner == NULL && m->try_set_owner_from(self, NULL) == NULL) {
       assert(m->_recursions == 0, "invariant");
       return true;
     }
+
+      if (AsyncDeflateIdleMonitors &&
+          m->try_set_owner_from(self, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // The deflation protocol finished the first part (setting owner),
+        // but it failed the second part (making ref_count negative) and
+        // bailed. Or the ObjectMonitor was async deflated and reused.
+        // Either way, we have acquired the monitor.
+        assert(m->_recursions == 0, "invariant");
+        return true;
+      }
+    }
+    break;
   }
 
   // Note that we could inflate in quick_enter.
   // This is likely a useful optimization
   // Critically, in quick_enter() we must not:

@@ -293,11 +588,13 @@
   // The object header will never be displaced to this lock,
   // so it does not matter what the value is, except that it
   // must be non-zero to avoid looking like a re-entrant lock,
   // and must not look locked either.
   lock->set_displaced_header(markWord::unused_mark());
-  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
+  omh.om_ptr()->enter(THREAD);
 }
 
 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
   markWord mark = object->mark();
   // We cannot check for Biased Locking if we are racing an inflation.

@@ -342,11 +639,13 @@
       return;
     }
   }
 
   // We have to take the slow-path of possible inflation and then exit.
-  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
+  omh.om_ptr()->exit(true, THREAD);
 }
 
 // -----------------------------------------------------------------------------
 // Class Loader  support to workaround deadlocks on the class loader lock objects
 // Also used by GC

@@ -363,25 +662,26 @@
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
-  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
-
-  return monitor->complete_exit(THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
+  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
+  return ret_code;
 }
 
 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
-  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
-
-  monitor->reenter(recursions, THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
+  omh.om_ptr()->reenter(recursions, THREAD);
 }
 // -----------------------------------------------------------------------------
 // JNI locks on java objects
 // NOTE: must use heavy weight monitor to handle jni monitor enter
 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {

@@ -389,11 +689,13 @@
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   THREAD->set_current_pending_monitor_is_from_java(false);
-  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
+  omh.om_ptr()->enter(THREAD);
   THREAD->set_current_pending_monitor_is_from_java(true);
 }
 
 // NOTE: must use heavy weight monitor to handle jni monitor exit
 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {

@@ -402,11 +704,13 @@
     BiasedLocking::revoke(h_obj, THREAD);
     obj = h_obj();
   }
   assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 
-  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
+  ObjectMonitor* monitor = omh.om_ptr();
   // If this thread has locked the object, exit the monitor. We
   // intentionally do not use CHECK here because we must exit the
   // monitor even if an exception is pending.
   if (monitor->check_owner(THREAD)) {
     monitor->exit(true, THREAD);

@@ -443,31 +747,36 @@
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
-  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_wait);
+  ObjectMonitor* monitor = omh.om_ptr();
 
   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
   monitor->wait(millis, true, THREAD);
 
   // This dummy call is in place to get around dtrace bug 6254741.  Once
   // that's fixed we can uncomment the following line, remove the call
   // and change this function back into a "void" func.
   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
-  return dtrace_waited_probe(monitor, obj, THREAD);
+  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
+  return ret_code;
 }
 
 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);
     assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
-  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_wait);
+  omh.om_ptr()->wait(millis, false, THREAD);
 }
 
 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke(obj, THREAD);

@@ -476,11 +785,13 @@
 
   markWord mark = obj->mark();
   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
     return;
   }
-  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_notify);
+  omh.om_ptr()->notify(THREAD);
 }
 
 // NOTE: see comment of notify()
 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
   if (UseBiasedLocking) {

@@ -490,11 +801,13 @@
 
   markWord mark = obj->mark();
   if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
     return;
   }
-  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
+  ObjectMonitorHandle omh;
+  inflate(&omh, THREAD, obj(), inflate_cause_notify);
+  omh.om_ptr()->notifyAll(THREAD);
 }
 
 // -----------------------------------------------------------------------------
 // Hash Code handling
 //

@@ -515,19 +828,19 @@
 // As a general policy we use "volatile" to control compiler-based reordering
 // and explicit fences (barriers) to control for architectural reordering
 // performed by the CPU(s) or platform.
 
 struct SharedGlobals {
-  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
+  char         _pad_prefix[OM_CACHE_LINE_SIZE];
   // These are highly shared mostly-read variables.
   // To avoid false-sharing they need to be the sole occupants of a cache line.
   volatile int stw_random;
   volatile int stw_cycle;
-  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
+  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
   // Hot RW variable -- Sequester to avoid false-sharing
   volatile int hc_sequence;
-  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
+  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
 };
 
 static SharedGlobals GVars;
 static int _forceMonitorScavenge = 0; // Scavenge required and pending
 

@@ -683,10 +996,11 @@
   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
          self->is_Java_thread() , "invariant");
   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
          ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
 
+  while (true) {
   ObjectMonitor* monitor = NULL;
   markWord temp, test;
   intptr_t hash;
   markWord mark = read_stable_mark(obj);
 

@@ -708,13 +1022,22 @@
     // Failed to install the hash. It could be that another thread
     // installed the hash just before our attempt or inflation has
     // occurred or... so we fall thru to inflate the monitor for
     // stability and then install the hash.
   } else if (mark.has_monitor()) {
-    monitor = mark.monitor();
+      ObjectMonitorHandle omh;
+      if (!omh.save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      monitor = omh.om_ptr();
     temp = monitor->header();
-    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+      // Allow for a lagging install_displaced_markword_in_object() to
+      // have marked the ObjectMonitor's header/dmw field.
+      assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
+             "invariant: header=" INTPTR_FORMAT, temp.value());
     hash = temp.hash();
     if (hash != 0) {                  // if it has a hash, just return it
       return hash;
     }
     // Fall thru so we only have one place that installs the hash in

@@ -737,34 +1060,53 @@
     // during an inflate() call so any change to that stack memory
     // may not propagate to other threads correctly.
   }
 
   // Inflate the monitor to set the hash.
-  monitor = inflate(self, obj, inflate_cause_hash_code);
+    ObjectMonitorHandle omh;
+    inflate(&omh, self, obj, inflate_cause_hash_code);
+    monitor = omh.om_ptr();
   // Load ObjectMonitor's header/dmw field and see if it has a hash.
   mark = monitor->header();
-  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+    // Allow for a lagging install_displaced_markword_in_object() to
+    // have marked the ObjectMonitor's header/dmw field.
+    assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
+           "invariant: header=" INTPTR_FORMAT, mark.value());
   hash = mark.hash();
   if (hash == 0) {                    // if it does not have a hash
     hash = get_next_hash(self, obj);  // get a new hash
     temp = mark.copy_set_hash(hash);  // merge the hash into header
+      if (AsyncDeflateIdleMonitors && temp.is_marked()) {
+        // A lagging install_displaced_markword_in_object() has marked
+        // the ObjectMonitor's header/dmw field. We clear it to avoid
+        // any confusion if we are able to set the hash.
+        temp.set_unmarked();
+      }
     assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
     uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
     test = markWord(v);
     if (test != mark) {
       // The attempt to update the ObjectMonitor's header/dmw field
       // did not work. This can happen if another thread managed to
-      // merge in the hash just before our cmpxchg().
+        // merge in the hash just before our cmpxchg(). With async
+        // deflation, a lagging install_displaced_markword_in_object()
+        // could have just marked or just unmarked the header/dmw field.
       // If we add any new usages of the header/dmw field, this code
       // will need to be updated.
+        if (AsyncDeflateIdleMonitors) {
+          // Since async deflation gives us two possible reasons for
+          // the cmpxchg() to fail, it is easier to simply retry.
+          continue;
+        }
       hash = test.hash();
       assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
       assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
     }
   }
   // We finally get the hash.
   return hash;
+  }
 }
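The hash install in the loop above is a read-modify-CAS on the header word: copy_set_hash() merges the new hash bits into a copy of the mark, and the cmpxchg() publishes that copy only if no other thread changed the header first. A simplified sketch, assuming the 64-bit HotSpot layout of a 31-bit hash at shift 8 (treat the constants as assumptions):

    #include <atomic>
    #include <cstdint>

    constexpr int      HASH_SHIFT = 8;
    constexpr uint64_t HASH_MASK  = ((uint64_t(1) << 31) - 1) << HASH_SHIFT;

    uint64_t copy_set_hash(uint64_t mark, uint64_t hash) {
      return (mark & ~HASH_MASK) | ((hash << HASH_SHIFT) & HASH_MASK);
    }

    // Returns true if the merged mark was published; false if another
    // thread changed the header first (the retry case above).
    bool install_hash(std::atomic<uint64_t>& header, uint64_t mark, uint64_t hash) {
      uint64_t merged = copy_set_hash(mark, hash);
      return header.compare_exchange_strong(mark, merged);
    }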
 
 // Deprecated -- use FastHashCode() instead.
 
 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {

@@ -780,24 +1122,32 @@
   }
 
   assert(thread == JavaThread::current(), "Can only be called on current thread");
   oop obj = h_obj();
 
+  while (true) {
   markWord mark = read_stable_mark(obj);
 
   // Uncontended case, header points to stack
   if (mark.has_locker()) {
     return thread->is_lock_owned((address)mark.locker());
   }
   // Contended case, header points to ObjectMonitor (tagged pointer)
   if (mark.has_monitor()) {
-    ObjectMonitor* monitor = mark.monitor();
-    return monitor->is_entered(thread) != 0;
+      ObjectMonitorHandle omh;
+      if (!omh.save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
+      return ret_code;
   }
   // Unlocked case, header in place
   assert(mark.is_neutral(), "sanity check");
   return false;
+  }
 }
 
 // Be aware of this method could revoke bias of the lock object.
 // This method queries the ownership of the lock handle specified by 'h_obj'.
 // If the current thread owns the lock, it returns owner_self. If no

@@ -819,31 +1169,41 @@
            "biases should be revoked by now");
   }
 
   assert(self == JavaThread::current(), "Can only be called on current thread");
   oop obj = h_obj();
+
+  while (true) {
   markWord mark = read_stable_mark(obj);
 
   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
   if (mark.has_locker()) {
     return self->is_lock_owned((address)mark.locker()) ?
       owner_self : owner_other;
   }
 
   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
   // The Object:ObjectMonitor relationship is stable as long as we're
-  // not at a safepoint.
+    // not at a safepoint and AsyncDeflateIdleMonitors is false.
   if (mark.has_monitor()) {
-    void* owner = mark.monitor()->_owner;
+      ObjectMonitorHandle omh;
+      if (!omh.save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      ObjectMonitor* monitor = omh.om_ptr();
+      void* owner = monitor->_owner;
     if (owner == NULL) return owner_none;
     return (owner == self ||
             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
   }
 
   // CASE: neutral
   assert(mark.is_neutral(), "sanity check");
   return owner_none;           // it's unlocked
+  }
 }
 
 // FIXME: jvmti should call this
 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
   if (UseBiasedLocking) {

@@ -854,22 +1214,29 @@
     }
     assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   oop obj = h_obj();
-  address owner = NULL;
 
+  while (true) {
+    address owner = NULL;
   markWord mark = read_stable_mark(obj);
 
   // Uncontended case, header points to stack
   if (mark.has_locker()) {
     owner = (address) mark.locker();
   }
 
   // Contended case, header points to ObjectMonitor (tagged pointer)
   else if (mark.has_monitor()) {
-    ObjectMonitor* monitor = mark.monitor();
+      ObjectMonitorHandle omh;
+      if (!omh.save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      ObjectMonitor* monitor = omh.om_ptr();
     assert(monitor != NULL, "monitor should be non-null");
     owner = (address) monitor->owner();
   }
 
   if (owner != NULL) {

@@ -881,107 +1248,168 @@
   // Cannot have assertion since this object may have been
   // locked by another thread when reaching here.
   // assert(mark.is_neutral(), "sanity check");
 
   return NULL;
+  }
 }
 
 // Visitors ...
 
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
+  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
-      oop object = (oop)mid->object();
-      if (object != NULL) {
+      ObjectMonitorHandle omh;
+      if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
+        // The ObjectMonitor* is not free and it has been made safe.
+        if (mid->object() == NULL) {
         // Only process with closure if the object is set.
+          continue;
+        }
         closure->do_monitor(mid);
       }
     }
-    block = (PaddedObjectMonitor*)block->_next_om;
+    // unmarked_next() is not needed with g_block_list (no locking
+    // used with block linkage _next_om fields).
+    block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
 }
 
 static bool monitors_used_above_threshold() {
-  if (g_om_population == 0) {
+  if (Atomic::load(&LVars.population) == 0) {
     return false;
   }
-  int monitors_used = g_om_population - g_om_free_count;
-  int monitor_usage = (monitors_used * 100LL) / g_om_population;
+  if (MonitorUsedDeflationThreshold > 0) {
+    int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
+    if (HandshakeAfterDeflateIdleMonitors) {
+      monitors_used -= Atomic::load(&LVars.wait_count);
+    }
+    int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
   return monitor_usage > MonitorUsedDeflationThreshold;
+  }
+  return false;
 }
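A worked example of the check above, with assumed counts: LVars.population == 1000, LVars.free_count == 200 and LVars.wait_count == 50 with HandshakeAfterDeflateIdleMonitors enabled gives monitors_used == 750, so monitor_usage == (750 * 100) / 1000 == 75 and the function returns true for any MonitorUsedDeflationThreshold below 75.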
 
-bool ObjectSynchronizer::is_cleanup_needed() {
-  if (MonitorUsedDeflationThreshold > 0) {
-    if (monitors_used_above_threshold()) {
+// Returns true if MonitorBound is set (> 0) and if the specified
+// cnt is > MonitorBound. Otherwise returns false.
+static bool is_MonitorBound_exceeded(const int cnt) {
+  const int mx = MonitorBound;
+  return mx > 0 && cnt > mx;
+}
+
+bool ObjectSynchronizer::is_async_deflation_needed() {
+  if (!AsyncDeflateIdleMonitors) {
+    return false;
+  }
+  if (is_async_deflation_requested()) {
+    // Async deflation request.
       return true;
     }
+  if (AsyncDeflationInterval > 0 &&
+      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
+      monitors_used_above_threshold()) {
+    // It's been longer than our specified deflate interval and there
+    // are too many monitors in use. We don't deflate more frequently
+    // than AsyncDeflationInterval (unless is_async_deflation_requested)
+    // in order to not swamp the ServiceThread.
+    _last_async_deflation_time_ns = os::javaTimeNanos();
+    return true;
   }
-  return needs_monitor_scavenge();
+  int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
+  if (HandshakeAfterDeflateIdleMonitors) {
+    monitors_used -= Atomic::load(&LVars.wait_count);
+  }
+  if (is_MonitorBound_exceeded(monitors_used)) {
+    // Not enough ObjectMonitors on the global free list.
+    return true;
+  }
+  return false;
 }
 
 bool ObjectSynchronizer::needs_monitor_scavenge() {
   if (Atomic::load(&_forceMonitorScavenge) == 1) {
     log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
     return true;
   }
   return false;
 }
 
+bool ObjectSynchronizer::is_safepoint_deflation_needed() {
+  if (!AsyncDeflateIdleMonitors) {
+    if (monitors_used_above_threshold()) {
+      // Too many monitors in use.
+      return true;
+    }
+    return needs_monitor_scavenge();
+  }
+  if (is_special_deflation_requested()) {
+    // For AsyncDeflateIdleMonitors only do a safepoint deflation
+    // if there is a special deflation request.
+    return true;
+  }
+  return false;
+}
+
+jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
+  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
+}
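NANOUNITS / MILLIUNITS is 1,000,000,000 / 1,000 == 1,000,000 nanoseconds per millisecond, so the nanosecond delta from os::javaTimeNanos() converts directly to milliseconds.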
+
 void ObjectSynchronizer::oops_do(OopClosure* f) {
   // We only scan the global used list here (for moribund threads), and
   // the thread-local monitors in Thread::oops_do().
   global_used_oops_do(f);
 }
 
 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  list_oops_do(g_om_in_use_list, f);
+  list_oops_do(Atomic::load(&LVars.in_use_list), Atomic::load(&LVars.in_use_count), f);
 }
 
 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  list_oops_do(thread->om_in_use_list, f);
+  list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
 }
 
-void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
+void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  ObjectMonitor* mid;
-  for (mid = list; mid != NULL; mid = mid->_next_om) {
+  // The oops_do() phase does not overlap with monitor deflation
+  // so no need to update the ObjectMonitor's ref_count for this
+  // ObjectMonitor* use and no need to mark ObjectMonitors for the
+  // list traversal.
+  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
     if (mid->object() != NULL) {
       f->do_oop((oop*)mid->object_addr());
     }
   }
 }
 
 
 // -----------------------------------------------------------------------------
 // ObjectMonitor Lifecycle
 // -----------------------
-// Inflation unlinks monitors from the global g_free_list and
-// associates them with objects.  Deflation -- which occurs at
-// STW-time -- disassociates idle monitors from objects.  Such
-// scavenged monitors are returned to the g_free_list.
-//
-// The global list is protected by gListLock.  All the critical sections
-// are short and operate in constant-time.
+// Inflation unlinks monitors from LVars.free_list or a per-thread free
+// list and associates them with objects. Deflation -- which occurs at
+// STW-time or asynchronously -- disassociates idle monitors from objects.
+// Such scavenged monitors are returned to the LVars.free_list.
 //
 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
 //
 // Lifecycle:
-// --   unassigned and on the global free list
-// --   unassigned and on a thread's private om_free_list
+// --   unassigned and on the LVars.free_list
+// --   unassigned and on a per-thread free list
 // --   assigned to an object.  The object is inflated and the mark refers
-//      to the objectmonitor.
+//      to the ObjectMonitor.
 
 
 // Constraining monitor pool growth via MonitorBound ...
 //
 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
 //
+// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
 // the rate of scavenging is driven primarily by GC.  As such,  we can find
 // an inordinate number of monitors in circulation.
 // To avoid that scenario we can artificially induce a STW safepoint
 // if the pool appears to be growing past some reasonable bound.

@@ -990,80 +1418,114 @@
 // type of limit.  Beware that if MonitorBound is set to too low a value
 // we could just loop. In addition, if MonitorBound is set to a low value
 // we'll incur more safepoints, which are harmful to performance.
 // See also: GuaranteedSafepointInterval
 //
-// If MonitorBound is set, the boundry applies to
-//     (g_om_population - g_om_free_count)
+// When safepoint deflation is being used and MonitorBound is set, the
+// boundary applies to
+//     (LVars.population - LVars.free_count)
 // i.e., if there are not enough ObjectMonitors on the global free list,
 // then a safepoint deflation is induced. Picking a good MonitorBound value
 // is non-trivial.
+//
+// When async deflation is being used:
+// The monitor pool is still grow-only. Async deflation is requested
+// by a safepoint's cleanup phase or by the ServiceThread at periodic
+// intervals when is_async_deflation_needed() returns true. In
+// addition to other policies that are checked, if there are not
+// enough ObjectMonitors on the global free list, then
+// is_async_deflation_needed() will return true. The ServiceThread
+// calls deflate_global_idle_monitors_using_JT() and also calls
+// deflate_per_thread_idle_monitors_using_JT() as needed.
 
 static void InduceScavenge(Thread* self, const char * Whence) {
+  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
+
   // Induce STW safepoint to trim monitors
   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
   // More precisely, trigger a cleanup safepoint as the number
   // of active monitors passes the specified threshold.
   // TODO: assert thread state is reasonable
 
-  if (Atomic::xchg (&_forceMonitorScavenge, 1) == 0) {
+  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
     VMThread::check_for_forced_cleanup();
   }
 }
 
-ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
+ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
+                                            const InflateCause cause) {
   // A large MAXPRIVATE value reduces both list lock contention
   // and list coherency traffic, but also tends to increase the
   // number of ObjectMonitors in circulation as well as the STW
   // scavenge costs.  As usual, we lean toward time in space-time
   // tradeoffs.
   const int MAXPRIVATE = 1024;
+
   stringStream ss;
   for (;;) {
     ObjectMonitor* m;
 
     // 1: try to allocate from the thread's local om_free_list.
     // Threads will attempt to allocate first from their local list, then
-    // from the global list, and only after those attempts fail will the thread
-    // attempt to instantiate new monitors.   Thread-local free lists take
-    // heat off the gListLock and improve allocation latency, as well as reducing
-    // coherency traffic on the shared global list.
-    m = self->om_free_list;
+    // from the global list, and only after those attempts fail will the
+    // thread attempt to instantiate new monitors. Thread-local free lists
+    // improve allocation latency, as well as reducing coherency traffic
+    // on the shared global list.
+    m = take_from_start_of_om_free_list(self);
     if (m != NULL) {
-      self->om_free_list = m->_next_om;
-      self->om_free_count--;
       guarantee(m->object() == NULL, "invariant");
-      m->_next_om = self->om_in_use_list;
-      self->om_in_use_list = m;
-      self->om_in_use_count++;
+      m->set_allocation_state(ObjectMonitor::New);
+      prepend_to_om_in_use_list(self, m);
       return m;
     }
 
-    // 2: try to allocate from the global g_free_list
+    // 2: try to allocate from the global LVars.free_list
     // CONSIDER: use muxTry() instead of muxAcquire().
     // If the muxTry() fails then drop immediately into case 3.
     // If we're using thread-local free lists then try
     // to reprovision the caller's free list.
-    if (g_free_list != NULL) {
+    if (Atomic::load(&LVars.free_list) != NULL) {
       // Reprovision the thread's om_free_list.
       // Use bulk transfers to reduce the allocation rate and heat
       // on various locks.
-      Thread::muxAcquire(&gListLock, "om_alloc(1)");
-      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
-        g_om_free_count--;
-        ObjectMonitor* take = g_free_list;
-        g_free_list = take->_next_om;
+      for (int i = self->om_free_provision; --i >= 0;) {
+        ObjectMonitor* take = take_from_start_of_global_free_list();
+        if (take == NULL) {
+          break;  // No more are available.
+        }
         guarantee(take->object() == NULL, "invariant");
+        if (AsyncDeflateIdleMonitors) {
+          // We allowed 3 field values to linger during async deflation.
+          // We clear header and restore ref_count here, but we leave
+          // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
+          // enter optimization can no longer race with async deflation
+          // and reuse.
+          take->set_header(markWord::zero());
+          if (take->ref_count() < 0) {
+            // Add back max_jint to restore the ref_count field to its
+            // proper value.
+            Atomic::add(&take->_ref_count, max_jint);
+
+#ifdef ASSERT
+            jint l_ref_count = take->ref_count();
+#endif
+            assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
+                   l_ref_count, take->ref_count());
+          }
+        }
         take->Recycle();
+        // Since we're taking from the global free-list, take must be Free.
+        // om_release() also sets the allocation state to Free because it
+        // is called from other code paths.
+        assert(take->is_free(), "invariant");
         om_release(self, take, false);
       }
-      Thread::muxRelease(&gListLock);
-      self->om_free_provision += 1 + (self->om_free_provision/2);
+      self->om_free_provision += 1 + (self->om_free_provision / 2);
       if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
 
-      const int mx = MonitorBound;
-      if (mx > 0 && (g_om_population-g_om_free_count) > mx) {
+      if (!AsyncDeflateIdleMonitors &&
+          is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) {
         // Not enough ObjectMonitors on the global free list.
         // We can't safely induce a STW safepoint from om_alloc() as our thread
         // state may not be appropriate for such activities and callers may hold
         // naked oops, so instead we defer the action.
         InduceScavenge(self, "om_alloc");

@@ -1080,13 +1542,13 @@
     // A better solution would be to use C++ placement-new.
     // BEWARE: As it stands currently, we don't run the ctors!
     assert(_BLOCKSIZE > 1, "invariant");
     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
     PaddedObjectMonitor* temp;
-    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
+    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
     void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
-    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
+    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
     (void)memset((void *) temp, 0, neededsize);
 
     // Format the block.
     // initialize the linked list, each monitor points to its next
     // forming the single linked free list, the very first monitor

@@ -1094,41 +1556,25 @@
     // The trick of using the 1st element in the block as g_block_list
     // linkage should be reconsidered.  A better implementation would
     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
 
     for (int i = 1; i < _BLOCKSIZE; i++) {
-      temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
+      temp[i]._next_om = (ObjectMonitor*)&temp[i + 1];
+      assert(temp[i].is_free(), "invariant");
     }
 
     // terminate the last monitor as the end of list
-    temp[_BLOCKSIZE - 1]._next_om = NULL;
+    temp[_BLOCKSIZE - 1]._next_om = (ObjectMonitor*)NULL;
 
     // Element [0] is reserved for global list linkage
     temp[0].set_object(CHAINMARKER);
 
     // Consider carving out this thread's current request from the
     // block in hand.  This avoids some lock traffic and redundant
     // list activity.
 
-    // Acquire the gListLock to manipulate g_block_list and g_free_list.
-    // An Oyama-Taura-Yonezawa scheme might be more efficient.
-    Thread::muxAcquire(&gListLock, "om_alloc(2)");
-    g_om_population += _BLOCKSIZE-1;
-    g_om_free_count += _BLOCKSIZE-1;
-
-    // Add the new block to the list of extant blocks (g_block_list).
-    // The very first ObjectMonitor in a block is reserved and dedicated.
-    // It serves as blocklist "next" linkage.
-    temp[0]._next_om = g_block_list;
-    // There are lock-free uses of g_block_list so make sure that
-    // the previous stores happen before we update g_block_list.
-    Atomic::release_store(&g_block_list, temp);
-
-    // Add the new string of ObjectMonitors to the global free list
-    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
-    g_free_list = temp + 1;
-    Thread::muxRelease(&gListLock);
+    prepend_block_to_lists(temp);
   }
 }
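The block allocation above uses the classic over-allocate-and-round-up idiom: NEW_C_HEAP_ARRAY grabs aligned_size == neededsize + (OM_CACHE_LINE_SIZE - 1) bytes and align_up() advances the pointer to the next cache-line boundary. A standalone sketch of that idiom (illustrative; the raw pointer is deliberately dropped here because, like ObjectMonitor blocks, the storage is treated as immortal type-stable memory):

    #include <cstdint>
    #include <cstdlib>

    // Returns 'size' bytes aligned to 'align', which must be a power of two.
    // Error handling omitted for brevity.
    void* alloc_aligned(std::size_t size, std::size_t align) {
      char* raw = static_cast<char*>(std::malloc(size + align - 1));
      uintptr_t p = reinterpret_cast<uintptr_t>(raw);
      return reinterpret_cast<void*>((p + align - 1) & ~(uintptr_t)(align - 1));
    }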
 
 // Place "m" on the caller's private per-thread om_free_list.
 // In practice there's no need to clamp or limit the number of

@@ -1137,46 +1583,79 @@
 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
 //
 // Key constraint: all ObjectMonitors on a thread's free list and the global
 // free list must have their object field set to null. This prevents the
-// scavenger -- deflate_monitor_list() -- from reclaiming them while we
-// are trying to release them.
+// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
+// -- from reclaiming them while we are trying to release them.
 
 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                     bool from_per_thread_alloc) {
   guarantee(m->header().value() == 0, "invariant");
   guarantee(m->object() == NULL, "invariant");
   stringStream ss;
   guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
             "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
             m->_recursions);
+  m->set_allocation_state(ObjectMonitor::Free);
   // _next_om is used for both per-thread in-use and free lists so
   // we have to remove 'm' from the in-use list first (as needed).
   if (from_per_thread_alloc) {
     // Need to remove 'm' from om_in_use_list.
+    // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
+    // protocol because async deflation can do list deletions in parallel.
     ObjectMonitor* cur_mid_in_use = NULL;
+    ObjectMonitor* mid = NULL;
+    ObjectMonitor* next = NULL;
     bool extracted = false;
-    for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
+
+    if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
+      fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
+    }
+    next = unmarked_next(mid);
+    while (true) {
       if (m == mid) {
-        // extract from per-thread in-use list
-        if (mid == self->om_in_use_list) {
-          self->om_in_use_list = mid->_next_om;
-        } else if (cur_mid_in_use != NULL) {
-          cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
+        // We found 'm' on the per-thread in-use list so try to extract it.
+        if (cur_mid_in_use == NULL) {
+          // mid is the list head and it is locked. Switch the list head
+          // to next which unlocks the list head, but leaves mid locked:
+          Atomic::store(&self->om_in_use_list, next);
+        } else {
+          // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
+          // next field to next which unlocks cur_mid_in_use, but leaves
+          // mid locked:
+          set_next(cur_mid_in_use, next);
         }
         extracted = true;
-        self->om_in_use_count--;
+        Atomic::dec(&self->om_in_use_count);
+        // Unlock mid, but leave the next value for any lagging list
+        // walkers. It will get cleaned up when mid is prepended to
+        // the thread's free list:
+        om_unlock(mid);
         break;
       }
+      if (cur_mid_in_use != NULL) {
+        om_unlock(cur_mid_in_use);
+      }
+      // The next cur_mid_in_use keeps mid's locked state so
+      // that it is stable for a possible next field change. It
+      // cannot be deflated while it is locked.
+      cur_mid_in_use = mid;
+      mid = next;
+      if (mid == NULL) {
+        // Reached end of the list and didn't find m so:
+        fatal("must find m=" INTPTR_FORMAT "on om_in_use_list=" INTPTR_FORMAT,
+              p2i(m), p2i(self->om_in_use_list));
+      }
+      // Lock mid so we can possibly extract it:
+      om_lock(mid);
+      next = unmarked_next(mid);
     }
-    assert(extracted, "Should have extracted from in-use list");
   }
 
-  m->_next_om = self->om_free_list;
-  self->om_free_list = m;
-  self->om_free_count++;
+  prepend_to_om_free_list(self, m);
+  guarantee(m->is_free(), "invariant");
 }
 
 // Return ObjectMonitors on a moribund thread's free and in-use
 // lists to the appropriate global lists. The ObjectMonitors on the
 // per-thread in-use list may still be in use by other threads.

@@ -1187,66 +1666,108 @@
 // a safepoint and interleave with deflate_idle_monitors(). In
 // particular, this ensures that the thread's in-use monitors are
 // scanned by a GC safepoint, either via Thread::oops_do() (before
 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
 // om_flush() is called).
+//
+// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
+// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
+// run at the same time as om_flush() so we have to follow a careful
+// protocol to prevent list corruption.
 
 void ObjectSynchronizer::om_flush(Thread* self) {
+  // This function can race with an async deflater thread. Since
+  // deflation has to process the per-thread in-use list before
+  // prepending the deflated ObjectMonitors to the global free list,
+  // we process the per-thread lists in the same order to prevent
+  // ordering races.
+  int in_use_count = 0;
+  ObjectMonitor* in_use_list = NULL;
+  ObjectMonitor* in_use_tail = NULL;
+
+  // An async deflation thread checks to see if the target thread
+  // is exiting, but if it has made it past that check before we
+  // started exiting, then it is racing to get to the in-use list.
+  if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
+    // At this point, we have locked the in-use list head so an
+    // async deflation thread cannot come in after us. If an async
+    // deflation thread is ahead of us, then we'll detect that and
+    // wait for it to finish its work.
+    //
+    // The thread is going away, however the ObjectMonitors on the
+    // om_in_use_list may still be in-use by other threads. Link
+    // them to in_use_tail, which will be linked into the global
+    // in-use list (LVars.in_use_list) below.
+    //
+    // Account for the in-use list head before the loop since it is
+    // already locked (by this thread):
+    in_use_tail = in_use_list;
+    in_use_count++;
+    for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
+      if (is_locked(cur_om)) {
+        // cur_om is locked so there must be an async deflater
+        // thread ahead of us so we'll give it a chance to finish.
+        while (is_locked(cur_om)) {
+          os::naked_short_sleep(1);
+        }
+        // Refetch the possibly changed next field and try again.
+        cur_om = unmarked_next(in_use_tail);
+        continue;
+      }
+      if (cur_om->is_free()) {
+        // cur_om was deflated and the allocation state was changed
+        // to Free while it was locked. We happened to see it just
+        // after it was unlocked (and added to the free list).
+        // Refetch the possibly changed next field and try again.
+        cur_om = unmarked_next(in_use_tail);
+        continue;
+      }
+      in_use_tail = cur_om;
+      in_use_count++;
+      cur_om = unmarked_next(cur_om);
+    }
+    guarantee(in_use_tail != NULL, "invariant");
+    int l_om_in_use_count = self->om_in_use_count;
+    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
+                   "match: l_om_in_use_count=%d, in_use_count=%d",
+                   l_om_in_use_count, in_use_count);
+    self->om_in_use_count = 0;
+    // Clear the in-use list head (which also unlocks it):
+    Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
+    om_unlock(in_use_list);
+  }
+
+  int free_count = 0;
   ObjectMonitor* free_list = self->om_free_list;
   ObjectMonitor* free_tail = NULL;
-  int free_count = 0;
   if (free_list != NULL) {
-    ObjectMonitor* s;
     // The thread is going away. Set 'free_tail' to the last per-thread free
-    // monitor which will be linked to g_free_list below under the gListLock.
+    // monitor which will be linked to LVars.free_list below.
     stringStream ss;
-    for (s = free_list; s != NULL; s = s->_next_om) {
+    for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
       free_count++;
       free_tail = s;
       guarantee(s->object() == NULL, "invariant");
       guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
     }
     guarantee(free_tail != NULL, "invariant");
-    assert(self->om_free_count == free_count, "free-count off");
-    self->om_free_list = NULL;
+    int l_om_free_count = self->om_free_count;
+    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
+                   "l_om_free_count=%d, free_count=%d", l_om_free_count,
+                   free_count);
     self->om_free_count = 0;
+    Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
   }
 
-  ObjectMonitor* in_use_list = self->om_in_use_list;
-  ObjectMonitor* in_use_tail = NULL;
-  int in_use_count = 0;
-  if (in_use_list != NULL) {
-    // The thread is going away, however the ObjectMonitors on the
-    // om_in_use_list may still be in-use by other threads. Link
-    // them to in_use_tail, which will be linked into the global
-    // in-use list g_om_in_use_list below, under the gListLock.
-    ObjectMonitor *cur_om;
-    for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
-      in_use_tail = cur_om;
-      in_use_count++;
-    }
-    guarantee(in_use_tail != NULL, "invariant");
-    assert(self->om_in_use_count == in_use_count, "in-use count off");
-    self->om_in_use_list = NULL;
-    self->om_in_use_count = 0;
-  }
-
-  Thread::muxAcquire(&gListLock, "om_flush");
   if (free_tail != NULL) {
-    free_tail->_next_om = g_free_list;
-    g_free_list = free_list;
-    g_om_free_count += free_count;
+    prepend_list_to_global_free_list(free_list, free_tail, free_count);
   }
 
   if (in_use_tail != NULL) {
-    in_use_tail->_next_om = g_om_in_use_list;
-    g_om_in_use_list = in_use_list;
-    g_om_in_use_count += in_use_count;
+    prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
   }
 
-  Thread::muxRelease(&gListLock);
-
   LogStreamHandle(Debug, monitorinflation) lsh_debug;
   LogStreamHandle(Info, monitorinflation) lsh_info;
   LogStream* ls = NULL;
   if (log_is_enabled(Debug, monitorinflation)) {
     ls = &lsh_debug;

@@ -1271,23 +1792,32 @@
   event->set_cause((u1)cause);
   event->commit();
 }
 
 // Fast path code shared by multiple functions
-void ObjectSynchronizer::inflate_helper(oop obj) {
+void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
+  while (true) {
-  markWord mark = obj->mark();
-  if (mark.has_monitor()) {
+    markWord mark = obj->mark();
+    if (mark.has_monitor()) {
-    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
-    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
+      if (!omh_p->save_om_ptr(obj, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      ObjectMonitor* monitor = omh_p->om_ptr();
+      assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
+      markWord dmw = monitor->header();
+      assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
+      return;
+    }
+    inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
     return;
   }
-  inflate(Thread::current(), obj, inflate_cause_vm_internal);
 }
 
-ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
-                                           oop object,
-                                           const InflateCause cause) {
+void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
+                                 oop object, const InflateCause cause) {
   // Inflate mutates the heap ...
   // Relaxing assertion for bug 6320749.
   assert(Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant");
 

@@ -1304,16 +1834,21 @@
     // *  Neutral      - aggressively inflate the object.
     // *  BIASED       - Illegal.  We should never see this
 
     // CASE: inflated
     if (mark.has_monitor()) {
-      ObjectMonitor* inf = mark.monitor();
+      if (!omh_p->save_om_ptr(object, mark)) {
+        // Lost a race with async deflation so try again.
+        assert(AsyncDeflateIdleMonitors, "sanity check");
+        continue;
+      }
+      ObjectMonitor* inf = omh_p->om_ptr();
       markWord dmw = inf->header();
       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
       assert(inf->object() == object, "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
-      return inf;
+      return;
     }
 
     // CASE: inflation in progress - inflating over a stack-lock.
     // Some other thread is converting from stack-locked to inflated.
     // Only that thread can complete inflation -- other threads must wait.

@@ -1345,20 +1880,21 @@
     // See the comments in om_alloc().
 
     LogStreamHandle(Trace, monitorinflation) lsh;
 
     if (mark.has_locker()) {
-      ObjectMonitor* m = om_alloc(self);
+      ObjectMonitor* m = om_alloc(self, cause);
       // Optimistically prepare the objectmonitor - anticipate successful CAS
       // We do this before the CAS in order to minimize the length of time
       // in which INFLATING appears in the mark.
       m->Recycle();
       m->_Responsible  = NULL;
       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
 
       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
       if (cmp != mark) {
+        // om_release() will reset the allocation state from New to Free.
         om_release(self, m, true);
         continue;       // Interference -- just retry
       }
 
       // We've successfully installed INFLATING (0) into the mark-word.

@@ -1392,29 +1928,40 @@
       // object is in the mark.  Furthermore the owner can't complete
       // an unlock on the object, either.
       markWord dmw = mark.displaced_mark_helper();
       // Catch if the object's header is not neutral (not locked and
       // not marked is what we care about here).
-      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
+      ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
 
       // Setup monitor fields to proper values -- prepare the monitor
       m->set_header(dmw);
 
       // Optimization: if the mark.locker stack address is associated
       // with this thread we could simply set m->_owner = self.
       // Note that a thread can inflate an object
       // that it has stack-locked -- as might happen in wait() -- directly
       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
-      m->set_owner(mark.locker());
+      if (AsyncDeflateIdleMonitors) {
+        m->simply_set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
+      } else {
+        m->simply_set_owner_from(mark.locker(), NULL);
+      }
       m->set_object(object);
       // TODO-FIXME: assert BasicLock->dhw != 0.
 
+      omh_p->set_om_ptr(m);
+
       // Must preserve store ordering. The monitor state must
       // be stable at the time of publishing the monitor address.
       guarantee(object->mark() == markWord::INFLATING(), "invariant");
       object->release_set_mark(markWord::encode(m));
 
+      // Once ObjectMonitor is configured and the object is associated
+      // with the ObjectMonitor, it is safe to allow async deflation:
+      assert(m->is_new(), "freshly allocated monitor must be new");
+      m->set_allocation_state(ObjectMonitor::Old);
+
       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
       OM_PERFDATA_OP(Inflations, inc());
       if (log_is_enabled(Trace, monitorinflation)) {
         ResourceMark rm(self);

@@ -1423,11 +1970,12 @@
                      object->mark().value(), object->klass()->external_name());
       }
       if (event.should_commit()) {
         post_monitor_inflate_event(&event, object, cause);
       }
-      return m;
+      ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
+      return;
     }
 
     // CASE: neutral
     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
     // If we know we're inflating for entry it's better to inflate by swinging a

@@ -1437,31 +1985,43 @@
     // to inflate and then CAS() again to try to swing _owner from NULL to self.
     // An inflateTry() method that we could call from enter() would be useful.
 
     // Catch if the object's header is not neutral (not locked and
     // not marked is what we care about here).
-    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
-    ObjectMonitor* m = om_alloc(self);
+    ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+    ObjectMonitor* m = om_alloc(self, cause);
     // prepare m for installation - set monitor to initial state
     m->Recycle();
     m->set_header(mark);
+    // If we leave _owner == DEFLATER_MARKER here, then the simple C2
+    // ObjectMonitor enter optimization can no longer race with async
+    // deflation and reuse.
     m->set_object(object);
     m->_Responsible  = NULL;
     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
 
+    omh_p->set_om_ptr(m);
+
     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
       m->set_header(markWord::zero());
       m->set_object(NULL);
       m->Recycle();
+      omh_p->set_om_ptr(NULL);
+      // om_release() will reset the allocation state from New to Free.
       om_release(self, m, true);
       m = NULL;
       continue;
       // interference - the markword changed - just retry.
       // The state-transitions are one-way, so there's no chance of
       // live-lock -- "Inflated" is an absorbing state.
     }
 
+    // Once the ObjectMonitor is configured and the object is associated
+    // with the ObjectMonitor, it is safe to allow async deflation:
+    assert(m->is_new(), "freshly allocated monitor must be new");
+    m->set_allocation_state(ObjectMonitor::Old);
+
     // Hopefully the performance counters are allocated on distinct
     // cache lines to avoid false sharing on MP systems ...
     OM_PERFDATA_OP(Inflations, inc());
     if (log_is_enabled(Trace, monitorinflation)) {
       ResourceMark rm(self);

@@ -1470,17 +2030,19 @@
                    object->mark().value(), object->klass()->external_name());
     }
     if (event.should_commit()) {
       post_monitor_inflate_event(&event, object, cause);
     }
-    return m;
+    ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
+    return;
   }
 }
 
 
 // We maintain a list of in-use monitors for each thread.
 //
+// For safepoint based deflation:
 // deflate_thread_local_monitors() scans a single thread's in-use list, while
 // deflate_idle_monitors() scans only a global list of in-use monitors which
 // is populated only as a thread dies (see om_flush()).
 //
 // These operations are called at all safepoints, immediately after mutators

@@ -1495,10 +2057,44 @@
 //
 // Perversely, the heap size -- and thus the STW safepoint rate --
 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
 // This is an unfortunate aspect of this design.
+//
+// For async deflation:
+// If a special deflation request is made, then the safepoint based
+// deflation mechanism is used. Otherwise, an async deflation request
+// is registered with the ServiceThread and it is notified.
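+//
+// A rough sketch of that hand-off (illustrative; it assumes the
+// ServiceThread checks is_async_deflation_requested() when it wakes up):
+//
+//   VM thread (safepoint cleanup)            ServiceThread
+//   -----------------------------            -------------
+//   set_is_async_deflation_requested(true)
+//   notify_all() on Service_lock     ---->   sees the request and calls
+//                                            deflate_idle_monitors_using_JT()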
+
+void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+  // The per-thread in-use lists are handled in
+  // ParallelSPCleanupThreadClosure::do_thread().
+
+  if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
+    // Use the older mechanism for the global in-use list or if a
+    // special deflation has been requested before the safepoint.
+    ObjectSynchronizer::deflate_idle_monitors(counters);
+    return;
+  }
+
+  log_debug(monitorinflation)("requesting async deflation of idle monitors.");
+  // Request deflation of idle monitors by the ServiceThread:
+  set_is_async_deflation_requested(true);
+  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  ml.notify_all();
+
+  if (log_is_enabled(Debug, monitorinflation)) {
+    // exit_globals()'s call to audit_and_print_stats() is done
+    // at the Info level and not at a safepoint.
+    // For safepoint based deflation, audit_and_print_stats() is called
+    // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
+    // Debug level at a safepoint.
+    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
+  }
+}
 
 // Deflate a single monitor if not in-use
 // Return true if deflated, false if in-use
 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                          ObjectMonitor** free_head_p,

@@ -1513,11 +2109,13 @@
   guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
             ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
   const markWord dmw = mid->header();
   guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
 
-  if (mid->is_busy()) {
+  if (mid->is_busy() || mid->ref_count() != 0) {
+    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
+    // is in use so no deflation.
     deflated = false;
   } else {
     // Deflate the monitor if it is no longer being used
     // It's idle - scavenge and return to the global free list
     // plain old deflation ...

@@ -1529,25 +2127,38 @@
                                   mark.value(), obj->klass()->external_name());
     }
 
     // Restore the header back to obj
     obj->release_set_mark(dmw);
+    if (AsyncDeflateIdleMonitors) {
+      // clear() expects the owner field to be NULL and we won't race
+      // with the simple C2 ObjectMonitor enter optimization since
+      // we're at a safepoint. DEFLATER_MARKER is the only non-NULL
+      // value we should see here.
+      mid->try_set_owner_from(NULL, DEFLATER_MARKER);
+    }
     mid->clear();
 
     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
            p2i(mid->object()));
+    assert(mid->is_free(), "invariant");
 
     // Move the deflated ObjectMonitor to the working free list
-    // defined by free_head_p and free_tail_p.
+    // defined by free_head_p and free_tail_p. No races on this list
+    // so no need for load_acquire() or store_release().
     if (*free_head_p == NULL) *free_head_p = mid;
     if (*free_tail_p != NULL) {
       // We append to the list so the caller can use mid->_next_om
       // to fix the linkages in its context.
       ObjectMonitor* prevtail = *free_tail_p;
       // Should have been cleaned up by the caller:
-      assert(prevtail->_next_om == NULL, "cleaned up deflated?");
-      prevtail->_next_om = mid;
+      // Note: Should not have to lock prevtail here since we're at a
+      // safepoint and ObjectMonitors on the local free list should
+      // not be accessed in parallel.
+      assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
+             INTPTR_FORMAT, p2i(prevtail->_next_om));
+      set_next(prevtail, mid);
     }
     *free_tail_p = mid;
     // At this point, mid->_next_om still refers to its current
     // value and another ObjectMonitor's _next_om field still
     // refers to this ObjectMonitor. Those linkages have to be

@@ -1555,13 +2166,157 @@
     deflated = true;
   }
   return deflated;
 }
 
-// Walk a given monitor list, and deflate idle monitors
-// The given list could be a per-thread list or a global list
-// Caller acquires gListLock as needed.
+// Deflate the specified ObjectMonitor if not in-use using a JavaThread.
+// Returns true if it was deflated and false otherwise.
+//
+// The async deflation protocol sets owner to DEFLATER_MARKER and
+// makes ref_count negative as signals to contending threads that
+// an async deflation is in progress. There are a number of checks
+// as part of the protocol to make sure that the calling thread has
+// not lost the race to a contending thread or to a thread that just
+// wants to use the ObjectMonitor*.
+//
+// The ObjectMonitor has been successfully async deflated when:
+// (owner == DEFLATER_MARKER && ref_count < 0)
+// Contending threads or ObjectMonitor* using threads that see those
+// values know to retry their operation.
+//
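+// A condensed sketch of the protocol below (illustrative only; the real
+// code spells out each failure path):
+//
+//   if (is_busy() || ref_count() != 0)                      return false;  // quick checks
+//   if (try_set_owner_from(DEFLATER_MARKER, NULL) != NULL)  return false;  // part 1
+//   if (contentions != 0 || waiters != 0)       { restore owner; return false; }
+//   if (cmpxchg(ref_count, 0, -max_jint) != 0)  { restore owner; return false; }  // part 2
+//   if (owner != DEFLATER_MARKER)           { restore ref_count; return false; }  // part 3
+//   extract mid to the free list; leave owner and ref_count as-is; return true;
+//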
+bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
+                                                  ObjectMonitor** free_head_p,
+                                                  ObjectMonitor** free_tail_p) {
+  assert(AsyncDeflateIdleMonitors, "sanity check");
+  assert(Thread::current()->is_Java_thread(), "precondition");
+  // A newly allocated ObjectMonitor should not be seen here so we
+  // avoid an endless inflate/deflate cycle.
+  assert(mid->is_old(), "must be old: allocation_state=%d",
+         (int) mid->allocation_state());
+
+  if (mid->is_busy() || mid->ref_count() != 0) {
+    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
+    // is in use so no deflation.
+    return false;
+  }
+
+  if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) == NULL) {
+    // ObjectMonitor is not owned by another thread. Our setting
+    // owner to DEFLATER_MARKER forces any contending thread through
+    // the slow path. This is just the first part of the async
+    // deflation dance.
+
+    if (mid->_contentions != 0 || mid->_waiters != 0) {
+      // Another thread has raced to enter the ObjectMonitor after
+      // mid->is_busy() above or has already entered and waited on
+      // it which makes it busy so no deflation. Restore owner to
+      // NULL if it is still DEFLATER_MARKER.
+      mid->try_set_owner_from(NULL, DEFLATER_MARKER);
+      return false;
+    }
+
+    if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) {
+      // Make ref_count negative to force any contending threads or
+      // ObjectMonitor* using threads to retry. This is the second
+      // part of the async deflation dance.
+
+      if (mid->owner_is_DEFLATER_MARKER()) {
+        // If owner is still DEFLATER_MARKER, then we have successfully
+        // signaled any contending threads to retry. If it is not, then we
+        // have lost the race to an entering thread and the ObjectMonitor
+        // is now busy. This is the third and final part of the async
+        // deflation dance.
+        // Note: This owner check solves the ABA problem with ref_count
+        // where another thread acquired the ObjectMonitor, finished
+        // using it and restored the ref_count to zero.
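+        // An illustrative interleaving that this re-check catches:
+        //   deflater: owner NULL -> DEFLATER_MARKER; sees ref_count == 0
+        //   T2:       takes a ref (ref_count 0 -> 1), enters (owner -> T2),
+        //             exits (owner -> NULL), drops the ref (ref_count -> 0)
+        //   deflater: cmpxchg(ref_count, 0, -max_jint) succeeds anyway,
+        //             but owner != DEFLATER_MARKER so we bail out below.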
+
+        // Sanity checks for the races:
+        guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
+                  mid->_contentions);
+        guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
+        guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
+                  INTPTR_FORMAT, p2i(mid->_cxq));
+        guarantee(mid->_EntryList == NULL,
+                  "must be no entering threads: EntryList=" INTPTR_FORMAT,
+                  p2i(mid->_EntryList));
+
+        const oop obj = (oop) mid->object();
+        if (log_is_enabled(Trace, monitorinflation)) {
+          ResourceMark rm;
+          log_trace(monitorinflation)("deflate_monitor_using_JT: "
+                                      "object=" INTPTR_FORMAT ", mark="
+                                      INTPTR_FORMAT ", type='%s'",
+                                      p2i(obj), obj->mark().value(),
+                                      obj->klass()->external_name());
+        }
+
+        // Install the old mark word if nobody else has already done it.
+        mid->install_displaced_markword_in_object(obj);
+        mid->clear_using_JT();
+
+        assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
+               p2i(mid->object()));
+        assert(mid->is_free(), "must be free: allocation_state=%d",
+               (int) mid->allocation_state());
+
+        // Move the deflated ObjectMonitor to the working free list
+        // defined by free_head_p and free_tail_p. No races on this list
+        // so no need for load_acquire() or store_release().
+        if (*free_head_p == NULL) {
+          // First one on the list.
+          *free_head_p = mid;
+        }
+        if (*free_tail_p != NULL) {
+          // We append to the list so the caller can use mid->_next_om
+          // to fix the linkages in its context.
+          ObjectMonitor* prevtail = *free_tail_p;
+          // Should have been cleaned up by the caller:
+          om_lock(prevtail);
+          assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om="
+                 INTPTR_FORMAT, p2i(unmarked_next(prevtail)));
+          set_next(prevtail, mid);  // prevtail now points to mid (and is unlocked)
+        }
+        *free_tail_p = mid;
+
+        // At this point, mid->_next_om still refers to its current
+        // value and another ObjectMonitor's _next_om field still
+        // refers to this ObjectMonitor. Those linkages have to be
+        // cleaned up by the caller who has the complete context.
+
+        // We leave owner == DEFLATER_MARKER and ref_count < 0
+        // to force any racing threads to retry.
+        return true;  // Success, ObjectMonitor has been deflated.
+      }
+
+      // The owner was changed from DEFLATER_MARKER so we lost the
+      // race since the ObjectMonitor is now busy.
+
+      // Add back max_jint to restore the ref_count field to its
+      // proper value (which may not be what we saw above):
+      Atomic::add(&mid->_ref_count, max_jint);
+
+#ifdef ASSERT
+      jint l_ref_count = mid->ref_count();
+#endif
+      assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
+             l_ref_count, mid->ref_count());
+      return false;
+    }
+
+    // The ref_count was no longer 0 so we lost the race since the
+    // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
+    // Restore owner to NULL if it is still DEFLATER_MARKER:
+    mid->try_set_owner_from(NULL, DEFLATER_MARKER);
+  }
+
+  // The owner field is no longer NULL so we lost the race since the
+  // ObjectMonitor is now busy.
+  return false;
+}
+
+// Walk a given monitor list, and deflate idle monitors.
+// The given list could be a per-thread list or a global list.
 //
 // In the case of parallel processing of thread local monitor lists,
 // work is done by Threads::parallel_threads_do() which ensures that
 // each Java thread is processed by exactly one worker thread, and
 // thus avoid conflicts that would arise when worker threads would

@@ -1569,87 +2324,255 @@
 //
 // See also ParallelSPCleanupTask and
 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
 // Threads::parallel_java_threads_do() in thread.cpp.
 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
+                                             int* count_p,
                                              ObjectMonitor** free_head_p,
                                              ObjectMonitor** free_tail_p) {
-  ObjectMonitor* mid;
-  ObjectMonitor* next;
   ObjectMonitor* cur_mid_in_use = NULL;
+  ObjectMonitor* mid = NULL;
+  ObjectMonitor* next = NULL;
   int deflated_count = 0;
 
-  for (mid = *list_p; mid != NULL;) {
+  // We use the simpler lock-mid-as-we-go protocol because there are
+  // no parallel list deletions while we are at a safepoint.
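+  // In picture form, with '*' marking a locked next field, only the
+  // node currently being examined is ever locked (illustrative):
+  //   head -> ... -> cur_mid_in_use -> [mid]* -> next -> ...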
+  if ((mid = get_list_head_locked(list_p)) == NULL) {
+    return 0;  // The list is empty so nothing to deflate.
+  }
+  next = unmarked_next(mid);
+
+  while (true) {
     oop obj = (oop) mid->object();
     if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
       // Deflation succeeded and already updated free_head_p and
       // free_tail_p as needed. Finish the move to the local free list
       // by unlinking mid from the global or per-thread in-use list.
-      if (mid == *list_p) {
-        *list_p = mid->_next_om;
-      } else if (cur_mid_in_use != NULL) {
-        cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
+      if (cur_mid_in_use == NULL) {
+        // mid is the list head and it is locked. Switch the list head
+        // to next which unlocks the list head, but leaves mid locked:
+        Atomic::store(list_p, next);
+      } else {
+        // mid is locked. Switch cur_mid_in_use's next field to next
+        // which is safe because we have no parallel list deletions,
+        // but we leave mid locked:
+        set_next(cur_mid_in_use, next);
       }
-      next = mid->_next_om;
-      mid->_next_om = NULL;  // This mid is current tail in the free_head_p list
-      mid = next;
+      // At this point mid is disconnected from the in-use list so
+      // its lock no longer has any effects on the in-use list.
       deflated_count++;
+      Atomic::dec(count_p);
+      // mid is current tail in the free_head_p list so NULL terminate it
+      // (which also unlocks it):
+      set_next(mid, NULL);
     } else {
+      om_unlock(mid);
+      cur_mid_in_use = mid;
+    }
+    // All the list management is done so move on to the next one:
+    mid = next;
+    if (mid == NULL) {
+      break;  // Reached end of the list so nothing more to deflate.
+    }
+    // Lock mid so we can possibly deflate it:
+    om_lock(mid);
+    next = unmarked_next(mid);
+  }
+  return deflated_count;
+}
+
+// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
+// a JavaThread. Returns the number of deflated ObjectMonitors. The given
+// list could be a per-thread in-use list or the global in-use list.
+// If a safepoint has started, then we save state via saved_mid_in_use_p
+// and return to the caller to honor the safepoint.
+//
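+// A typical caller loops until the whole list has been processed; this
+// sketch mirrors deflate_common_idle_monitors_using_JT() below:
+//
+//   ObjectMonitor* saved_mid_in_use_p = NULL;
+//   do {
+//     int n = deflate_monitor_list_using_JT(list_p, count_p, &free_head_p,
+//                                           &free_tail_p, &saved_mid_in_use_p);
+//     // ... prepend any scavenged ObjectMonitors to a global list, then
+//     // block in a ThreadBlockInVM if a safepoint was pending ...
+//   } while (saved_mid_in_use_p != NULL);
+//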
+int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
+                                                      int* count_p,
+                                                      ObjectMonitor** free_head_p,
+                                                      ObjectMonitor** free_tail_p,
+                                                      ObjectMonitor** saved_mid_in_use_p) {
+  assert(AsyncDeflateIdleMonitors, "sanity check");
+  JavaThread* self = JavaThread::current();
+
+  ObjectMonitor* cur_mid_in_use = NULL;
+  ObjectMonitor* mid = NULL;
+  ObjectMonitor* next = NULL;
+  ObjectMonitor* next_next = NULL;
+  int deflated_count = 0;
+
+  // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
+  // protocol because om_release() can do list deletions in parallel.
+  // We also lock-next-next-as-we-go to prevent an om_flush() that is
+  // behind this thread from passing us.
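+  // Pictorially, with '*' marking a locked next field (illustrative):
+  //   ... -> [cur_mid_in_use]* -> [mid]* -> [next]* -> next_next -> ...
+  // cur_mid_in_use and mid are locked so om_release() cannot unlink
+  // them underneath us, and next is locked so an om_flush() behind us
+  // cannot pass us when mid is unlocked.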
+  if (*saved_mid_in_use_p == NULL) {
+    // No saved state so start at the beginning.
+    // Lock the list head so we can possibly deflate it:
+    if ((mid = get_list_head_locked(list_p)) == NULL) {
+      return 0;  // The list is empty so nothing to deflate.
+    }
+    next = unmarked_next(mid);
+  } else {
+    // We're restarting after a safepoint so restore the necessary state
+    // before we resume.
+    cur_mid_in_use = *saved_mid_in_use_p;
+    // Lock cur_mid_in_use so we can possibly update its
+    // next field to extract a deflated ObjectMonitor.
+    om_lock(cur_mid_in_use);
+    mid = unmarked_next(cur_mid_in_use);
+    if (mid == NULL) {
+      om_unlock(cur_mid_in_use);
+      *saved_mid_in_use_p = NULL;
+      return 0;  // The remainder is empty so nothing more to deflate.
+    }
+    // Lock mid so we can possibly deflate it:
+    om_lock(mid);
+    next = unmarked_next(mid);
+  }
+
+  while (true) {
+    // The current mid's next field is locked at this point. If we have
+    // a cur_mid_in_use, then its next field is also locked at this point.
+
+    if (next != NULL) {
+      // We lock next so that an om_flush() thread that is behind us
+      // cannot pass us when we unlock the current mid.
+      om_lock(next);
+      next_next = unmarked_next(next);
+    }
+
+    // Only try to deflate if there is an associated Java object and if
+    // mid is old (is not newly allocated and is not newly freed).
+    if (mid->object() != NULL && mid->is_old() &&
+        deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
+      // Deflation succeeded and already updated free_head_p and
+      // free_tail_p as needed. Finish the move to the local free list
+      // by unlinking mid from the global or per-thread in-use list.
+      if (cur_mid_in_use == NULL) {
+        // mid is the list head and it is locked. Switch the list head
+        // to next which is also locked (if not NULL) and also leave
+        // mid locked:
+        Atomic::store(list_p, next);
+      } else {
+        ObjectMonitor* locked_next = mark_om_ptr(next);
+        // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
+        // next field to locked_next and also leave mid locked:
+        set_next(cur_mid_in_use, locked_next);
+      }
+      // At this point mid is disconnected from the in-use list so
+      // its lock no longer has any effects on the in-use list.
+      deflated_count++;
+      Atomic::dec(count_p);
+      // mid is current tail in the free_head_p list so NULL terminate it
+      // (which also unlocks it):
+      set_next(mid, NULL);
+
+      // All the list management is done so move on to the next one:
+      mid = next;  // mid keeps non-NULL next's locked state
+      next = next_next;
+    } else {
+      // mid is considered in-use if it does not have an associated
+      // Java object or mid is not old or deflation did not succeed.
+      // A mid->is_new() node can be seen here when it is freshly
+      // returned by om_alloc() (and skips the deflation code path).
+      // A mid->is_old() node can be seen here when deflation failed.
+      // A mid->is_free() node can be seen here when a fresh node from
+      // om_alloc() is released by om_release() due to losing the race
+      // in inflate().
+
+      // All the list management is done so move on to the next one:
+      if (cur_mid_in_use != NULL) {
+        om_unlock(cur_mid_in_use);
+      }
+      // The new cur_mid_in_use keeps mid's locked state so
+      // that it is stable for a possible next field change. It
+      // cannot be modified by om_release() while it is locked.
       cur_mid_in_use = mid;
-      mid = mid->_next_om;
+      mid = next;  // mid keeps non-NULL next's locked state
+      next = next_next;
+
+      if (SafepointMechanism::should_block(self) &&
+          cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
+        // If a safepoint has started and cur_mid_in_use is not the list
+        // head and is old, then it is safe to use as saved state. Return
+        // to the caller before blocking.
+        *saved_mid_in_use_p = cur_mid_in_use;
+        om_unlock(cur_mid_in_use);
+        if (mid != NULL) {
+          om_unlock(mid);
+        }
+        return deflated_count;
+      }
     }
+    if (mid == NULL) {
+      if (cur_mid_in_use != NULL) {
+        om_unlock(cur_mid_in_use);
+      }
+      break;  // Reached end of the list so nothing more to deflate.
+    }
+
+    // The current mid's next field is locked at this point. If we have
+    // a cur_mid_in_use, then it is also locked at this point.
   }
+  // We finished the list without a safepoint starting so there's
+  // no need to save state.
+  *saved_mid_in_use_p = NULL;
   return deflated_count;
 }
 
 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
   counters->n_in_use = 0;              // currently associated with objects
   counters->n_in_circulation = 0;      // extant
   counters->n_scavenged = 0;           // reclaimed (global and per-thread)
   counters->per_thread_scavenged = 0;  // per-thread scavenge total
   counters->per_thread_times = 0.0;    // per-thread scavenge times
+  OrderAccess::storestore();           // flush inits for worker threads
 }
 
 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+  if (AsyncDeflateIdleMonitors) {
+    // Nothing to do when global idle ObjectMonitors are deflated using
+    // a JavaThread unless a special deflation has been requested.
+    if (!is_special_deflation_requested()) {
+      return;
+    }
+  }
+
   bool deflated = false;
 
   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
   ObjectMonitor* free_tail_p = NULL;
   elapsedTimer timer;
 
   if (log_is_enabled(Info, monitorinflation)) {
     timer.start();
   }
 
-  // Prevent om_flush from changing mids in Thread dtor's during deflation
-  // And in case the vm thread is acquiring a lock during a safepoint
-  // See e.g. 6320749
-  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
-
   // Note: the thread-local monitors lists get deflated in
   // a separate pass. See deflate_thread_local_monitors().
 
-  // For moribund threads, scan g_om_in_use_list
+  // For moribund threads, scan LVars.in_use_list
   int deflated_count = 0;
-  if (g_om_in_use_list) {
-    counters->n_in_circulation += g_om_in_use_count;
-    deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
-    g_om_in_use_count -= deflated_count;
-    counters->n_scavenged += deflated_count;
-    counters->n_in_use += g_om_in_use_count;
+  if (Atomic::load(&LVars.in_use_list) != NULL) {
+    // Update n_in_circulation before LVars.in_use_count is updated by deflation.
+    Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count));
+
+    deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p);
+    Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
   }
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
-    guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
-    assert(free_tail_p->_next_om == NULL, "invariant");
-    // constant-time list splice - prepend scavenged segment to g_free_list
-    free_tail_p->_next_om = g_free_list;
-    g_free_list = free_head_p;
+    // No races on the working free list so no need for load_acquire().
+    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
+    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
+           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
+    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
+    Atomic::add(&counters->n_scavenged, deflated_count);
   }
-  Thread::muxRelease(&gListLock);
   timer.stop();
 
   LogStreamHandle(Debug, monitorinflation) lsh_debug;
   LogStreamHandle(Info, monitorinflation) lsh_info;
   LogStream* ls = NULL;

@@ -1661,81 +2584,272 @@
   if (ls != NULL) {
     ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
   }
 }
 
+class HandshakeForDeflation : public HandshakeClosure {
+ public:
+  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
+
+  void do_thread(Thread* thread) {
+    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
+                                INTPTR_FORMAT, p2i(thread));
+  }
+};
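+
+// Note that do_thread() above does no real work; the point of the
+// handshake is that once every JavaThread has executed it, no thread
+// can still be using an ObjectMonitor* it fetched before the monitor
+// was deflated, so the ObjectMonitors parked on LVars.wait_list can
+// safely be recycled to LVars.free_list (see below).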
+
+void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
+  assert(AsyncDeflateIdleMonitors, "sanity check");
+
+  // Deflate any global idle monitors.
+  deflate_global_idle_monitors_using_JT();
+
+  int count = 0;
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
+    if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
+      // This JavaThread is using ObjectMonitors so deflate any that
+      // are idle unless this JavaThread is exiting; do not race with
+      // ObjectSynchronizer::om_flush().
+      deflate_per_thread_idle_monitors_using_JT(jt);
+      count++;
+    }
+  }
+  if (count > 0) {
+    log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
+  }
+
+  log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
+                             "global_free_count=%d, global_wait_count=%d",
+                             Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
+                             Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
+
+  // The ServiceThread's async deflation request has been processed.
+  set_is_async_deflation_requested(false);
+
+  if (HandshakeAfterDeflateIdleMonitors && Atomic::load(&LVars.wait_count) > 0) {
+    // There are deflated ObjectMonitors waiting for a handshake
+    // (or a safepoint) for safety.
+
+    ObjectMonitor* list = Atomic::load(&LVars.wait_list);
+    ADIM_guarantee(list != NULL, "LVars.wait_list must not be NULL");
+    int count = Atomic::load(&LVars.wait_count);
+    Atomic::store(&LVars.wait_count, 0);
+    Atomic::store(&LVars.wait_list, (ObjectMonitor*)NULL);
+
+    // Find the tail for prepend_list_to_common(). No need to mark
+    // ObjectMonitors for this list walk since only the deflater
+    // thread manages the wait list.
+    int l_count = 0;
+    ObjectMonitor* tail = NULL;
+    for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
+      tail = n;
+      l_count++;
+    }
+    ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
+
+    // Will execute a safepoint if !ThreadLocalHandshakes:
+    HandshakeForDeflation hfd_hc;
+    Handshake::execute(&hfd_hc);
+
+    prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
+
+    log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
+  }
+}
+
+// Deflate global idle ObjectMonitors using a JavaThread.
+//
+void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
+  assert(AsyncDeflateIdleMonitors, "sanity check");
+  assert(Thread::current()->is_Java_thread(), "precondition");
+  JavaThread* self = JavaThread::current();
+
+  deflate_common_idle_monitors_using_JT(true /* is_global */, self);
+}
+
+// Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
+//
+void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
+  assert(AsyncDeflateIdleMonitors, "sanity check");
+  assert(Thread::current()->is_Java_thread(), "precondition");
+
+  deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
+}
+
+// Deflate global or per-thread idle ObjectMonitors using a JavaThread.
+//
+void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
+  JavaThread* self = JavaThread::current();
+
+  int deflated_count = 0;
+  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
+  ObjectMonitor* free_tail_p = NULL;
+  ObjectMonitor* saved_mid_in_use_p = NULL;
+  elapsedTimer timer;
+
+  if (log_is_enabled(Info, monitorinflation)) {
+    timer.start();
+  }
+
+  if (is_global) {
+    OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&LVars.in_use_count)));
+  } else {
+    OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
+  }
+
+  do {
+    int local_deflated_count;
+    if (is_global) {
+      local_deflated_count = deflate_monitor_list_using_JT(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
+    } else {
+      local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
+    }
+    deflated_count += local_deflated_count;
+
+    if (free_head_p != NULL) {
+      // Move the deflated ObjectMonitors to the global free list.
+      // No races on the working list so no need for load_acquire().
+      guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
+      // Note: The target thread can be doing an om_alloc() that
+      // is trying to prepend an ObjectMonitor on its in-use list
+      // at the same time that we have deflated the current in-use
+      // list head and put it on the local free list. prepend_list_to_common()
+      // will detect the race and retry which avoids list corruption,
+      // but the next field in free_tail_p can flicker to marked
+      // and then unmarked while prepend_to_common() is sorting it
+      // all out.
+      assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om="
+             INTPTR_FORMAT, p2i(unmarked_next(free_tail_p)));
+
+      if (HandshakeAfterDeflateIdleMonitors) {
+        prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
+      } else {
+        prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
+      }
+
+      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
+    }
+
+    if (saved_mid_in_use_p != NULL) {
+      // deflate_monitor_list_using_JT() detected a safepoint starting.
+      timer.stop();
+      {
+        if (is_global) {
+          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
+        } else {
+          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
+        }
+        assert(SafepointMechanism::should_block(self), "sanity check");
+        ThreadBlockInVM blocker(self);
+      }
+      // Prepare for another loop after the safepoint.
+      free_head_p = NULL;
+      free_tail_p = NULL;
+      if (log_is_enabled(Info, monitorinflation)) {
+        timer.start();
+      }
+    }
+  } while (saved_mid_in_use_p != NULL);
+  timer.stop();
+
+  LogStreamHandle(Debug, monitorinflation) lsh_debug;
+  LogStreamHandle(Info, monitorinflation) lsh_info;
+  LogStream* ls = NULL;
+  if (log_is_enabled(Debug, monitorinflation)) {
+    ls = &lsh_debug;
+  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
+    ls = &lsh_info;
+  }
+  if (ls != NULL) {
+    if (is_global) {
+      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
+    } else {
+      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
+    }
+  }
+}
+
 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
   // Report the cumulative time for deflating each thread's idle
   // monitors. Note: if the work is split among more than one
   // worker thread, then the reported time will likely be more
   // than a beginning to end measurement of the phase.
   log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
 
-  g_om_free_count += counters->n_scavenged;
+  bool needs_special_deflation = is_special_deflation_requested();
+  if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
+    // Nothing to do when idle ObjectMonitors are deflated using
+    // a JavaThread unless a special deflation has been requested.
+    return;
+  }
 
   if (log_is_enabled(Debug, monitorinflation)) {
     // exit_globals()'s call to audit_and_print_stats() is done
-    // at the Info level.
+    // at the Info level and not at a safepoint.
+    // For async deflation, audit_and_print_stats() is called in
+    // ObjectSynchronizer::do_safepoint_work() at the Debug level
+    // at a safepoint.
     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
   } else if (log_is_enabled(Info, monitorinflation)) {
-    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
-    log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
-                               "g_om_free_count=%d", g_om_population,
-                               g_om_in_use_count, g_om_free_count);
-    Thread::muxRelease(&gListLock);
+    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
+                               "global_free_count=%d, global_wait_count=%d",
+                               Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
+                               Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
   }
 
   Atomic::store(&_forceMonitorScavenge, 0);    // Reset
 
   OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
   OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
 
   GVars.stw_random = os::random();
   GVars.stw_cycle++;
+
+  if (needs_special_deflation) {
+    set_is_special_deflation_requested(false);  // special deflation is done
+  }
 }
 
 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 
+  if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
+    // Nothing to do if a special deflation has NOT been requested.
+    return;
+  }
+
   ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
   ObjectMonitor* free_tail_p = NULL;
   elapsedTimer timer;
 
   if (log_is_enabled(Info, safepoint, cleanup) ||
       log_is_enabled(Info, monitorinflation)) {
     timer.start();
   }
 
-  int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);
+  // Update n_in_circulation before om_in_use_count is updated by deflation.
+  Atomic::add(&counters->n_in_circulation, thread->om_in_use_count);
 
-  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
-
-  // Adjust counters
-  counters->n_in_circulation += thread->om_in_use_count;
-  thread->om_in_use_count -= deflated_count;
-  counters->n_scavenged += deflated_count;
-  counters->n_in_use += thread->om_in_use_count;
-  counters->per_thread_scavenged += deflated_count;
+  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
+  Atomic::add(&counters->n_in_use, thread->om_in_use_count);
 
   if (free_head_p != NULL) {
     // Move the deflated ObjectMonitors back to the global free list.
+    // No races on the working list so no need for load_acquire().
     guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
-    assert(free_tail_p->_next_om == NULL, "invariant");
-
-    // constant-time list splice - prepend scavenged segment to g_free_list
-    free_tail_p->_next_om = g_free_list;
-    g_free_list = free_head_p;
+    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
+           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
+    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
+    Atomic::add(&counters->n_scavenged, deflated_count);
+    Atomic::add(&counters->per_thread_scavenged, deflated_count);
   }
 
   timer.stop();
-  // Safepoint logging cares about cumulative per_thread_times and
-  // we'll capture most of the cost, but not the muxRelease() which
-  // should be cheap.
+  // Safepoint logging cares about cumulative per_thread_times and
+  // we'll capture most of the cost.
   counters->per_thread_times += timer.seconds();
 
-  Thread::muxRelease(&gListLock);
-
   LogStreamHandle(Debug, monitorinflation) lsh_debug;
   LogStreamHandle(Info, monitorinflation) lsh_info;
   LogStream* ls = NULL;
   if (log_is_enabled(Debug, monitorinflation)) {
     ls = &lsh_debug;

@@ -1782,13 +2896,11 @@
 
 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
   assert(THREAD == JavaThread::current(), "must be current Java thread");
   NoSafepointVerifier nsv;
   ReleaseJavaMonitorsClosure rjmc(THREAD);
-  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
   ObjectSynchronizer::monitors_iterate(&rjmc);
-  Thread::muxRelease(&gListLock);
   THREAD->clear_pending_exception();
 }
 
 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
   switch (cause) {

@@ -1838,40 +2950,38 @@
   } else if (log_is_enabled(Info, monitorinflation)) {
     ls = &lsh_info;
   }
   assert(ls != NULL, "sanity check");
 
-  if (!on_exit) {
-    // Not at VM exit so grab the global list lock.
-    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
-  }
-
   // Log counts for the global and per-thread monitor lists:
   int chk_om_population = log_monitor_list_counts(ls);
   int error_cnt = 0;
 
   ls->print_cr("Checking global lists:");
 
-  // Check g_om_population:
-  if (g_om_population == chk_om_population) {
-    ls->print_cr("g_om_population=%d equals chk_om_population=%d",
-                 g_om_population, chk_om_population);
-  } else {
-    ls->print_cr("ERROR: g_om_population=%d is not equal to "
-                 "chk_om_population=%d", g_om_population,
-                 chk_om_population);
-    error_cnt++;
+  // Check LVars.population:
+  if (Atomic::load(&LVars.population) == chk_om_population) {
+    ls->print_cr("global_population=%d equals chk_om_population=%d",
+                 Atomic::load(&LVars.population), chk_om_population);
+  } else {
+    // With lock free access to the monitor lists, it is possible for
+    // log_monitor_list_counts() to return a value that doesn't match
+    // LVars.population. So far a higher value has been seen in testing
+    // so something is being double counted by log_monitor_list_counts().
+    ls->print_cr("WARNING: global_population=%d is not equal to "
+                 "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population);
   }
 
-  // Check g_om_in_use_list and g_om_in_use_count:
+  // Check LVars.in_use_list and LVars.in_use_count:
   chk_global_in_use_list_and_count(ls, &error_cnt);
 
-  // Check g_free_list and g_om_free_count:
+  // Check LVars.free_list and LVars.free_count:
   chk_global_free_list_and_count(ls, &error_cnt);
 
-  if (!on_exit) {
-    Thread::muxRelease(&gListLock);
+  if (HandshakeAfterDeflateIdleMonitors) {
+    // Check LVars.wait_list and LVars.wait_count:
+    chk_global_wait_list_and_count(ls, &error_cnt);
   }
 
   ls->print_cr("Checking per-thread lists:");
 
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {

@@ -1891,11 +3001,11 @@
   if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
       (!on_exit && log_is_enabled(Trace, monitorinflation))) {
     // When exiting this log output is at the Info level. When called
     // at a safepoint, this log output is at the Trace level since
     // there can be a lot of it.
-    log_in_use_monitor_details(ls, on_exit);
+    log_in_use_monitor_details(ls);
   }
 
   ls->flush();
 
   guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);

@@ -1920,17 +3030,18 @@
     if (jt != NULL) {
       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                     ": free per-thread monitor must have NULL _header "
                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                     n->header().value());
-    } else {
+      *error_cnt_p = *error_cnt_p + 1;
+    } else if (!AsyncDeflateIdleMonitors) {
       out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                     "must have NULL _header field: _header=" INTPTR_FORMAT,
                     p2i(n), n->header().value());
-    }
-    *error_cnt_p = *error_cnt_p + 1;
-  }
+      *error_cnt_p = *error_cnt_p + 1;
+    }
+  }
   if (n->object() != NULL) {
     if (jt != NULL) {
       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                     ": free per-thread monitor must have NULL _object "
                     "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),

@@ -1942,44 +3053,117 @@
     }
     *error_cnt_p = *error_cnt_p + 1;
   }
 }
 
+// Lock the next ObjectMonitor for traversal. The current ObjectMonitor
+// is unlocked after the next ObjectMonitor is locked. *cur_p and *next_p
+// are updated to their next values in the list traversal. *cur_p is set
+// to NULL when the end of the list is reached.
+static void lock_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
+  ObjectMonitor* prev = *cur_p;      // Save current for unlocking.
+  if (*next_p == NULL) {             // Reached the end of the list.
+    om_unlock(prev);                 // Unlock previous.
+    *cur_p = NULL;                   // Tell the caller we are done.
+    return;
+  }
+  om_lock(*next_p);                  // Lock next.
+  om_unlock(prev);                   // Unlock previous.
+  *cur_p = *next_p;                  // Update current.
+  *next_p = unmarked_next(*cur_p);   // Update next.
+}
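+
+// Typical hand-over-hand traversal using the helper above; this is the
+// pattern used by the list checkers below ('some_list' is a placeholder):
+//
+//   ObjectMonitor* cur = NULL;
+//   ObjectMonitor* next = NULL;
+//   if ((cur = get_list_head_locked(&some_list)) != NULL) {
+//     next = unmarked_next(cur);
+//     while (true) {
+//       // ... examine cur; its next field is locked and stable ...
+//       lock_next_for_traversal(&cur, &next);
+//       if (cur == NULL) {
+//         break;  // Reached the end of the list.
+//       }
+//     }
+//   }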
+
 // Check the global free list and count; log the results of the checks.
 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                         int *error_cnt_p) {
   int chk_om_free_count = 0;
-  for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
-    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
+  ObjectMonitor* cur = NULL;
+  ObjectMonitor* next = NULL;
+  if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) {
+    next = unmarked_next(cur);
+    // Marked the global free list head so process the list.
+    while (true) {
+      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
-    chk_om_free_count++;
+      chk_om_free_count++;
+
+      lock_next_for_traversal(&cur, &next);
+      if (cur == NULL) {
+        break;
+      }
+    }
   }
-  if (g_om_free_count == chk_om_free_count) {
-    out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
-                  g_om_free_count, chk_om_free_count);
+  if (Atomic::load(&LVars.free_count) == chk_om_free_count) {
+    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
+                  Atomic::load(&LVars.free_count), chk_om_free_count);
   } else {
-    out->print_cr("ERROR: g_om_free_count=%d is not equal to "
-                  "chk_om_free_count=%d", g_om_free_count,
-                  chk_om_free_count);
+    // With lock free access to LVars.free_list, it is possible for an
+    // ObjectMonitor to be prepended to LVars.free_list after we started
+    // calculating chk_om_free_count so LVars.free_count may not
+    // match anymore.
+    out->print_cr("WARNING: global_free_count=%d is not equal to "
+                  "chk_om_free_count=%d", Atomic::load(&LVars.free_count), chk_om_free_count);
+  }
+}
+
+// Check the global wait list and count; log the results of the checks.
+void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
+                                                        int *error_cnt_p) {
+  int chk_om_wait_count = 0;
+  ObjectMonitor* cur = NULL;
+  ObjectMonitor* next = NULL;
+  if ((cur = get_list_head_locked(&LVars.wait_list)) != NULL) {
+    next = unmarked_next(cur);
+    // Marked the global wait list head so process the list.
+    while (true) {
+      // Rules for LVars.wait_list are the same as for LVars.free_list:
+      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
+      chk_om_wait_count++;
+
+      lock_next_for_traversal(&cur, &next);
+      if (cur == NULL) {
+        break;
+      }
+    }
+  }
+  if (Atomic::load(&LVars.wait_count) == chk_om_wait_count) {
+    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
+                  Atomic::load(&LVars.wait_count), chk_om_wait_count);
+  } else {
+    out->print_cr("ERROR: global_wait_count=%d is not equal to "
+                  "chk_om_wait_count=%d", Atomic::load(&LVars.wait_count), chk_om_wait_count);
     *error_cnt_p = *error_cnt_p + 1;
   }
 }
 
 // Check the global in-use list and count; log the results of the checks.
 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                           int *error_cnt_p) {
   int chk_om_in_use_count = 0;
-  for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
-    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
+  ObjectMonitor* cur = NULL;
+  ObjectMonitor* next = NULL;
+  if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
+    next = unmarked_next(cur);
+    // Marked the global in-use list head so process the list.
+    while (true) {
+      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
-    chk_om_in_use_count++;
+      chk_om_in_use_count++;
+
+      lock_next_for_traversal(&cur, &next);
+      if (cur == NULL) {
+        break;
+      }
-  }
+    }
-  if (g_om_in_use_count == chk_om_in_use_count) {
-    out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
-                  chk_om_in_use_count);
-  } else {
-    out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
-                  g_om_in_use_count, chk_om_in_use_count);
-    *error_cnt_p = *error_cnt_p + 1;
+  }
+  if (Atomic::load(&LVars.in_use_count) == chk_om_in_use_count) {
+    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
+                  Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
+  } else {
+    // With lock-free access to the monitor lists, it is possible for
+    // an exiting JavaThread to put its in-use ObjectMonitors on the
+    // global in-use list after chk_om_in_use_count is calculated above.
+    out->print_cr("WARNING: global_in_use_count=%d is not equal to "
+                  "chk_om_in_use_count=%d",
+                  Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
   }
 }
 
 // Check an in-use monitor entry; log any errors.
 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,

@@ -2043,17 +3227,29 @@
 // Check the thread's free list and count; log the results of the checks.
 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                             outputStream * out,
                                                             int *error_cnt_p) {
   int chk_om_free_count = 0;
-  for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
-    chk_free_entry(jt, n, out, error_cnt_p);
+  ObjectMonitor* cur = NULL;
+  ObjectMonitor* next = NULL;
+  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
+    next = unmarked_next(cur);
+    // Marked the per-thread free list head so process the list.
+    while (true) {
+      chk_free_entry(jt, cur, out, error_cnt_p);
       chk_om_free_count++;
+
+      lock_next_for_traversal(&cur, &next);
+      if (cur == NULL) {
+        break;
+      }
+    }
   }
   if (jt->om_free_count == chk_om_free_count) {
     out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
-                  "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
+                  "chk_om_free_count=%d", p2i(jt), jt->om_free_count,
+                  chk_om_free_count);
   } else {
     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                   "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
                   chk_om_free_count);
     *error_cnt_p = *error_cnt_p + 1;

@@ -2063,97 +3259,124 @@
 // Check the thread's in-use list and count; log the results of the checks.
 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                               outputStream * out,
                                                               int *error_cnt_p) {
   int chk_om_in_use_count = 0;
-  for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
-    chk_in_use_entry(jt, n, out, error_cnt_p);
+  ObjectMonitor* cur = NULL;
+  ObjectMonitor* next = NULL;
+  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
+    next = unmarked_next(cur);
+    // Marked the per-thread in-use list head so process the list.
+    while (true) {
+      chk_in_use_entry(jt, cur, out, error_cnt_p);
       chk_om_in_use_count++;
+
+      lock_next_for_traversal(&cur, &next);
+      if (cur == NULL) {
+        break;
+      }
+    }
   }
   if (jt->om_in_use_count == chk_om_in_use_count) {
     out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
-                  "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
-                  chk_om_in_use_count);
+                  "chk_om_in_use_count=%d", p2i(jt),
+                  jt->om_in_use_count, chk_om_in_use_count);
   } else {
     out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
-                  "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
-                  chk_om_in_use_count);
+                  "equal to chk_om_in_use_count=%d", p2i(jt),
+                  jt->om_in_use_count, chk_om_in_use_count);
     *error_cnt_p = *error_cnt_p + 1;
   }
 }
 
 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
 // flags indicate why the entry is in-use; 'object' and 'object type'
 // indicate the associated object and its type.
-void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
-                                                    bool on_exit) {
-  if (!on_exit) {
-    // Not at VM exit so grab the global list lock.
-    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
-  }
-
+void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
   stringStream ss;
-  if (g_om_in_use_count > 0) {
+  if (Atomic::load(&LVars.in_use_count) > 0) {
     out->print_cr("In-use global monitor info:");
     out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
-    out->print_cr("%18s  %s  %18s  %18s",
-                  "monitor", "BHL", "object", "object type");
-    out->print_cr("==================  ===  ==================  ==================");
-    for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
-      const oop obj = (oop) n->object();
-      const markWord mark = n->header();
+    out->print_cr("%18s  %s  %7s  %18s  %18s",
+                  "monitor", "BHL", "ref_cnt", "object", "object type");
+    out->print_cr("==================  ===  =======  ==================  ==================");
+    ObjectMonitor* cur = NULL;
+    ObjectMonitor* next = NULL;
+    if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
+      next = unmarked_next(cur);
+      // Marked the global in-use list head so process the list.
+      while (true) {
+        const oop obj = (oop) cur->object();
+        const markWord mark = cur->header();
         ResourceMark rm;
-      out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(n),
-                 n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
-                 p2i(obj), obj->klass()->external_name());
-      if (n->is_busy() != 0) {
-        out->print(" (%s)", n->is_busy_to_string(&ss));
+        out->print(INTPTR_FORMAT "  %d%d%d  %7d  " INTPTR_FORMAT "  %s",
+                   p2i(cur), cur->is_busy() != 0, mark.hash() != 0,
+                   cur->owner() != NULL, (int)cur->ref_count(), p2i(obj),
+                   obj->klass()->external_name());
+        if (cur->is_busy() != 0) {
+          out->print(" (%s)", cur->is_busy_to_string(&ss));
           ss.reset();
         }
         out->cr();
+
+        lock_next_for_traversal(&cur, &next);
+        if (cur == NULL) {
+          break;
+        }
       }
     }
-
-  if (!on_exit) {
-    Thread::muxRelease(&gListLock);
   }
 
   out->print_cr("In-use per-thread monitor info:");
   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
-  out->print_cr("%18s  %18s  %s  %18s  %18s",
-                "jt", "monitor", "BHL", "object", "object type");
-  out->print_cr("==================  ==================  ===  ==================  ==================");
+  out->print_cr("%18s  %18s  %s  %7s  %18s  %18s",
+                "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
+  out->print_cr("==================  ==================  ===  =======  ==================  ==================");
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
-    for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
-      const oop obj = (oop) n->object();
-      const markWord mark = n->header();
+    ObjectMonitor* cur = NULL;
+    ObjectMonitor* next = NULL;
+    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
+      next = unmarked_next(cur);
+      // Marked the per-thread in-use list head so process the list.
+      while (true) {
+        const oop obj = (oop) cur->object();
+        const markWord mark = cur->header();
         ResourceMark rm;
-      out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT
-                 "  %s", p2i(jt), p2i(n), n->is_busy() != 0,
-                 mark.hash() != 0, n->owner() != NULL, p2i(obj),
-                 obj->klass()->external_name());
-      if (n->is_busy() != 0) {
-        out->print(" (%s)", n->is_busy_to_string(&ss));
+        out->print(INTPTR_FORMAT "  " INTPTR_FORMAT "  %d%d%d  %7d  "
+                   INTPTR_FORMAT "  %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
+                   mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
+                   p2i(obj), obj->klass()->external_name());
+        if (cur->is_busy() != 0) {
+          out->print(" (%s)", cur->is_busy_to_string(&ss));
           ss.reset();
         }
         out->cr();
+
+        lock_next_for_traversal(&cur, &next);
+        if (cur == NULL) {
+          break;
+        }
+      }
     }
   }
 
   out->flush();
 }
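+
+// A hypothetical sample row from the global table above (all values
+// invented for illustration): B=1 (busy), H=0 (no hash code),
+// L=1 (owned), ref_cnt=1, with the is_busy details appended:
+//
+//   0x00007f3a2c015680  101        1  0x000000071a3b9d58  java.lang.Object (...)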
 
 // Log counts for the global and per-thread monitor lists and return
 // the population count.
 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
   int pop_count = 0;
-  out->print_cr("%18s  %10s  %10s  %10s",
-                "Global Lists:", "InUse", "Free", "Total");
-  out->print_cr("==================  ==========  ==========  ==========");
-  out->print_cr("%18s  %10d  %10d  %10d", "",
-                g_om_in_use_count, g_om_free_count, g_om_population);
-  pop_count += g_om_in_use_count + g_om_free_count;
+  out->print_cr("%18s  %10s  %10s  %10s  %10s",
+                "Global Lists:", "InUse", "Free", "Wait", "Total");
+  out->print_cr("==================  ==========  ==========  ==========  ==========");
+  out->print_cr("%18s  %10d  %10d  %10d  %10d", "", Atomic::load(&LVars.in_use_count),
+                Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count), Atomic::load(&LVars.population));
+  pop_count += Atomic::load(&LVars.in_use_count) + Atomic::load(&LVars.free_count);
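+  // Monitors on LVars.wait_list have been deflated but have not yet
+  // been moved to the free list, so when handshaking is in use they
+  // still count as extant: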
+  if (HandshakeAfterDeflateIdleMonitors) {
+    pop_count += Atomic::load(&LVars.wait_count);
+  }
 
   out->print_cr("%18s  %10s  %10s  %10s",
                 "Per-Thread Lists:", "InUse", "Free", "Provision");
   out->print_cr("==================  ==========  ==========  ==========");
 

@@ -2170,21 +3393,23 @@
 // Check if monitor belongs to the monitor cache.
 // The list is grow-only so it's *relatively* safe to traverse
 // the list of extant blocks without taking a lock.
 
 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
-  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
+  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
   while (block != NULL) {
     assert(block->object() == CHAINMARKER, "must be a block header");
     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
       address mon = (address)monitor;
       address blk = (address)block;
       size_t diff = mon - blk;
       assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
       return 1;
     }
-    block = (PaddedObjectMonitor*)block->_next_om;
+    // unmarked_next() is not needed with g_block_list (no locking
+    // is used with block linkage _next_om fields).
+    block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
   }
   return 0;
 }
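+
+// An illustrative usage sketch (the caller and the 'mid' variable are
+// hypothetical): debug-only code can sanity-check a monitor pointer
+// before manipulating it:
+//
+//   assert(verify_objmon_isinpool(mid),
+//          "monitor " INTPTR_FORMAT " is not in the global block pool",
+//          p2i(mid));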
 
 #endif