src/hotspot/share/runtime/synchronizer.cpp

rev 60025 : 8246476: remove AsyncDeflateIdleMonitors option and the safepoint based deflation mechanism

@@ -489,20 +489,16 @@
 
   const markWord mark = obj->mark();
 
   if (mark.has_monitor()) {
     ObjectMonitor* const m = mark.monitor();
-    if (AsyncDeflateIdleMonitors) {
       // An async deflation can race us before we manage to make the
       // ObjectMonitor busy by setting the owner below. If we detect
       // that race we just bail out to the slow-path here.
       if (m->object() == NULL) {
         return false;
       }
-    } else {
-      assert(m->object() == obj, "invariant");
-    }
     Thread* const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
     // and observability
     // Case: light contention possibly amenable to TLE
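
The guard removed above makes the race check unconditional: the async deflater clears the ObjectMonitor's back-pointer to the object before the monitor can be recycled, so a fast-path locker that still sees a stale monitor can notice the race and fall back to the slow path. Below is a minimal sketch of that pattern in portable C++; the types and std::atomic fields are stand-ins for the ObjectMonitor fields, not HotSpot code.

    #include <atomic>

    struct Obj;                                  // hypothetical object type
    struct MiniMonitor {
      std::atomic<Obj*>  object{nullptr};        // cleared first by the deflater
      std::atomic<void*> owner{nullptr};         // claimed by the locking thread
    };

    // Fast path: return true only if we made the monitor busy; return false
    // (caller takes the slow path) when async deflation already detached it.
    bool quick_enter_sketch(MiniMonitor* m, void* self) {
      if (m->object.load(std::memory_order_acquire) == nullptr) {
        return false;                            // deflation won the race
      }
      void* expected = nullptr;
      return m->owner.compare_exchange_strong(expected, self,
                                               std::memory_order_acquire);
    }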

@@ -1191,12 +1187,10 @@
     return self->is_lock_owned((address)mark.locker()) ?
       owner_self : owner_other;
   }
 
   // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
-  // The Object:ObjectMonitor relationship is stable as long as we're
-  // not at a safepoint and AsyncDeflateIdleMonitors is false.
   if (mark.has_monitor()) {
     // The first stage of async deflation does not affect any field
     // used by this comparison so the ObjectMonitor* is usable here.
     ObjectMonitor* monitor = mark.monitor();
     void* owner = monitor->owner();

@@ -1292,13 +1286,10 @@
   }
   return false;
 }
 
 bool ObjectSynchronizer::is_async_deflation_needed() {
-  if (!AsyncDeflateIdleMonitors) {
-    return false;
-  }
   if (is_async_deflation_requested()) {
     // Async deflation request.
     return true;
   }
   if (AsyncDeflationInterval > 0 &&

@@ -1311,20 +1302,14 @@
     return true;
   }
   return false;
 }
 
-bool ObjectSynchronizer::is_safepoint_deflation_needed() {
-  return !AsyncDeflateIdleMonitors &&
-         monitors_used_above_threshold();  // Too many monitors in use.
-}
-
 bool ObjectSynchronizer::request_deflate_idle_monitors() {
   bool is_JavaThread = Thread::current()->is_Java_thread();
   bool ret_code = false;
 
-  if (AsyncDeflateIdleMonitors) {
     jlong last_time = last_async_deflation_time_ns();
     set_is_async_deflation_requested(true);
     {
       MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
       ml.notify_all();

@@ -1345,19 +1330,10 @@
       }
     }
     if (!ret_code) {
       log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
     }
-  } else {
-    // Only need to force this safepoint if we are not using async
-    // deflation. The VMThread won't call this function before the
-    // final safepoint if we are not using async deflation so we
-    // don't have to reason about the VMThread executing a VM-op here.
-    VM_ForceSafepoint force_safepoint_op;
-    VMThread::execute(&force_safepoint_op);
-    ret_code = true;
-  }
 
   return ret_code;
 }
 
 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
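
Taken together, the two hunks above reduce request_deflate_idle_monitors() to a single path: set the request flag, notify the ServiceThread under Service_lock, and (for a JavaThread caller) re-check a bounded number of times whether a deflation pass ran after the request. A rough model of that handshake using standard C++ primitives follows; service_lock, N_CHECKS and the timestamp comparison are stand-ins for the HotSpot internals, not the real API.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex              service_lock;           // stands in for Service_lock
    std::condition_variable service_cv;
    std::atomic<bool>       deflation_requested{false};
    std::atomic<long long>  last_deflation_ns{0};    // bumped by the service thread

    // Ask the service thread to run a deflation pass, then poll a bounded
    // number of times to see whether one completed after our request.
    bool request_deflation_sketch() {
      const long long before = last_deflation_ns.load();
      deflation_requested.store(true);
      {
        std::lock_guard<std::mutex> lg(service_lock);
        service_cv.notify_all();                    // wake the service thread
      }
      const int N_CHECKS = 5;                       // arbitrary bound
      for (int i = 0; i < N_CHECKS; i++) {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        if (last_deflation_ns.load() != before) {
          return true;                              // a pass ran after the request
        }
      }
      return false;                                 // report "DID NOT happen" here
    }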

@@ -1445,11 +1421,10 @@
         ObjectMonitor* take = take_from_start_of_global_free_list();
         if (take == NULL) {
           break;  // No more are available.
         }
         guarantee(take->object() == NULL, "invariant");
-        if (AsyncDeflateIdleMonitors) {
           // We allowed 3 field values to linger during async deflation.
           // Clear or restore them as appropriate.
           take->set_header(markWord::zero());
           // DEFLATER_MARKER is the only non-NULL value we should see here.
           take->try_set_owner_from(DEFLATER_MARKER, NULL);

@@ -1462,11 +1437,10 @@
             jint l_contentions = take->contentions();
 #endif
             assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
                    l_contentions, take->contentions());
           }
-        }
         take->Recycle();
         // Since we're taking from the global free-list, take must be Free.
         // om_release() also sets the allocation state to Free because it
         // is called from other code paths.
         assert(take->is_free(), "invariant");

@@ -1527,12 +1501,12 @@
 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
 //
 // Key constraint: all ObjectMonitors on a thread's free list and the global
 // free list must have their object field set to null. This prevents the
-// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
-// -- from reclaiming them while we are trying to release them.
+// scavenger -- deflate_monitor_list_using_JT() -- from reclaiming them
+// while we are trying to release them.
 
 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                     bool from_per_thread_alloc) {
   guarantee(m->header().value() == 0, "invariant");
   guarantee(m->object() == NULL, "invariant");

@@ -1637,19 +1611,17 @@
 // lists to the appropriate global lists. The ObjectMonitors on the
 // per-thread in-use list may still be in use by other threads.
 //
 // We currently call om_flush() from Threads::remove() before the
 // thread has been excised from the thread list and is no longer a
-// mutator. This means that om_flush() cannot run concurrently with
-// a safepoint and interleave with deflate_idle_monitors(). In
-// particular, this ensures that the thread's in-use monitors are
-// scanned by a GC safepoint, either via Thread::oops_do() (before
-// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
-// om_flush() is called).
+// mutator. In particular, this ensures that the thread's in-use
+// monitors are scanned by a GC safepoint, either via Thread::oops_do()
+// (before om_flush() is called) or via ObjectSynchronizer::oops_do()
+// (after om_flush() is called).
 //
-// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
-// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
+// deflate_global_idle_monitors_using_JT() and
+// deflate_per_thread_idle_monitors_using_JT() (in another thread) can
 // run at the same time as om_flush() so we have to follow a careful
 // protocol to prevent list corruption.
 
 void ObjectSynchronizer::om_flush(Thread* self) {
   // Process the per-thread in-use list first to be consistent.

@@ -1699,12 +1671,14 @@
       in_use_tail = cur_om;
       in_use_count++;
       cur_om = unmarked_next(cur_om);
     }
     guarantee(in_use_tail != NULL, "invariant");
+#ifdef ASSERT
     int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
-    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
+#endif
+    assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
                    "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
     Atomic::store(&self->om_in_use_count, 0);
     // Clear the in-use list head (which also unlocks it):
     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
     om_unlock(in_use_list);
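
The hunk above keeps the count/link cross-check but loads the counter only under ASSERT now that ADIM_guarantee is gone. The detach step itself is the usual "walk the links, check them against the maintained counter, then reset both" idiom; a simplified, non-HotSpot sketch follows (the real om_flush() additionally spin-locks the list heads because the async deflater may be walking them concurrently):

    #include <atomic>
    #include <cassert>

    struct Node { Node* next = nullptr; };

    struct PerThreadList {
      std::atomic<Node*> head{nullptr};
      std::atomic<int>   count{0};
    };

    // Detach a per-thread list for hand-off to a global list, checking that
    // the link walk and the maintained counter agree before both are reset.
    Node* detach_list_sketch(PerThreadList* l, int* detached_count) {
      Node* first = l->head.load();
      int walked = 0;
      for (Node* n = first; n != nullptr; n = n->next) {
        walked++;
      }
      assert(walked == l->count.load() && "list count must match the links");
      *detached_count = walked;
      l->count.store(0);
      l->head.store(nullptr);
      return first;   // caller prepends this chain to the global in-use list
    }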

@@ -1742,12 +1716,14 @@
         stringStream ss;
         fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
       }
     }
     guarantee(free_tail != NULL, "invariant");
+#ifdef ASSERT
     int l_om_free_count = Atomic::load(&self->om_free_count);
-    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
+#endif
+    assert(l_om_free_count == free_count, "free counts don't match: "
                    "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
     Atomic::store(&self->om_free_count, 0);
     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
     om_unlock(free_list);
   }

@@ -1823,11 +1799,10 @@
     // CASE: inflated
     if (mark.has_monitor()) {
       ObjectMonitor* inf = mark.monitor();
       markWord dmw = inf->header();
       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
-      assert(AsyncDeflateIdleMonitors || inf->object() == object, "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       return inf;
     }
 
     // CASE: inflation in progress - inflating over a stack-lock.

@@ -1909,25 +1884,21 @@
       // object is in the mark.  Furthermore the owner can't complete
       // an unlock on the object, either.
       markWord dmw = mark.displaced_mark_helper();
       // Catch if the object's header is not neutral (not locked and
       // not marked is what we care about here).
-      ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
+      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
 
       // Setup monitor fields to proper values -- prepare the monitor
       m->set_header(dmw);
 
       // Optimization: if the mark.locker stack address is associated
       // with this thread we could simply set m->_owner = self.
       // Note that a thread can inflate an object
       // that it has stack-locked -- as might happen in wait() -- directly
       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
-      if (AsyncDeflateIdleMonitors) {
         m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
-      } else {
-        m->set_owner_from(NULL, mark.locker());
-      }
       m->set_object(object);
       // TODO-FIXME: assert BasicLock->dhw != 0.
 
       // Must preserve store ordering. The monitor state must
       // be stable at the time of publishing the monitor address.
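
The store-ordering comment above is the standard prepare-then-publish idiom: every monitor field is written before the monitor's address becomes reachable through the object's mark word, and the CAS that publishes it carries release semantics. A compressed, non-HotSpot illustration (the tag bit, header value and field layout are simplified, and the monitor is assumed to be at least 4-byte aligned):

    #include <atomic>
    #include <cstdint>

    struct MiniMon {
      uintptr_t header = 0;
      void*     owner  = nullptr;
      void*     object = nullptr;
    };

    // Publish a fully initialized monitor by CAS-ing its encoded address into
    // the object's mark word. No other thread may reach the monitor before the
    // CAS, and the release ordering keeps the field stores ahead of it.
    bool install_monitor_sketch(std::atomic<uintptr_t>* mark_word,
                                uintptr_t stack_locked_mark,
                                MiniMon* m, void* locker, void* obj) {
      m->header = 0x1;                   // stand-in for a neutral displaced header
      m->owner  = locker;                // the stack-locking thread owns it
      m->object = obj;
      uintptr_t encoded = reinterpret_cast<uintptr_t>(m) | 0x2;  // "has monitor" tag
      uintptr_t expected = stack_locked_mark;
      return mark_word->compare_exchange_strong(expected, encoded,
                                                std::memory_order_release,
                                                std::memory_order_relaxed);
    }

If the CAS fails, the real code releases the freshly prepared monitor and retries the whole inflate loop against the object's new mark.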

@@ -1963,19 +1934,17 @@
     // to inflate and then CAS() again to try to swing _owner from NULL to self.
     // An inflateTry() method that we could call from enter() would be useful.
 
     // Catch if the object's header is not neutral (not locked and
     // not marked is what we care about here).
-    ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
     ObjectMonitor* m = om_alloc(self);
     // prepare m for installation - set monitor to initial state
     m->Recycle();
     m->set_header(mark);
-    if (AsyncDeflateIdleMonitors) {
       // DEFLATER_MARKER is the only non-NULL value we should see here.
       m->try_set_owner_from(DEFLATER_MARKER, NULL);
-    }
     m->set_object(object);
     m->_Responsible  = NULL;
     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
 
     if (object->cas_set_mark(markWord::encode(m), mark) != mark) {

@@ -2011,137 +1980,28 @@
     return m;
   }
 }
 
 
-// We maintain a list of in-use monitors for each thread.
-//
-// For safepoint based deflation:
-// deflate_thread_local_monitors() scans a single thread's in-use list, while
-// deflate_idle_monitors() scans only a global list of in-use monitors which
-// is populated only as a thread dies (see om_flush()).
-//
-// These operations are called at all safepoints, immediately after mutators
-// are stopped, but before any objects have moved. Collectively they traverse
-// the population of in-use monitors, deflating where possible. The scavenged
-// monitors are returned to the global monitor free list.
-//
-// Beware that we scavenge at *every* stop-the-world point. Having a large
-// number of monitors in-use could negatively impact performance. We also want
-// to minimize the total # of monitors in circulation, as they incur a small
-// footprint penalty.
-//
-// Perversely, the heap size -- and thus the STW safepoint rate --
-// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
-// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
-// This is an unfortunate aspect of this design.
-//
-// For async deflation:
-// If a special deflation request is made, then the safepoint based
-// deflation mechanism is used. Otherwise, an async deflation request
-// is registered with the ServiceThread and it is notified.
-
-void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
+// An async deflation request is registered with the ServiceThread,
+// which is then notified.
+void ObjectSynchronizer::do_safepoint_work() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 
-  // The per-thread in-use lists are handled in
-  // ParallelSPCleanupThreadClosure::do_thread().
-
-  if (!AsyncDeflateIdleMonitors) {
-    // Use the older mechanism for the global in-use list.
-    ObjectSynchronizer::deflate_idle_monitors(counters);
-    return;
-  }
-
   log_debug(monitorinflation)("requesting async deflation of idle monitors.");
   // Request deflation of idle monitors by the ServiceThread:
   set_is_async_deflation_requested(true);
   MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
   ml.notify_all();
 
   if (log_is_enabled(Debug, monitorinflation)) {
     // exit_globals()'s call to audit_and_print_stats() is done
     // at the Info level and not at a safepoint.
-    // For safepoint based deflation, audit_and_print_stats() is called
-    // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
-    // Debug level at a safepoint.
     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
   }
 }
 
-// Deflate a single monitor if not in-use
-// Return true if deflated, false if in-use
-bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
-                                         ObjectMonitor** free_head_p,
-                                         ObjectMonitor** free_tail_p) {
-  bool deflated;
-  // Normal case ... The monitor is associated with obj.
-  const markWord mark = obj->mark();
-  guarantee(mark == markWord::encode(mid), "should match: mark="
-            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
-            markWord::encode(mid).value());
-  // Make sure that mark.monitor() and markWord::encode() agree:
-  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
-            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
-  const markWord dmw = mid->header();
-  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
-
-  if (mid->is_busy()) {
-    // Easy checks are first - the ObjectMonitor is busy so no deflation.
-    deflated = false;
-  } else {
-    // Deflate the monitor if it is no longer being used
-    // It's idle - scavenge and return to the global free list
-    // plain old deflation ...
-    if (log_is_enabled(Trace, monitorinflation)) {
-      ResourceMark rm;
-      log_trace(monitorinflation)("deflate_monitor: "
-                                  "object=" INTPTR_FORMAT ", mark="
-                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
-                                  mark.value(), obj->klass()->external_name());
-    }
-
-    // Restore the header back to obj
-    obj->release_set_mark(dmw);
-    if (AsyncDeflateIdleMonitors) {
-      // clear() expects the owner field to be NULL.
-      // DEFLATER_MARKER is the only non-NULL value we should see here.
-      mid->try_set_owner_from(DEFLATER_MARKER, NULL);
-    }
-    mid->clear();
-
-    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
-           p2i(mid->object()));
-    assert(mid->is_free(), "invariant");
-
-    // Move the deflated ObjectMonitor to the working free list
-    // defined by free_head_p and free_tail_p.
-    if (*free_head_p == NULL) *free_head_p = mid;
-    if (*free_tail_p != NULL) {
-      // We append to the list so the caller can use mid->_next_om
-      // to fix the linkages in its context.
-      ObjectMonitor* prevtail = *free_tail_p;
-      // Should have been cleaned up by the caller:
-      // Note: Should not have to lock prevtail here since we're at a
-      // safepoint and ObjectMonitors on the local free list should
-      // not be accessed in parallel.
-#ifdef ASSERT
-      ObjectMonitor* l_next_om = prevtail->next_om();
-#endif
-      assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
-      prevtail->set_next_om(mid);
-    }
-    *free_tail_p = mid;
-    // At this point, mid->_next_om still refers to its current
-    // value and another ObjectMonitor's _next_om field still
-    // refers to this ObjectMonitor. Those linkages have to be
-    // cleaned up by the caller who has the complete context.
-    deflated = true;
-  }
-  return deflated;
-}
-
 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
 // Returns true if it was deflated and false otherwise.
 //
 // The async deflation protocol sets owner to DEFLATER_MARKER and
 // makes contentions negative as signals to contending threads that

@@ -2154,11 +2014,10 @@
 // Contending threads that see that condition know to retry their operation.
 //
 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                   ObjectMonitor** free_head_p,
                                                   ObjectMonitor** free_tail_p) {
-  assert(AsyncDeflateIdleMonitors, "sanity check");
   assert(Thread::current()->is_Java_thread(), "precondition");
   // A newly allocated ObjectMonitor should not be seen here so we
   // avoid an endless inflate/deflate cycle.
   assert(mid->is_old(), "must be old: allocation_state=%d",
          (int) mid->allocation_state());
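
The protocol summarized above (owner set to DEFLATER_MARKER, contentions forced negative, racing threads told to retry) can be modelled with two small functions. This is a sketch of the idea only: the constants, names and the exact ordering of checks in deflate_monitor_using_JT() differ, and none of this is the HotSpot implementation.

    #include <atomic>
    #include <cstdint>

    static void* const DEFLATER = reinterpret_cast<void*>(intptr_t(-1)); // sentinel owner
    static const int   BIG      = 0x40000000;                            // large bias

    struct MiniOM {
      std::atomic<void*> owner{nullptr};
      std::atomic<int>   contentions{0};
    };

    // Deflater side: claim an apparently idle monitor, then drive contentions
    // negative so late arrivals can tell the monitor is being torn down.
    bool deflate_try_sketch(MiniOM* m) {
      void* expected = nullptr;
      if (!m->owner.compare_exchange_strong(expected, DEFLATER)) {
        return false;                     // owned or being entered: not idle
      }
      int prev = m->contentions.fetch_sub(BIG);   // force the count negative
      if (prev > 0) {
        // A contender slipped in between the two steps: undo and give up.
        m->contentions.fetch_add(BIG);
        m->owner.store(nullptr);
        return false;
      }
      return true;   // owner == DEFLATER and contentions < 0: finish deflation
    }

    // Contender side: announce interest first; if the count went negative or
    // the owner is the deflater marker, back off so the caller can re-read the
    // object's mark word and retry (it will usually see a restored header).
    bool enter_attempt_sketch(MiniOM* m) {
      m->contentions.fetch_add(1);
      if (m->contentions.load() < 0 || m->owner.load() == DEFLATER) {
        m->contentions.fetch_sub(1);
        return false;                     // retry against the object, not this monitor
      }
      return true;   // safe to continue with the normal contended-enter path
    }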

@@ -2260,60 +2119,10 @@
   // We leave owner == DEFLATER_MARKER and contentions < 0
   // to force any racing threads to retry.
   return true;  // Success, ObjectMonitor has been deflated.
 }
 
-// Walk a given monitor list, and deflate idle monitors.
-// The given list could be a per-thread list or a global list.
-//
-// In the case of parallel processing of thread local monitor lists,
-// work is done by Threads::parallel_threads_do() which ensures that
-// each Java thread is processed by exactly one worker thread, and
-// thus avoid conflicts that would arise when worker threads would
-// process the same monitor lists concurrently.
-//
-// See also ParallelSPCleanupTask and
-// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
-// Threads::parallel_java_threads_do() in thread.cpp.
-int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
-                                             int* count_p,
-                                             ObjectMonitor** free_head_p,
-                                             ObjectMonitor** free_tail_p) {
-  ObjectMonitor* cur_mid_in_use = NULL;
-  ObjectMonitor* mid = NULL;
-  ObjectMonitor* next = NULL;
-  int deflated_count = 0;
-
-  // This list walk executes at a safepoint and does not race with any
-  // other list walkers.
-
-  for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
-    next = unmarked_next(mid);
-    oop obj = (oop) mid->object();
-    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
-      // Deflation succeeded and already updated free_head_p and
-      // free_tail_p as needed. Finish the move to the local free list
-      // by unlinking mid from the global or per-thread in-use list.
-      if (cur_mid_in_use == NULL) {
-        // mid is the list head so switch the list head to next:
-        Atomic::store(list_p, next);
-      } else {
-        // Switch cur_mid_in_use's next field to next:
-        cur_mid_in_use->set_next_om(next);
-      }
-      // At this point mid is disconnected from the in-use list.
-      deflated_count++;
-      Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it:
-      mid->set_next_om(NULL);
-    } else {
-      cur_mid_in_use = mid;
-    }
-  }
-  return deflated_count;
-}
-
 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
 // list could be a per-thread in-use list or the global in-use list.
 // If a safepoint has started, then we save state via saved_mid_in_use_p
 // and return to the caller to honor the safepoint.
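
The save/resume contract described above (stop mid-walk when a safepoint starts, remember the predecessor in *saved_mid_in_use_p, resume there later) is essentially a resumable list filter. Here is a self-contained sketch of that shape; safepoint_pending() is a stub standing in for the real per-thread safepoint poll, and the unlink logic is simplified relative to the lock-free list handling below.

    #include <vector>

    struct OMNode { OMNode* next = nullptr; bool idle = false; };

    // Stand-in for a safepoint poll; the real code checks a per-thread poll word.
    static bool safepoint_pending() { return false; }

    // Walk 'list', unlink idle nodes into 'out', and if a safepoint is pending
    // record where we stopped (*saved_pos) so the caller can block and resume.
    int deflate_walk_sketch(OMNode** list, std::vector<OMNode*>* out,
                            OMNode** saved_pos) {
      int deflated = 0;
      OMNode* prev = *saved_pos;                   // resume point, may be null
      OMNode* cur  = (prev != nullptr) ? prev->next : *list;
      while (cur != nullptr) {
        OMNode* next = cur->next;
        if (cur->idle) {
          if (prev == nullptr) {
            *list = next;                          // unlink the list head
          } else {
            prev->next = next;                     // unlink an interior node
          }
          cur->next = nullptr;
          out->push_back(cur);
          deflated++;
        } else {
          prev = cur;
        }
        if (safepoint_pending()) {
          *saved_pos = prev;                       // remember where we were
          return deflated;                         // caller honors the safepoint
        }
        cur = next;
      }
      *saved_pos = nullptr;                        // finished the whole list
      return deflated;
    }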

@@ -2321,11 +2130,10 @@
 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
                                                       int* count_p,
                                                       ObjectMonitor** free_head_p,
                                                       ObjectMonitor** free_tail_p,
                                                       ObjectMonitor** saved_mid_in_use_p) {
-  assert(AsyncDeflateIdleMonitors, "sanity check");
   JavaThread* self = JavaThread::current();
 
   ObjectMonitor* cur_mid_in_use = NULL;
   ObjectMonitor* mid = NULL;
   ObjectMonitor* next = NULL;

@@ -2451,79 +2259,10 @@
   // no need to save state.
   *saved_mid_in_use_p = NULL;
   return deflated_count;
 }
 
-void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
-  counters->n_in_use = 0;              // currently associated with objects
-  counters->n_in_circulation = 0;      // extant
-  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
-  counters->per_thread_scavenged = 0;  // per-thread scavenge total
-  counters->per_thread_times = 0.0;    // per-thread scavenge times
-}
-
-void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
-  if (AsyncDeflateIdleMonitors) {
-    // Nothing to do when global idle ObjectMonitors are deflated using
-    // a JavaThread.
-    return;
-  }
-
-  bool deflated = false;
-
-  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
-  ObjectMonitor* free_tail_p = NULL;
-  elapsedTimer timer;
-
-  if (log_is_enabled(Info, monitorinflation)) {
-    timer.start();
-  }
-
-  // Note: the thread-local monitors lists get deflated in
-  // a separate pass. See deflate_thread_local_monitors().
-
-  // For moribund threads, scan om_list_globals._in_use_list
-  int deflated_count = 0;
-  if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
-    // Update n_in_circulation before om_list_globals._in_use_count is
-    // updated by deflation.
-    Atomic::add(&counters->n_in_circulation,
-                Atomic::load(&om_list_globals._in_use_count));
-
-    deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
-                                          &om_list_globals._in_use_count,
-                                          &free_head_p, &free_tail_p);
-    Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
-  }
-
-  if (free_head_p != NULL) {
-    // Move the deflated ObjectMonitors back to the global free list.
-    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
-#ifdef ASSERT
-    ObjectMonitor* l_next_om = free_tail_p->next_om();
-#endif
-    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
-    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
-    Atomic::add(&counters->n_scavenged, deflated_count);
-  }
-  timer.stop();
-
-  LogStreamHandle(Debug, monitorinflation) lsh_debug;
-  LogStreamHandle(Info, monitorinflation) lsh_info;
-  LogStream* ls = NULL;
-  if (log_is_enabled(Debug, monitorinflation)) {
-    ls = &lsh_debug;
-  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
-    ls = &lsh_info;
-  }
-  if (ls != NULL) {
-    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
-  }
-}
-
 class HandshakeForDeflation : public HandshakeClosure {
  public:
   HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
 
   void do_thread(Thread* thread) {

@@ -2531,12 +2270,10 @@
                                 INTPTR_FORMAT, p2i(thread));
   }
 };
 
 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
-  assert(AsyncDeflateIdleMonitors, "sanity check");
-
   // Deflate any global idle monitors.
   deflate_global_idle_monitors_using_JT();
 
   int count = 0;
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {

@@ -2566,25 +2303,29 @@
   if (Atomic::load(&om_list_globals._wait_count) > 0) {
     // There are deflated ObjectMonitors waiting for a handshake
     // (or a safepoint) for safety.
 
     ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
-    ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
+    assert(list != NULL, "om_list_globals._wait_list must not be NULL");
     int count = Atomic::load(&om_list_globals._wait_count);
     Atomic::store(&om_list_globals._wait_count, 0);
     Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
 
     // Find the tail for prepend_list_to_common(). No need to mark
     // ObjectMonitors for this list walk since only the deflater
     // thread manages the wait list.
+#ifdef ASSERT
     int l_count = 0;
+#endif
     ObjectMonitor* tail = NULL;
     for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
       tail = n;
+#ifdef ASSERT
       l_count++;
+#endif
     }
-    ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
+    assert(count == l_count, "count=%d != l_count=%d", count, l_count);
 
     // Will execute a safepoint if !ThreadLocalHandshakes:
     HandshakeForDeflation hfd_hc;
     Handshake::execute(&hfd_hc);
 

@@ -2596,21 +2337,19 @@
 }
 
 // Deflate global idle ObjectMonitors using a JavaThread.
 //
 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
-  assert(AsyncDeflateIdleMonitors, "sanity check");
   assert(Thread::current()->is_Java_thread(), "precondition");
   JavaThread* self = JavaThread::current();
 
   deflate_common_idle_monitors_using_JT(true /* is_global */, self);
 }
 
 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
 //
 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
-  assert(AsyncDeflateIdleMonitors, "sanity check");
   assert(Thread::current()->is_Java_thread(), "precondition");
 
   deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
 }
 

@@ -2709,98 +2448,10 @@
       ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
     }
   }
 }
 
-void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
-  // Report the cumulative time for deflating each thread's idle
-  // monitors. Note: if the work is split among more than one
-  // worker thread, then the reported time will likely be more
-  // than a beginning to end measurement of the phase.
-  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
-
-  if (AsyncDeflateIdleMonitors) {
-    // Nothing to do when idle ObjectMonitors are deflated using
-    // a JavaThread.
-    return;
-  }
-
-  if (log_is_enabled(Debug, monitorinflation)) {
-    // exit_globals()'s call to audit_and_print_stats() is done
-    // at the Info level and not at a safepoint.
-    // For async deflation, audit_and_print_stats() is called in
-    // ObjectSynchronizer::do_safepoint_work() at the Debug level
-    // at a safepoint.
-    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
-  } else if (log_is_enabled(Info, monitorinflation)) {
-    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
-                               "global_free_count=%d, global_wait_count=%d",
-                               Atomic::load(&om_list_globals._population),
-                               Atomic::load(&om_list_globals._in_use_count),
-                               Atomic::load(&om_list_globals._free_count),
-                               Atomic::load(&om_list_globals._wait_count));
-  }
-
-  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
-  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
-
-  GVars.stw_random = os::random();
-  GVars.stw_cycle++;
-}
-
-void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
-  if (AsyncDeflateIdleMonitors) {
-    // Nothing to do when per-thread idle ObjectMonitors are deflated
-    // using a JavaThread.
-    return;
-  }
-
-  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
-  ObjectMonitor* free_tail_p = NULL;
-  elapsedTimer timer;
-
-  if (log_is_enabled(Info, safepoint, cleanup) ||
-      log_is_enabled(Info, monitorinflation)) {
-    timer.start();
-  }
-
-  // Update n_in_circulation before om_in_use_count is updated by deflation.
-  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
-
-  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
-  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
-
-  if (free_head_p != NULL) {
-    // Move the deflated ObjectMonitors back to the global free list.
-    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
-#ifdef ASSERT
-    ObjectMonitor* l_next_om = free_tail_p->next_om();
-#endif
-    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
-    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
-    Atomic::add(&counters->n_scavenged, deflated_count);
-    Atomic::add(&counters->per_thread_scavenged, deflated_count);
-  }
-
-  timer.stop();
-  counters->per_thread_times += timer.seconds();
-
-  LogStreamHandle(Debug, monitorinflation) lsh_debug;
-  LogStreamHandle(Info, monitorinflation) lsh_info;
-  LogStream* ls = NULL;
-  if (log_is_enabled(Debug, monitorinflation)) {
-    ls = &lsh_debug;
-  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
-    ls = &lsh_info;
-  }
-  if (ls != NULL) {
-    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
-  }
-}
-
 // Monitor cleanup on JavaThread::exit
 
 // Iterate through monitor cache and attempt to release thread's monitors
 // Gives up on a particular monitor if an exception occurs, but continues
 // the overall iteration, swallowing the exception.

@@ -2881,12 +2532,10 @@
 //
 // Calls to this function can be added in various places as a debugging
 // aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
 // details logged at the Info level and 'false' for the 'on_exit'
 // parameter to have in-use monitor details logged at the Trace level.
-// deflate_monitor_list() no longer uses spin-locking so be careful
-// when adding audit_and_print_stats() calls at a safepoint.
 //
 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
 
   LogStreamHandle(Debug, monitorinflation) lsh_debug;

@@ -2981,15 +2630,10 @@
       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                     ": free per-thread monitor must have NULL _header "
                     "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                     n->header().value());
       *error_cnt_p = *error_cnt_p + 1;
-    } else if (!AsyncDeflateIdleMonitors) {
-      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
-                    "must have NULL _header field: _header=" INTPTR_FORMAT,
-                    p2i(n), n->header().value());
-      *error_cnt_p = *error_cnt_p + 1;
     }
   }
   if (n->object() != NULL) {
     if (jt != NULL) {
       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT