< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page
rev 55489 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 55490 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 55491 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 55492 : imported patch dcubed.monitor_deflate_conc.v2.03
rev 55493 : imported patch dcubed.monitor_deflate_conc.v2.04
rev 55494 : imported patch dcubed.monitor_deflate_conc.v2.05

@@ -253,10 +253,20 @@
 
       if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
         assert(m->_recursions == 0, "invariant");
         return true;
       }
+
+      if (AsyncDeflateIdleMonitors &&
+          Atomic::cmpxchg(Self, &m->_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // The deflation protocol finished the first part (setting owner),
+        // but it failed the second part (making ref_count negative) and
+        // bailed. Or the ObjectMonitor was async deflated and reused.
+        // Acquired the monitor.
+        assert(m->_recursions == 0, "invariant");
+        return true;
+      }
     }
     break;
   }
 
   // Note that we could inflate in quick_enter.

@@ -1019,10 +1029,17 @@
     return monitor_usage > MonitorUsedDeflationThreshold;
   }
   return false;
 }
 
+// Returns true if MonitorBound is set (> 0) and if the specified
+// cnt is > MonitorBound. Otherwise returns false.
+static bool is_MonitorBound_exceeded(const int cnt) {
+  const int mx = MonitorBound;
+  return mx > 0 && cnt > mx;
+}
+
 bool ObjectSynchronizer::is_async_deflation_needed() {
   if (!AsyncDeflateIdleMonitors) {
     return false;
   }
   if (is_async_deflation_requested()) {

@@ -1037,10 +1054,14 @@
     // than AsyncDeflationInterval (unless is_async_deflation_requested)
     // in order to not swamp the ServiceThread.
     _last_async_deflation_time_ns = os::javaTimeNanos();
     return true;
   }
+  if (is_MonitorBound_exceeded(gMonitorPopulation - gMonitorFreeCount)) {
+    // Not enough ObjectMonitors on the global free list.
+    return true;
+  }
   return false;
 }
 
 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
   if (!AsyncDeflateIdleMonitors) {

@@ -1109,10 +1130,13 @@
 //      to the objectmonitor.
 
 
 // Constraining monitor pool growth via MonitorBound ...
 //
+// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
+//
+// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
 // The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
 // rate of scavenging is driven primarily by GC.  As such,  we can find
 // an inordinate number of monitors in circulation.
 // To avoid that scenario we can artificially induce a STW safepoint
 // if the pool appears to be growing past some reasonable bound.

@@ -1122,12 +1146,30 @@
 // we could just loop. In addition, if MonitorBound is set to a low value
 // we'll incur more safepoints, which are harmful to performance.
 // See also: GuaranteedSafepointInterval
 //
 // The current implementation uses asynchronous VM operations.
+//
+// When safepoint deflation is being used and MonitorBound is set, the
+// boundary applies to (gMonitorPopulation - gMonitorFreeCount), i.e.,
+// if there are not enough ObjectMonitors on the global free list, then
+// a safepoint deflation is induced. Picking a good MonitorBound value
+// is non-trivial.
+//
+// When async deflation is being used:
+// The monitor pool is still grow-only. Async deflation is requested
+// by a safepoint's cleanup phase or by the ServiceThread at periodic
+// intervals when is_async_deflation_needed() returns true. In
+// addition to other policies that are checked, if there are not
+// enough ObjectMonitors on the global free list, then
+// is_async_deflation_needed() will return true. The ServiceThread
+// calls deflate_global_idle_monitors_using_JT() and also sets the
+// per-thread omShouldDeflateIdleMonitors flag as needed.
 
 static void InduceScavenge(Thread * Self, const char * Whence) {
+  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
+
   // Induce STW safepoint to trim monitors
   // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
   // More precisely, trigger an asynchronous STW safepoint as the number
   // of active monitors passes the specified threshold.
   // TODO: assert thread state is reasonable

@@ -1155,13 +1197,14 @@
     if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 &&
         cause != inflate_cause_vm_internal) {
       // Deflate any per-thread idle monitors for this JavaThread if
       // this is not an internal inflation; internal inflations can
       // occur in places where it is not safe to pause for a safepoint.
-      // Clean up your own mess. (Gibbs Rule 45) Otherwise, skip this
+      // Clean up your own mess (Gibbs Rule 45). Otherwise, skip this
       // deflation. deflate_global_idle_monitors_using_JT() is called
-      // by the ServiceThread.
+      // by the ServiceThread. Per-thread async deflation is triggered
+      // by the ServiceThread via omShouldDeflateIdleMonitors.
       debug_only(jt->check_for_valid_safepoint_state(false);)
       ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
     }
   }
 

@@ -1201,14 +1244,16 @@
         gMonitorFreeCount--;
         ObjectMonitor * take = gFreeList;
         gFreeList = take->FreeNext;
         guarantee(take->object() == NULL, "invariant");
         if (AsyncDeflateIdleMonitors) {
-          // Clear any values we allowed to linger during async deflation.
+          // We allowed 3 field values to linger during async deflation.
+          // We clear header and restore ref_count here, but we leave
+          // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
+          // enter optimization can no longer race with async deflation
+          // and reuse.
           take->_header = NULL;
-          take->set_owner(NULL);
-
           if (take->ref_count() < 0) {
             // Add back max_jint to restore the ref_count field to its
             // proper value.
             Atomic::add(max_jint, &take->_ref_count);
 

@@ -1222,12 +1267,13 @@
       }
       Thread::muxRelease(&gListLock);
       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
 
-      const int mx = MonitorBound;
-      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
+      if (!AsyncDeflateIdleMonitors &&
+          is_MonitorBound_exceeded(gMonitorPopulation - gMonitorFreeCount)) {
+        // Not enough ObjectMonitors on the global free list.
         // We can't safely induce a STW safepoint from omAlloc() as our thread
         // state may not be appropriate for such activities and callers may hold
         // naked oops, so instead we defer the action.
         InduceScavenge(Self, "omAlloc");
       }

@@ -1671,10 +1717,13 @@
       object = h_obj();  // Refresh object.
     }
     // prepare m for installation - set monitor to initial state
     m->Recycle();
     m->set_header(mark);
+    // If we leave _owner == DEFLATER_MARKER here, then the simple C2
+    // ObjectMonitor enter optimization can no longer race with async
+    // deflation and reuse.
     m->set_object(object);
     m->_Responsible  = NULL;
     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
 
     omh_p->set_om_ptr(m);

@@ -1713,10 +1762,11 @@
 }
 
 
 // We maintain a list of in-use monitors for each thread.
 //
+// For safepoint based deflation:
 // deflate_thread_local_monitors() scans a single thread's in-use list, while
 // deflate_idle_monitors() scans only a global list of in-use monitors which
 // is populated only as a thread dies (see omFlush()).
 //
 // These operations are called at all safepoints, immediately after mutators

@@ -1731,10 +1781,15 @@
 //
 // Perversely, the heap size -- and thus the STW safepoint rate --
 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
 // This is an unfortunate aspect of this design.
+//
+// For async deflation:
+// If a special deflation request is made, then the safepoint based
+// deflation mechanism is used. Otherwise, an async deflation request
+// is registered with the ServiceThread and it is notified.
 
 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 
   // The per-thread in-use lists are handled in

@@ -1769,11 +1824,13 @@
   guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
             ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
   const markOop dmw = mid->header();
   guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
 
-  if (mid->is_busy()) {
+  if (mid->is_busy() || mid->ref_count() != 0) {
+    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
+    // is in use so no deflation.
     deflated = false;
   } else {
     // Deflate the monitor if it is no longer being used
     // It's idle - scavenge and return to the global free list
     // plain old deflation ...

@@ -1785,10 +1842,16 @@
                                   p2i(mark), obj->klass()->external_name());
     }
 
     // Restore the header back to obj
     obj->release_set_mark(dmw);
+    if (AsyncDeflateIdleMonitors) {
+      // clear() expects the owner field to be NULL and we won't race
+      // with the simple C2 ObjectMonitor enter optimization since
+      // we're at a safepoint.
+      mid->set_owner(NULL);
+    }
     mid->clear();
 
     assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
            p2i(mid->object()));
     assert(mid->is_free(), "invariant");

@@ -1855,11 +1918,11 @@
     if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
       // Make ref_count negative to force any contending threads or
       // ObjectMonitor* using threads to retry. This is the second
       // part of the async deflation dance.
 
-      if (mid->_owner == DEFLATER_MARKER) {
+      if (mid->owner_is_DEFLATER_MARKER()) {
         // If owner is still DEFLATER_MARKER, then we have successfully
         // signaled any contending threads to retry. If it is not, then we
         // have lost the race to an entering thread and the ObjectMonitor
         // is now busy. This is the third and final part of the async
         // deflation dance.

@@ -2518,12 +2581,11 @@
 
 // Check a free monitor entry; log any errors.
 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
                                         outputStream * out, int *error_cnt_p) {
   stringStream ss;
-  if ((!AsyncDeflateIdleMonitors && n->is_busy()) ||
-      (AsyncDeflateIdleMonitors && n->is_busy_async())) {
+  if (n->is_busy()) {
     if (jt != NULL) {
       out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                     ": free per-thread monitor must not be busy: %s", p2i(jt),
                     p2i(n), n->is_busy_to_string(&ss));
     } else {
< prev index next >