
src/hotspot/share/runtime/objectMonitor.cpp

rev 54612 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54613 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 54614 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 54615 : imported patch dcubed.monitor_deflate_conc.v2.03

@@ -237,10 +237,12 @@
 
 // -----------------------------------------------------------------------------
 // Enter support
 
 void ObjectMonitor::enter(TRAPS) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
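
The new ADIM_guarantee() above escalates the check when async deflation is
in play, so it also fires in product builds where the new mechanism is
active. A plausible shape for the macro (an assumed sketch; the real
definition lives in objectMonitor.hpp, which is not shown in this webrev):

  #define ADIM_guarantee(cond, ...)                               \
    do {                                                          \
      if (AsyncDeflateIdleMonitors) {                             \
        guarantee(cond, __VA_ARGS__);  /* checked in all builds */\
      } else {                                                    \
        assert(cond, __VA_ARGS__);     /* debug builds only */    \
      }                                                           \
    } while (false)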

@@ -291,16 +293,18 @@
   assert(_succ != Self, "invariant");
   assert(Self->is_Java_thread(), "invariant");
   JavaThread * jt = (JavaThread *) Self;
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(jt->thread_state() != _thread_blocked, "invariant");
-  assert(this->object() != NULL, "invariant");
-  assert(_contentions >= 0, "invariant");
+  assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
+  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 
-  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
-  // Ensure the object-monitor relationship remains stable while there's contention.
-  Atomic::inc(&_contentions);
+  // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
+  // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
+  // Ensure the object <-> monitor relationship remains stable while
+  // there's contention.
+  Atomic::add(1, &_contentions);
 
   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
   EventJavaMonitorEnter event;
   if (event.should_commit()) {
     event.set_monitorClass(((oop)this->object())->klass());
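
The _contentions bump above is one input to the deflater's idleness test: a
monitor with pending contention is never a deflation candidate. A minimal
sketch of that relationship, assuming a stripped-down monitor (portable C++
with hypothetical names; the real predicate is ObjectMonitor::is_busy()):

  #include <atomic>

  struct MiniMonitor {
    std::atomic<void*> owner{nullptr};
    std::atomic<int>   contentions{0};
  };

  // Deflater side: only an apparently idle monitor may be deflated.
  bool looks_idle(const MiniMonitor* m) {
    return m->owner.load() == nullptr && m->contentions.load() == 0;
  }

  // Contender side: publish contention before parking so the monitor
  // is never deflated out from under a blocked thread.
  void before_park(MiniMonitor* m) {
    m->contentions.fetch_add(1);
  }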

@@ -358,11 +362,11 @@
     // states will still report that the thread is blocked trying to
     // acquire it.
   }
 
   Atomic::dec(&_contentions);
-  assert(_contentions >= 0, "invariant");
+  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
   assert(_recursions == 0, "invariant");
   assert(_owner == Self, "invariant");

@@ -415,13 +419,94 @@
   // We can either return -1 or retry.
   // Retry doesn't make as much sense because the lock was just acquired.
   return -1;
 }
 
+// Install the displaced mark word (dmw) of a deflating ObjectMonitor
+// into the header of the object associated with the monitor. This
+// idempotent method is called by a thread that is deflating a
+// monitor and by other threads that have detected a race with the
+// deflation process.
+void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
+  // This function must only be called when (owner == DEFLATER_MARKER
+  // && ref_count <= 0), but we can't guarantee that here because
+  // those values could change when the ObjectMonitor gets moved from
+  // the global free list to a per-thread free list.
+
+  guarantee(obj != NULL, "must be non-NULL");
+  if (object() != obj) {
+    // ObjectMonitor's object ref no longer refers to the target object
+    // so the object's header has already been restored.
+    return;
+  }
+
+  markOop dmw = header();
+  if (dmw == NULL) {
+    // ObjectMonitor's header/dmw has been cleared by the deflating
+    // thread so the object's header has already been restored.
+    return;
+  }
+
+  // A non-NULL dmw has to be either neutral (not locked and not marked)
+  // or already participating in this restoration protocol.
+  assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
+         "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
+
+  markOop marked_dmw = NULL;
+  if (!dmw->is_marked() && dmw->hash() == 0) {
+    // This dmw has not yet started the restoration protocol so we
+    // mark a copy of the dmw to begin the protocol.
+    // Note: A dmw with a hashcode does not take this code path.
+    marked_dmw = dmw->set_marked();
+
+    // All of the callers to this function can be racing with each
+    // other trying to update the _header field.
+    dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
+    if (dmw == NULL) {
+      // ObjectMonitor's header/dmw has been cleared by the deflating
+      // thread so the object's header has already been restored.
+      return;
+    }
+    // The _header field is now marked. The winner's 'dmw' variable
+    // contains the original, unmarked header/dmw value and any
+    // losers have a marked header/dmw value that will be cleaned
+    // up below.
+  }
+
+  if (dmw->is_marked()) {
+    // Clear the mark from the header/dmw copy in preparation for
+    // possible restoration from this thread.
+    assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
+           p2i(dmw));
+    dmw = dmw->set_unmarked();
+  }
+  assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
+
+  // Install displaced mark word if the object's header still points
+  // to this ObjectMonitor. All racing callers to this function will
+  // reach this point, but only one can win.
+  obj->cas_set_mark(dmw, markOopDesc::encode(this));
+
+  // Note: It does not matter which thread restored the header/dmw
+  // into the object's header. The thread deflating the monitor just
+  // wanted the object's header restored and it is. The threads that
+  // detected a race with the deflation process also wanted the
+  // object's header restored before they retry their operation; now
+  // that it is restored, they will retry only once.
+
+  if (marked_dmw != NULL) {
+    // Clear _header to NULL if it is still marked_dmw so a racing
+    // install_displaced_markword_in_object() can bail out sooner.
+    Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
+  }
+}
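
The mark/unmark dance above can be modeled compactly. A sketch of the same
protocol using the low bit of a word as the mark bit (portable C++ with
hypothetical names; markOop carries the real bit layout):

  #include <atomic>
  #include <cstdint>

  static std::atomic<uintptr_t> _hdr;   // stands in for _header (the dmw)

  static bool      marked(uintptr_t w) { return (w & 1) != 0; }
  static uintptr_t mark(uintptr_t w)   { return w | 1; }
  static uintptr_t unmark(uintptr_t w) { return w & ~(uintptr_t)1; }

  // Returns the neutral dmw to CAS into the object's header, or 0 if
  // the header was already restored and _hdr already cleared.
  uintptr_t claim_dmw() {
    uintptr_t dmw = _hdr.load();
    if (dmw == 0) return 0;                  // already restored
    if (!marked(dmw)) {
      uintptr_t m = mark(dmw);
      // The CAS winner keeps the original dmw; losers observe the
      // marked (or cleared) value in dmw after the failed CAS.
      if (!_hdr.compare_exchange_strong(dmw, m) && dmw == 0) {
        return 0;                            // cleared while racing
      }
    }
    return unmark(dmw);                      // neutral, safe to install
  }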
+
 #define MAX_RECHECK_INTERVAL 1000
 
 void ObjectMonitor::EnterI(TRAPS) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 
   // Try the lock - TATAS
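
("TATAS" = test-and-test-and-set: read the owner field first and only
attempt the CAS when the read says the lock looks free, which keeps the
cache line in shared state under contention. A generic sketch, not the
real TryLock():)

  #include <atomic>

  bool try_lock_tatas(std::atomic<void*>& owner, void* self) {
    if (owner.load(std::memory_order_relaxed) != nullptr) {
      return false;                    // test: looks held, skip the CAS
    }
    void* expected = nullptr;
    return owner.compare_exchange_strong(expected, self);  // test-and-set
  }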

@@ -430,10 +515,21 @@
     assert(_owner == Self, "invariant");
     assert(_Responsible != Self, "invariant");
     return;
   }
 
+  if (_owner == DEFLATER_MARKER) {
+    // The deflation protocol finished the first part (setting owner), but
+    // it failed the second part (making ref_count negative) and bailed.
+    if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+      // Acquired the monitor.
+      assert(_succ != Self, "invariant");
+      assert(_Responsible != Self, "invariant");
+      return;
+    }
+  }
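
For context, the handshake this races with looks roughly as follows on the
deflater's side (a simplified sketch with hypothetical names; the real code
is ObjectSynchronizer::deflate_monitor_using_JT()):

  #include <atomic>
  #include <climits>

  struct MiniMonitor {
    std::atomic<void*> owner{nullptr};
    std::atomic<int>   ref_count{0};
  };

  static void* const DEFLATER = reinterpret_cast<void*>(-1);

  bool try_async_deflate(MiniMonitor* m) {
    // Part 1: claim an apparently idle monitor.
    void* no_owner = nullptr;
    if (!m->owner.compare_exchange_strong(no_owner, DEFLATER)) {
      return false;                  // owned: not idle after all
    }
    // Part 2: force ref_count negative. A contender that bumped
    // ref_count in the meantime makes this CAS fail.
    int zero = 0;
    if (!m->ref_count.compare_exchange_strong(zero, INT_MIN)) {
      // Bail; owner may be left as DEFLATER briefly, and the
      // contender's cmpxchg above claims the monitor in that window.
      return false;
    }
    // ... finish deflation: restore object header, recycle monitor ...
    return true;
  }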
+
   assert(InitDone, "Unexpectedly not initialized");
 
   // We try one round of spinning *before* enqueueing Self.
   //
   // If the _owner is ready but OFFPROC we could use a YieldTo()

@@ -546,10 +642,19 @@
       Self->_ParkEvent->park();
     }
 
     if (TryLock(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      // The deflation protocol finished the first part (setting owner), but
+      // it failed the second part (making ref_count negative) and bailed.
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // The lock is still contested.
     // Keep a tally of the # of futile wakeups.
     // Note that the counter is not protected by a lock or updated by atomics.
     // That is by design - we trade "lossy" counters which are exposed to
     // races during updates for a lower probe effect.
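
A "lossy" counter here means a plain, non-atomic update: an increment may
occasionally be dropped under a race, but no atomic read-modify-write is
paid on a path that only feeds statistics. In sketch form:

  // Statistics only: a dropped update is acceptable by design.
  static volatile int futile_wakeups = 0;

  void note_futile_wakeup() {
    futile_wakeups = futile_wakeups + 1;   // racy on purpose
  }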

@@ -650,10 +755,12 @@
 // monitor reentry in wait().
 //
 // In the future we should reconcile EnterI() and ReenterI().
 
 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   assert(Self != NULL, "invariant");
   assert(SelfNode != NULL, "invariant");
   assert(SelfNode->_thread == Self, "invariant");
   assert(_waiters > 0, "invariant");
   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");

@@ -667,10 +774,19 @@
     assert(_owner != Self, "invariant");
 
     if (TryLock(Self) > 0) break;
     if (TrySpin(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      // The deflation protocol finished the first part (setting owner), but
+      // it failed the second part (making ref_count negative) and bailed.
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // State transition wrappers around park() ...
     // ReenterI() wisely defers state transitions until
     // it's clear we must park the thread.
     {
       OSThreadContendState osts(Self->osthread());

@@ -874,11 +990,12 @@
       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
       // Upon deeper reflection, however, in a properly run JVM the only
       // way we should encounter this situation is in the presence of
       // unbalanced JNI locking. TODO: CheckJNICalls.
       // See also: CR4414101
-      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
+      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
+             "owner=" INTPTR_FORMAT, p2i(_owner));
       return;
     }
   }
 
   if (_recursions != 0) {

@@ -1130,14 +1247,14 @@
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   guarantee(_owner != Self, "reenter already owner");
-  enter(THREAD);       // enter the monitor
+  enter(THREAD);
+  // Entered the monitor.
   guarantee(_recursions == 0, "reenter recursion");
   _recursions = recursions;
-  return;
 }
 
 
 // -----------------------------------------------------------------------------
 // A macro is used below because there may already be a pending

@@ -1924,5 +2041,79 @@
 #undef NEWPERFVARIABLE
   }
 
   DEBUG_ONLY(InitDone = true;)
 }
+
+// For internal use by ObjectSynchronizer::monitors_iterate().
+ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
+  om_ptr->inc_ref_count();
+  _om_ptr = om_ptr;
+}
+
+ObjectMonitorHandle::~ObjectMonitorHandle() {
+  if (_om_ptr != NULL) {
+    _om_ptr->dec_ref_count();
+    _om_ptr = NULL;
+  }
+}
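
The constructor/destructor pair gives ref_count RAII semantics: while a
handle is live, the monitor cannot be async-deflated. A usage sketch
(hypothetical caller):

  {
    ObjectMonitorHandle omh(om);   // inc_ref_count(): deflation blocked
    // ... examine or use the ObjectMonitor safely ...
  }                                // ~ObjectMonitorHandle: dec_ref_count()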
+
+// Save the ObjectMonitor* associated with the specified markOop and
+// increment the ref_count. This function should only be called if
+// the caller has verified mark->has_monitor() == true. The object
+// parameter is needed to verify that the ObjectMonitor* has not been
+// deflated and reused for another object.
+//
+// This function returns true if the ObjectMonitor* has been safely
+// saved. It returns false if we have lost a race with async
+// deflation; the caller should retry as appropriate.
+//
+bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
+  guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
+            p2i(mark));
+
+  ObjectMonitor * om_ptr = mark->monitor();
+  om_ptr->inc_ref_count();
+
+  if (AsyncDeflateIdleMonitors) {
+    // Race here if monitor is not owned! The above ref_count bump
+    // will cause subsequent async deflation to skip it. However, a
+    // deflation begun earlier or running concurrently can still race
+    // with us; the checks below detect exactly that.
+    if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
+      // Async deflation is in progress and our ref_count increment
+      // above lost the race to async deflation. Attempt to restore
+      // the header/dmw to the object's header so that we only retry
+      // once if the deflater thread happens to be slow.
+      om_ptr->install_displaced_markword_in_object(object);
+      om_ptr->dec_ref_count();
+      return false;
+    }
+    // The ObjectMonitor could have been deflated and reused for
+    // another object before we bumped the ref_count, so make sure
+    // our object still refers to this ObjectMonitor.
+    const markOop tmp = object->mark();
+    if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
+      // Async deflation and reuse won the race so we have to retry.
+      // Skip object header restoration since that's already done.
+      om_ptr->dec_ref_count();
+      return false;
+    }
+  }
+
+  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
+                 p2i(_om_ptr));
+  _om_ptr = om_ptr;
+  return true;
+}
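
Callers are expected to wrap save_om_ptr() in a retry loop, since a false
return only signals a lost race, not an error. A sketch of that pattern
(hypothetical caller; the real ones live in synchronizer.cpp):

  ObjectMonitorHandle omh;
  while (true) {
    markOop mark = object->mark();
    if (!mark->has_monitor()) {
      // ... not inflated: inflate or handle the other mark states ...
      break;
    }
    if (omh.save_om_ptr(object, mark)) {
      break;                   // ref_count held; monitor is safe to use
    }
    // Lost a race with async deflation; re-read the mark and retry.
  }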
+
+// For internal use by ObjectSynchronizer::inflate().
+void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
+  if (_om_ptr == NULL) {
+    ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
+    om_ptr->inc_ref_count();
+    _om_ptr = om_ptr;
+  } else {
+    ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
+    _om_ptr->dec_ref_count();
+    _om_ptr = NULL;
+  }
+}
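
Note that set_om_ptr() deliberately toggles: a non-NULL argument is only
legal when the handle is unset, and NULL only when it is set. A sketch of
the intended pairing in ObjectSynchronizer::inflate() (hypothetical usage):

  omh.set_om_ptr(m);      // set: bumps m's ref_count while it is exposed
  // ... window where m must not be async-deflated ...
  omh.set_om_ptr(NULL);   // clear: drops the ref_count again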