
src/hotspot/share/runtime/objectMonitor.cpp

rev 54110 : Checkpoint latest preliminary review patches for full OpenJDK review.

@@ -236,36 +236,36 @@
 }
 
 // -----------------------------------------------------------------------------
 // Enter support
 
-void ObjectMonitor::enter(TRAPS) {
+bool ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
   if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
-    return;
+    return true;
   }
 
   if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions++;
-    return;
+    return true;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
     assert(_recursions == 0, "internal state error");
     _recursions = 1;
     // Commute owner from a thread-specific on-stack BasicLockObject address to
     // a full-fledged "Thread *".
     _owner = Self;
-    return;
+    return true;
   }
 
   // We've encountered genuine contention.
   assert(Self->_Stalled == 0, "invariant");
   Self->_Stalled = intptr_t(this);

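The rewritten fast paths above turn enter() into a bool so callers can detect a lost race with async deflation and retry. A minimal sketch of those CAS-based fast paths, using std::atomic in place of HotSpot's Atomic::cmpxchg (the struct and names are illustrative, not the real ObjectMonitor layout):

#include <atomic>

struct Thread;

// Toy model of the enter() fast paths: not the real ObjectMonitor,
// just the compare-and-swap ownership protocol the code above uses.
struct ToyMonitor {
  std::atomic<Thread*> owner{nullptr};
  int recursions = 0;

  bool enter(Thread* self) {
    Thread* expected = nullptr;
    // Fast path 1: CAS NULL -> self claims an unowned monitor.
    if (owner.compare_exchange_strong(expected, self)) {
      return true;
    }
    // On CAS failure, expected now holds the current owner.
    // Fast path 2: recursive entry by the current owner.
    if (expected == self) {
      recursions++;
      return true;
    }
    return false;  // Contended (slow path elided in this sketch).
  }
};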
@@ -278,25 +278,33 @@
   if (TrySpin(Self) > 0) {
     assert(_owner == Self, "invariant");
     assert(_recursions == 0, "invariant");
     assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
     Self->_Stalled = 0;
-    return;
+    return true;
   }
 
   assert(_owner != Self, "invariant");
   assert(_succ != Self, "invariant");
   assert(Self->is_Java_thread(), "invariant");
   JavaThread * jt = (JavaThread *) Self;
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(jt->thread_state() != _thread_blocked, "invariant");
-  assert(this->object() != NULL, "invariant");
-  assert(_count >= 0, "invariant");
+  assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
+  assert(AsyncDeflateIdleMonitors || _count >= 0, "invariant");
 
-  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
+  // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
   // Ensure the object-monitor relationship remains stable while there's contention.
-  Atomic::inc(&_count);
+  const jint count = Atomic::add(1, &_count);
+  if (count <= 0 && _owner == DEFLATER_MARKER) {
+    // Async deflation in progress. Help deflater thread install
+    // the mark word (in case deflater thread is slow).
+    install_displaced_markword_in_object();
+    Self->_Stalled = 0;
+    return false;  // Caller should retry. _count no longer matters because this monitor has been deflated.
+  }
+  // The deflater thread will not deflate this monitor; it is now contended, so continue.
 
   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
   EventJavaMonitorEnter event;
   if (event.should_commit()) {
     event.set_monitorClass(((oop)this->object())->klass());

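The increment-then-check sequence above is the mutator's half of a two-sided handshake with the deflater thread. The deflater's half is not in this hunk, so the sketch below models it from the comments (owner is marked with DEFLATER_MARKER and _count is driven negative); treat the deflater side as an assumption about the protocol's shape, not the actual deflate_monitor_using_JT() code:

#include <atomic>
#include <climits>
#include <cstdint>

// Stand-in sentinel; the real marker value lives in the header file.
static void* const DEFLATER_MARKER =
    reinterpret_cast<void*>(static_cast<std::intptr_t>(-1));

struct ToyDeflatableMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<int>   count{0};

  // Mutator side, mirroring the hunk above: bump count first, then
  // check whether an in-progress deflation already claimed us.
  bool try_pin_for_enter() {
    int c = count.fetch_add(1) + 1;        // Atomic::add(1, &_count)
    if (c <= 0 && owner.load() == DEFLATER_MARKER) {
      return false;  // Deflation won; caller retries on a new monitor.
    }
    return true;     // Pinned: the deflater will now skip us.
  }

  // Assumed deflater side: claim an idle monitor, then try to force
  // count negative before any mutator pins it.
  bool try_deflate() {
    void* expected = nullptr;
    if (!owner.compare_exchange_strong(expected, DEFLATER_MARKER)) {
      return false;  // Monitor is owned, hence not idle.
    }
    int zero = 0;
    if (!count.compare_exchange_strong(zero, INT_MIN)) {
      // A mutator pinned the monitor first; give up. Note that
      // DEFLATER_MARKER is left in owner: the EnterI() changes below
      // show contending threads cmpxchg-ing it out.
      return false;
    }
    return true;     // Safe to deflate.
  }
};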
@@ -354,11 +362,11 @@
     // states will still report that the thread is blocked trying to
     // acquire it.
   }
 
   Atomic::dec(&_count);
-  assert(_count >= 0, "invariant");
+  assert(AsyncDeflateIdleMonitors || _count >= 0, "invariant");
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
   assert(_recursions == 0, "invariant");
   assert(_owner == Self, "invariant");

@@ -390,10 +398,11 @@
   if (event.should_commit()) {
     event.set_previousOwner((uintptr_t)_previous_owner_tid);
     event.commit();
   }
   OM_PERFDATA_OP(ContendedLockAttempts, inc());
+  return true;
 }
 
 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 // Callers must compensate as needed.
 

@@ -411,10 +420,71 @@
   // We can either return -1 or retry.
   // Retry doesn't make as much sense because the lock was just acquired.
   return -1;
 }
 
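The caveat above implies a tri-state contract for TryLock(): success, visibly owned, or a lost CAS race. Only the tail of that logic appears in this hunk, so the following is a hedged sketch of the contract rather than the real body:

#include <atomic>

struct Thread;

// Tri-state trylock sketch:
//   1 : acquired the lock
//   0 : lock was visibly owned; no atomic operation was attempted
//  -1 : lock looked free but we lost the CAS race
// A 0 or -1 return made no successful serializing access, which is
// why callers must compensate when TryLock() reports failure.
inline int toy_try_lock(std::atomic<Thread*>& owner, Thread* self) {
  if (owner.load(std::memory_order_relaxed) != nullptr) {
    return 0;
  }
  Thread* expected = nullptr;
  if (owner.compare_exchange_strong(expected, self)) {
    return 1;
  }
  return -1;  // "Retry doesn't make as much sense": see above.
}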
+// Install the displaced markword of a deflated monitor into the object
+// associated with the monitor.
+// This method is idempotent and is executed by both mutators wanting to
+// acquire a monitor for an object and the thread deflating monitors.
+// A mutator trying to install a hash in the monitor's _header field can
+// also run in parallel to this method.
+void ObjectMonitor::install_displaced_markword_in_object() {
+  markOop dmw = header();
+  if (dmw == NULL) {
+    // The thread deflating monitors has won the race so we
+    // have nothing to do.
+    return;
+  }
+
+  // A non-NULL dmw has to be either neutral or participating in
+  // this restoration protocol.
+  assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
+         "failed precondition: is_neutral=%d, is_marked=%d, hash="
+         INTPTR_FORMAT, dmw->is_neutral(), dmw->is_marked(), dmw->hash());
+
+  if (!dmw->is_marked() && dmw->hash() == 0) {
+    // This dmw is neutral and has not yet started the restoration
+    // protocol so we mark a copy of the dmw to begin the protocol.
+    markOop marked_dmw = dmw->set_marked();
+    assert(marked_dmw->is_marked() && marked_dmw->hash() == 0,
+           "sanity_check: is_marked=%d, hash=" INTPTR_FORMAT,
+           marked_dmw->is_marked(), marked_dmw->hash());
+
+    // There can be three different racers trying to update the _header
+    // field and the returned dmw value will tell us what cleanup needs
+    // to be done (if any) after the race is decided:
+    //   1)  A mutator trying to install a hash in the object.
+    //       Note: That mutator is not executing this code, but it is
+    //       trying to update the _header field.
+    //       If winner: dmw will contain the hash and be unmarked
+    //   2a) A mutator trying to acquire the monitor via enter():
+    //       If winner: dmw is marked and hash() == 0
+    //   2b) The thread deflating the monitor via deflate_monitor_using_JT():
+    //       If winner: dmw is marked and hash() == 0
+    dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
+  }
+
+  if (dmw->is_marked()) {
+    // The dmw copy is marked which means a hash was not set by a racing
+    // thread. Clear the mark from the copy in preparation for possible
+    // restoration from this thread.
+    assert(dmw->hash() == 0, "must be 0: hash=" INTPTR_FORMAT, dmw->hash());
+    dmw = dmw->set_unmarked();
+  }
+  assert(dmw->is_neutral(), "must be a neutral markword");
+
+  oop const obj = (oop) object();
+  // Install displaced markword if object markword still points to this
+  // monitor. Both the mutator trying to enter() and the thread deflating
+  // the monitor will reach this point, but only one can win.
+  // Note: If a mutator won the cmpxchg() race above and installed a hash
+  // in _header, then the updated dmw contains that hash and we'll install
+  // it in the object's markword here.
+  obj->cas_set_mark(dmw, markOopDesc::encode(this));
+}
+
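The three-racer protocol above leans on a single mark bit in the displaced markword. A toy model using bit 0 of a uintptr_t in place of markOop's mark bit (illustrative only; it does not mirror markOopDesc's real encoding) shows why the routine is idempotent: every caller converges on the same neutral value regardless of which cmpxchg wins:

#include <atomic>
#include <cstdint>

using toy_mark = std::uintptr_t;
constexpr toy_mark MARK_BIT = 1;  // Stand-in for the markOop mark bit.

inline bool is_marked(toy_mark m)        { return (m & MARK_BIT) != 0; }
inline toy_mark set_marked(toy_mark m)   { return m | MARK_BIT; }
inline toy_mark set_unmarked(toy_mark m) { return m & ~MARK_BIT; }

// Mirrors the _header half of the protocol: whichever racer wins the
// cmpxchg, all of them compute the same neutral dmw to restore, so
// running this twice (mutator and deflater) is harmless.
toy_mark dmw_to_restore(std::atomic<toy_mark>& header) {
  toy_mark dmw = header.load();
  if (!is_marked(dmw)) {
    // Begin the protocol. On failure, compare_exchange_strong
    // refreshes dmw with the winner's value: either a marked dmw
    // from another restorer or a hash installed by a third racer.
    toy_mark marked = set_marked(dmw);
    header.compare_exchange_strong(dmw, marked);
  }
  if (is_marked(dmw)) {
    dmw = set_unmarked(dmw);  // Strip the protocol bit before use.
  }
  return dmw;  // Neutral value to cas_set_mark() into the object.
}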
 #define MAX_RECHECK_INTERVAL 1000
 
 void ObjectMonitor::EnterI(TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");

@@ -426,10 +496,22 @@
     assert(_owner == Self, "invariant");
     assert(_Responsible != Self, "invariant");
     return;
   }
 
+  if (_owner == DEFLATER_MARKER) {
+    guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
+    // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
+    // Try to acquire monitor.
+    if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+      assert(_succ != Self, "invariant");
+      assert(_owner == Self, "invariant");
+      assert(_Responsible != Self, "invariant");
+      return;
+    }
+  }
+
   assert(InitDone, "Unexpectedly not initialized");
 
   // We try one round of spinning *before* enqueueing Self.
   //
   // If the _owner is ready but OFFPROC we could use a YieldTo()

@@ -542,10 +624,19 @@
       Self->_ParkEvent->park();
     }
 
     if (TryLock(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
+      // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // The lock is still contested.
     // Keep a tally of the # of futile wakeups.
     // Note that the counter is not protected by a lock or updated by atomics.
     // That is by design - we trade "lossy" counters which are exposed to
     // races during updates for a lower probe effect.

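As the comment notes, the futile-wakeup tally deliberately trades accuracy for a smaller probe effect. A short illustration of that design choice (the counter name is hypothetical):

// Deliberately non-atomic "lossy" counter: concurrent increments may
// be lost, but the hot path pays no lock-prefix or fence cost.
static int futile_wakeups = 0;
inline void note_futile_wakeup() {
  futile_wakeups = futile_wakeups + 1;  // Racy RMW; losses tolerated.
}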
@@ -663,10 +754,18 @@
     assert(_owner != Self, "invariant");
 
     if (TryLock(Self) > 0) break;
     if (TrySpin(Self) > 0) break;
 
+    if (_owner == DEFLATER_MARKER) {
+      guarantee(0 <= _count, "Impossible: _owner == DEFLATER_MARKER && _count < 0; the monitor must not be owned by the deflater thread here");
+      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
+        // Acquired the monitor.
+        break;
+      }
+    }
+
     // State transition wrappers around park() ...
     // ReenterI() wisely defers state transitions until
     // it's clear we must park the thread.
     {
       OSThreadContendState osts(Self->osthread());

@@ -1120,20 +1219,24 @@
   return save;
 }
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
-void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   guarantee(_owner != Self, "reenter already owner");
-  enter(THREAD);       // enter the monitor
+  if (!enter(THREAD)) {
+    // Failed to enter the monitor so return for a retry.
+    return false;
+  }
+  // Entered the monitor.
   guarantee(_recursions == 0, "reenter recursion");
   _recursions = recursions;
-  return;
+  return true;
 }
 
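Because reenter() now propagates enter()'s failure, its callers need a retry loop that re-resolves the monitor: a false return means the old ObjectMonitor was deflated out from under the thread. A hypothetical caller shape (resolve_monitor() is an assumed stand-in for going back through ObjectSynchronizer; TRAPS/CHECK plumbing omitted):

struct Monitor { bool reenter(long recursions); };
Monitor* resolve_monitor();  // Assumed helper, not part of this patch.

void reenter_with_retry(long recursions) {
  for (;;) {
    // Fresh lookup each attempt: a false return below means the old
    // monitor was deflated, so retrying on the same pointer is wrong.
    Monitor* mon = resolve_monitor();
    if (mon->reenter(recursions)) {
      return;
    }
  }
}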
 
 // -----------------------------------------------------------------------------
 // A macro is used below because there may already be a pending

@@ -1357,11 +1460,12 @@
     Self->_Stalled = 0;
 
     assert(_owner != Self, "invariant");
     ObjectWaiter::TStates v = node.TState;
     if (v == ObjectWaiter::TS_RUN) {
-      enter(Self);
+      const bool success = enter(Self);
+      guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
     } else {
       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
       ReenterI(Self, &node);
       node.wait_reenter_end(this);
     }

@@ -1920,5 +2024,72 @@
 #undef NEWPERFVARIABLE
   }
 
   DEBUG_ONLY(InitDone = true;)
 }
+
+// For internal use by ObjectSynchronizer::monitors_iterate().
+ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
+  om_ptr->inc_ref_count();
+  _om_ptr = om_ptr;
+}
+
+ObjectMonitorHandle::~ObjectMonitorHandle() {
+  if (_om_ptr != NULL) {
+    _om_ptr->dec_ref_count();
+    _om_ptr = NULL;
+  }
+}
+
+// Save the ObjectMonitor* associated with the specified markOop and
+// increment the ref_count. This function should only be called if
+// the caller has verified mark->has_monitor() == true. The object
+// parameter is needed to verify that ObjectMonitor* has not been
+// deflated and reused for another object.
+//
+// This function returns true if the ObjectMonitor* has been safely
+// saved. This function returns false if we have lost a race with
+// async deflation; the caller should retry as appropriate.
+//
+bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
+  guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
+            p2i((address)mark));
+
+  ObjectMonitor * om_ptr = mark->monitor();
+  om_ptr->inc_ref_count();
+
+  if (AsyncDeflateIdleMonitors) {
+    // There is a race here if the monitor is not owned! The ref_count
+    // bump above causes subsequent async deflation to skip this
+    // monitor, but deflation that started earlier or is still running
+    // can race with us.
+    if (om_ptr->_owner == DEFLATER_MARKER) {
+      // Async deflation won the race so we have to retry.
+      om_ptr->dec_ref_count();
+      return false;
+    }
+    // The ObjectMonitor could have been deflated and reused for
+    // another object before we bumped the ref_count so make sure
+    // our object still refers to this ObjectMonitor.
+    const markOop tmp = object->mark();
+    if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
+      // Async deflation and reuse won the race so we have to retry.
+      om_ptr->dec_ref_count();
+      return false;
+    }
+  }
+
+  guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
+            p2i(_om_ptr));
+  _om_ptr = om_ptr;
+  return true;
+}
+
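A use site pairs save_om_ptr() with the handle's destructor in a retry loop; the sketch below condenses that shape (the surrounding ObjectSynchronizer code is not in this hunk, and the om_ptr() accessor is assumed):

// Hedged sketch of a caller: retry until the mark-to-monitor snapshot
// survives the async-deflation race; the handle's destructor releases
// the ref_count on every exit path.
void with_stable_monitor(oop obj) {
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->has_monitor()) {
      return;  // Not inflated (or just deflated): inflate or bail.
    }
    ObjectMonitorHandle omh;
    if (!omh.save_om_ptr(obj, mark)) {
      continue;  // Lost the race with async deflation; retry.
    }
    ObjectMonitor* mon = omh.om_ptr();  // Assumed accessor.
    // ... use mon safely; it cannot be deflated while ref_count > 0.
    return;
  }
}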
+// For internal use by ObjectSynchronizer::inflate().
+void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
+  // Cannot guarantee() is_new() here. As soon as the ObjectMonitor*
+  // is attached to the object in inflate(), it can be used by other
+  // JavaThreads.
+  // guarantee(om_ptr->is_new(), "sanity check: allocation_state=%d",
+  //           int(om_ptr->allocation_state()));
+  om_ptr->inc_ref_count();
+  _om_ptr = om_ptr;
+}