src/hotspot/share/runtime/objectMonitor.cpp

rev 54612 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54613 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 54614 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 54615 : imported patch dcubed.monitor_deflate_conc.v2.03

@@ -236,36 +236,38 @@
 }
 
 // -----------------------------------------------------------------------------
 // Enter support
 
-bool ObjectMonitor::enter(TRAPS) {
+void ObjectMonitor::enter(TRAPS) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
   if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
-    return true;
+    return;
   }
 
   if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions++;
-    return true;
+    return;
   }
 
   if (Self->is_lock_owned ((address)cur)) {
     assert(_recursions == 0, "internal state error");
     _recursions = 1;
     // Commute owner from a thread-specific on-stack BasicLockObject address to
     // a full-fledged "Thread *".
     _owner = Self;
-    return true;
+    return;
   }
 
   // We've encountered genuine contention.
   assert(Self->_Stalled == 0, "invariant");
   Self->_Stalled = intptr_t(this);
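
As context for the fast paths in the hunk above, here is a minimal, self-contained C++ sketch of the same pattern using std::atomic. The names (ToyThread, ToyMonitor, try_enter) are illustrative stand-ins, not HotSpot code; only the uncontended and recursive paths are modeled and the stack-lock conversion path is omitted.

#include <atomic>
#include <cassert>

// Stand-in for a thread identity; HotSpot uses Thread*.
struct ToyThread {};

// Models the two cheap enter() paths shown above: one CAS claims an unowned
// monitor, and a thread that already owns it just bumps a recursion count.
class ToyMonitor {
  std::atomic<ToyThread*> owner_{nullptr};
  int recursions_ = 0;   // only touched by the current owner

 public:
  // Returns true if the calling thread now owns the monitor without blocking.
  bool try_enter(ToyThread* self) {
    ToyThread* expected = nullptr;
    if (owner_.compare_exchange_strong(expected, self)) {
      assert(recursions_ == 0);
      return true;                 // uncontended fast path
    }
    if (expected == self) {        // compare_exchange left the current owner here
      recursions_++;               // recursive re-entry by the owner
      return true;
    }
    return false;                  // genuine contention: caller must spin/block
  }
};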

@@ -282,35 +284,27 @@
     assert(((oop)object())->mark() == markOopDesc::encode(this),
            "object mark must match encoded this: mark=" INTPTR_FORMAT
            ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
            p2i(markOopDesc::encode(this)));
     Self->_Stalled = 0;
-    return true;
+    return;
   }
 
   assert(_owner != Self, "invariant");
   assert(_succ != Self, "invariant");
   assert(Self->is_Java_thread(), "invariant");
   JavaThread * jt = (JavaThread *) Self;
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(jt->thread_state() != _thread_blocked, "invariant");
   assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
-  assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
+  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
 
-  // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
-  // Ensure the object-monitor relationship remains stable while there's contention.
-  const jint contentions = Atomic::add(1, &_contentions);
-  if (contentions <= 0 && _owner == DEFLATER_MARKER) {
-    // Async deflation is in progress. Attempt to restore the
-    // header/dmw to the object's header so that we only retry once
-    // if the deflater thread happens to be slow.
-    const oop obj = (oop) object();
-    install_displaced_markword_in_object(obj);
-    Self->_Stalled = 0;
-    return false;  // Caller should retry. Never mind about _contentions as this monitor has been deflated.
-  }
-  // The deflater thread will not deflate this monitor and the monitor is contended, continue.
+  // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
+  // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
+  // Ensure the object <-> monitor relationship remains stable while
+  // there's contention.
+  Atomic::add(1, &_contentions);
 
   JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
   EventJavaMonitorEnter event;
   if (event.should_commit()) {
     event.set_monitorClass(((oop)this->object())->klass());
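
The Atomic::add(1, &_contentions) above, paired with the Atomic::dec(&_contentions) later in enter(), advertises contention so that deflation treats the monitor as busy. A minimal sketch of that counter protocol, with illustrative names only (ToyMonitorState, announce_contention, looks_idle):

#include <atomic>

struct ToyMonitorState {
  std::atomic<int>   contentions{0};
  std::atomic<void*> owner{nullptr};

  // A contending thread advertises itself before it can block...
  void announce_contention() { contentions.fetch_add(1); }
  // ...and retracts the advertisement once it has acquired (or abandoned)
  // the monitor.
  void retract_contention()  { contentions.fetch_sub(1); }

  // A deflater would only consider monitors that look completely idle, so an
  // advertised contention is enough to keep the monitor alive.
  bool looks_idle() const {
    return owner.load() == nullptr && contentions.load() == 0;
  }
};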

@@ -368,11 +362,11 @@
     // states will still report that the thread is blocked trying to
     // acquire it.
   }
 
   Atomic::dec(&_contentions);
-  assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
+  assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
   Self->_Stalled = 0;
 
   // Must either set _recursions = 0 or ASSERT _recursions == 0.
   assert(_recursions == 0, "invariant");
   assert(_owner == Self, "invariant");

@@ -404,11 +398,10 @@
   if (event.should_commit()) {
     event.set_previousOwner((uintptr_t)_previous_owner_tid);
     event.commit();
   }
   OM_PERFDATA_OP(ContendedLockAttempts, inc());
-  return true;
 }
 
 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 // Callers must compensate as needed.
 

@@ -433,11 +426,11 @@
 // idempotent method is called by a thread that is deflating a
 // monitor and by other threads that have detected a race with the
 // deflation process.
 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
   // This function must only be called when (owner == DEFLATER_MARKER
-  // && contentions <= 0), but we can't guarantee that here because
+  // && ref_count <= 0), but we can't guarantee that here because
   // those values could change when the ObjectMonitor gets moved from
   // the global free list to a per-thread free list.
 
   guarantee(obj != NULL, "must be non-NULL");
   if (object() != obj) {
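
install_displaced_markword_in_object() restores the saved ("displaced") mark word so the object stops referring to a monitor that is being deflated, and it must stay safe when several racing threads call it. A simplified model of that idempotent restore, using illustrative names (ToyObject, ToyInflatedState, restore_header) rather than the real markOop API:

#include <atomic>
#include <cstdint>

// While inflated, the object's header word encodes a pointer to the monitor
// and the monitor keeps the original header. Restoring uses a CAS so that
// exactly one caller swings the header back; later callers see the already
// restored value and do nothing.
struct ToyObject { std::atomic<std::uintptr_t> header{0}; };

struct ToyInflatedState {
  std::uintptr_t displaced_header = 0;   // saved when the monitor was inflated

  void restore_header(ToyObject* obj, std::uintptr_t encoded_monitor) {
    std::uintptr_t cur = obj->header.load();
    if (cur == encoded_monitor) {
      // Only swing the header if it still points at this monitor.
      obj->header.compare_exchange_strong(cur, displaced_header);
    }
  }
};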

@@ -508,10 +501,12 @@
 }
 
 #define MAX_RECHECK_INTERVAL 1000
 
 void ObjectMonitor::EnterI(TRAPS) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 
   // Try the lock - TATAS

@@ -521,16 +516,12 @@
     assert(_Responsible != Self, "invariant");
     return;
   }
 
   if (_owner == DEFLATER_MARKER) {
-    // The deflation protocol finished the first part (setting _owner), but
-    // it failed the second part (making _contentions negative) and bailed.
-    // Because we're called from enter() we have at least one contention.
-    guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
-              "should have been handled by the caller: contentions=%d",
-              _contentions);
+    // The deflation protocol finished the first part (setting owner), but
+    // it failed the second part (making ref_count negative) and bailed.
     if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
       // Acquired the monitor.
       assert(_succ != Self, "invariant");
       assert(_Responsible != Self, "invariant");
       return;
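
The DEFLATER_MARKER check above, which recurs in the enter loop and in ReenterI() below, lets a contending thread take the monitor back from a deflater that set the owner sentinel but then had to bail out. A minimal sketch of that single CAS, with illustrative names (TOY_DEFLATER_MARKER, steal_from_deflater):

#include <atomic>

// Sentinel the deflater writes into the owner field while it tries to deflate.
static void* const TOY_DEFLATER_MARKER = reinterpret_cast<void*>(-1);

struct ToyOwnerField {
  std::atomic<void*> owner{nullptr};

  // Returns true if the calling thread acquired the monitor by swapping the
  // deflater's sentinel out of the owner field and installing itself.
  bool steal_from_deflater(void* self) {
    void* expected = TOY_DEFLATER_MARKER;
    return owner.compare_exchange_strong(expected, self);
  }
};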

@@ -652,16 +643,12 @@
     }
 
     if (TryLock(Self) > 0) break;
 
     if (_owner == DEFLATER_MARKER) {
-      // The deflation protocol finished the first part (setting _owner), but
-      // it failed the second part (making _contentions negative) and bailed.
-      // Because we're called from enter() we have at least one contention.
-      guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
-                "should have been handled by the caller: contentions=%d",
-                _contentions);
+      // The deflation protocol finished the first part (setting owner), but
+      // it failed the second part (making ref_count negative) and bailed.
       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
         // Acquired the monitor.
         break;
       }
     }

@@ -768,10 +755,12 @@
 // monitor reentry in wait().
 //
 // In the future we should reconcile EnterI() and ReenterI().
 
 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
+  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
+
   assert(Self != NULL, "invariant");
   assert(SelfNode != NULL, "invariant");
   assert(SelfNode->_thread == Self, "invariant");
   assert(_waiters > 0, "invariant");
   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");

@@ -786,16 +775,12 @@
 
     if (TryLock(Self) > 0) break;
     if (TrySpin(Self) > 0) break;
 
     if (_owner == DEFLATER_MARKER) {
-      // The deflation protocol finished the first part (setting _owner),
-      // but it will observe _waiters != 0 and will bail out. Because we're
-      // called from wait() we may or may not have any contentions.
-      guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
-                "should have been handled by the caller: contentions=%d",
-                _contentions);
+      // The deflation protocol finished the first part (setting owner), but
+      // it failed the second part (making ref_count negative) and bailed.
       if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
         // Acquired the monitor.
         break;
       }
     }

@@ -1256,24 +1241,20 @@
   return save;
 }
 
 // reenter() enters a lock and sets recursion count
 // complete_exit/reenter operate as a wait without waiting
-bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   guarantee(_owner != Self, "reenter already owner");
-  if (!enter(THREAD)) {
-    // Failed to enter the monitor so return for a retry.
-    return false;
-  }
+  enter(THREAD);
   // Entered the monitor.
   guarantee(_recursions == 0, "reenter recursion");
   _recursions = recursions;
-  return true;
 }
 
 
 // -----------------------------------------------------------------------------
 // A macro is used below because there may already be a pending
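
complete_exit() and reenter() cooperate so a thread can fully drop a recursively held monitor and later restore the saved recursion count. A toy illustration of that pairing built on std::mutex; the type and method names are stand-ins, and the caller is assumed to hold the lock when complete_exit() runs:

#include <mutex>

struct ToyRecursiveLock {
  std::mutex m;
  int recursions = 0;   // nesting depth beyond the first acquisition

  // Precondition: the calling thread holds m. Fully releases the lock and
  // returns the nesting depth so it can be restored later.
  int complete_exit() {
    int saved = recursions;
    recursions = 0;
    m.unlock();
    return saved;
  }

  // Re-acquires the lock (blocking if necessary) and restores the depth.
  void reenter(int saved) {
    m.lock();
    recursions = saved;
  }
};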

@@ -1497,12 +1478,11 @@
     Self->_Stalled = 0;
 
     assert(_owner != Self, "invariant");
     ObjectWaiter::TStates v = node.TState;
     if (v == ObjectWaiter::TS_RUN) {
-      const bool success = enter(Self);
-      ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
+      enter(Self);
     } else {
       guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
       ReenterI(Self, &node);
       node.wait_reenter_end(this);
     }

@@ -2062,11 +2042,11 @@
   }
 
   DEBUG_ONLY(InitDone = true;)
 }
 
-// For internal used by ObjectSynchronizer::monitors_iterate().
+// For internal use by ObjectSynchronizer::monitors_iterate().
 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
   om_ptr->inc_ref_count();
   _om_ptr = om_ptr;
 }
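
The constructor above pins the monitor for monitors_iterate() by bumping ref_count. A minimal sketch of such a scope-bound handle follows; the names are illustrative, and the decrement in the destructor is an assumption about how the handle is meant to be released, not code taken from this patch:

#include <atomic>

struct ToyRefCounted {
  std::atomic<int> ref_count{0};
};

class ToyHandle {
  ToyRefCounted* _ptr;
 public:
  explicit ToyHandle(ToyRefCounted* p) : _ptr(p) {
    _ptr->ref_count.fetch_add(1);   // keep the monitor from being deflated
  }
  ~ToyHandle() {
    _ptr->ref_count.fetch_sub(1);   // drop the pin when the scope ends
  }
  // Non-copyable so the count is adjusted exactly once per handle.
  ToyHandle(const ToyHandle&) = delete;
  ToyHandle& operator=(const ToyHandle&) = delete;
};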
 

@@ -2096,22 +2076,19 @@
 
   if (AsyncDeflateIdleMonitors) {
     // Race here if monitor is not owned! The above ref_count bump
     // will cause subsequent async deflation to skip it. However,
     // previous or concurrent async deflation is a race.
-    if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
-      // Async deflation is in progress.
-      if (om_ptr->ref_count() <= 0) {
-        // And our ref_count increment above lost the race to async
-        // deflation. Attempt to restore the header/dmw to the
-        // object's header so that we only retry once if the deflater
-        // thread happens to be slow.
+    if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
+      // Async deflation is in progress and our ref_count increment
+      // above lost the race to async deflation. Attempt to restore
+      // the header/dmw to the object's header so that we only retry
+      // once if the deflater thread happens to be slow.
         om_ptr->install_displaced_markword_in_object(object);
         om_ptr->dec_ref_count();
         return false;
       }
-    }
     // The ObjectMonitor could have been deflated and reused for
     // another object before we bumped the ref_count so make sure
     // our object still refers to this ObjectMonitor.
     const markOop tmp = object->mark();
     if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {

@@ -2120,23 +2097,23 @@
       om_ptr->dec_ref_count();
       return false;
     }
   }
 
-  guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
+  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
             p2i(_om_ptr));
   _om_ptr = om_ptr;
   return true;
 }
 
 // For internal use by ObjectSynchronizer::inflate().
 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
   if (_om_ptr == NULL) {
-    guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
+    ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
     om_ptr->inc_ref_count();
     _om_ptr = om_ptr;
   } else {
-    guarantee(om_ptr == NULL, "can only clear a set om_ptr");
+    ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
     _om_ptr->dec_ref_count();
     _om_ptr = NULL;
   }
 }
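
save_om_ptr() above follows a bump-then-validate order: raise ref_count first, then confirm that async deflation has not already won and that the object still maps to this monitor before using it. A self-contained sketch of that ordering, with illustrative names only (ToyMon, ToyObj, toy_save):

#include <atomic>

// Sentinel standing in for DEFLATER_MARKER.
static void* const TOY_DEFLATER = reinterpret_cast<void*>(-1);

struct ToyMon {
  std::atomic<int>   ref_count{0};
  std::atomic<void*> owner{nullptr};
};

struct ToyObj {
  std::atomic<ToyMon*> monitor{nullptr};
};

// Returns true if 'mon' is safe to use for 'obj'; on false the caller retries.
bool toy_save(ToyObj* obj, ToyMon* mon) {
  mon->ref_count.fetch_add(1);             // advertise interest before checking
  if (mon->owner.load() == TOY_DEFLATER && mon->ref_count.load() <= 0) {
    mon->ref_count.fetch_sub(1);           // async deflation already won
    return false;
  }
  if (obj->monitor.load() != mon) {        // monitor was deflated and reused
    mon->ref_count.fetch_sub(1);
    return false;
  }
  return true;
}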