
src/hotspot/share/runtime/objectMonitor.cpp

rev 57560 : imported patch 8236035.patch.cr0
rev 57561 : dholmes CR - rename simply_set_owner_from() -> set_owner_from() and simply_set_owner_from_BasicLock() -> set_owner_from_BasicLock(); rename release_clear_owner_with_barrier() -> release_clear_owner() and refactor barrier code back into the call sites.
rev 57562 : kbarrett CR - rearrange some loads of _owner field to be more efficient; clarify header comment for try_set_owner_from() declaration; make some loads of _owner field DEBUG_ONLY since they only exist for assert()'s; update related logging calls to use the existing function parameter instead.

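A rough sketch of the helper API named in the revision notes above, reconstructed from the call sites in the hunks below; the authoritative declarations live in objectMonitor.hpp:

  // Sketch of the _owner helper API implied by the call sites below.
  void* try_set_owner_from(void* old_value, void* new_value);       // CAS on _owner; returns the prior value
  void  set_owner_from_BasicLock(void* basic_lock_p, Thread* self); // BasicLock* -> Thread* owner transition
  void  release_clear_owner(void* old_value);                       // release-store NULL into _owner
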
@@ -243,11 +243,11 @@
 void ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
-  void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
+  void* cur = try_set_owner_from(NULL, Self);
   if (cur == NULL) {
     assert(_recursions == 0, "invariant");
     return;
   }
 

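A minimal sketch of try_set_owner_from(), inferred from this call site and the rev 57562 note (the committed definition lives in objectMonitor.inline.hpp and also emits owner-transition logging):

  inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value) {
    // CAS _owner from old_value to new_value and return the prior value,
    // so callers test success with "== old_value" rather than a bool.
    void* prev = Atomic::cmpxchg(&_owner, old_value, new_value);
    return prev;
  }

Note the changed success convention: Atomic::replace_if_null() returned a bool, while try_set_owner_from() returns the prior owner, which is why the TryLock() hunk below tests "== NULL".
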
@@ -258,13 +258,11 @@
   }
 
   if (Self->is_lock_owned((address)cur)) {
     assert(_recursions == 0, "internal state error");
     _recursions = 1;
-    // Commute owner from a thread-specific on-stack BasicLockObject address to
-    // a full-fledged "Thread *".
-    _owner = Self;
+    set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
     return;
   }
 
   // We've encountered genuine contention.
   assert(Self->_Stalled == 0, "invariant");

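The BasicLock* to Thread* conversion above is also a named helper now. A sketch under the same caveat (the committed version loads _owner only for its assert, per the rev 57562 note, and logs the transition):

  inline void ObjectMonitor::set_owner_from_BasicLock(void* basic_lock_p, Thread* self) {
    // Commute _owner from a thread-specific on-stack BasicLock address
    // to a full-fledged Thread*. No CAS is needed: only the owner makes
    // this non-NULL to non-NULL transition, and all readers tolerate
    // either flavor of owner value.
    DEBUG_ONLY(void* prev = Atomic::load(&_owner);)
    assert(prev == basic_lock_p, "unexpected prior owner");
    _owner = self;
  }
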
@@ -401,11 +399,11 @@
 // Callers must compensate as needed.
 
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::replace_if_null(&_owner, Self)) {
+  if (try_set_owner_from(NULL, Self) == NULL) {
     assert(_recursions == 0, "invariant");
     return 1;
   }
   // The lock had been free momentarily, but we lost the race to the lock.
   // Interference -- the CAS failed.

@@ -860,19 +858,16 @@
 // then wake a thread unnecessarily. This is benign, and we've
 // structured the code so the windows are short and the frequency
 of such futile wakeups is low.
 
 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
-  Thread * const Self = THREAD;
-  if (THREAD != _owner) {
-    if (THREAD->is_lock_owned((address) _owner)) {
-      // Transmute _owner from a BasicLock pointer to a Thread address.
-      // We don't need to hold _mutex for this transition.
-      // Non-null to Non-null is safe as long as all readers can
-      // tolerate either flavor.
+  Thread* const Self = THREAD;
+  void* cur = Atomic::load(&_owner);
+  if (THREAD != cur) {
+    if (THREAD->is_lock_owned((address)cur)) {
       assert(_recursions == 0, "invariant");
-      _owner = THREAD;
+      set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
       _recursions = 0;
     } else {
       // Apparent unbalanced locking ...
       // Naively we'd like to throw IllegalMonitorStateException.
       // As a practical matter we can neither allocate nor throw an

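Design note: exit() now loads _owner exactly once into the local cur and branches on that snapshot instead of re-reading the racy field. This is the load rearrangement described in the rev 57562 note; the same single-load pattern appears again in complete_exit() and check_owner() below.
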
@@ -912,14 +907,19 @@
 #endif
 
   for (;;) {
     assert(THREAD == _owner, "invariant");
 
+    // Drop the lock.
     // release semantics: prior loads and stores from within the critical section
     // must not float (reorder) past the following store that drops the lock.
-    Atomic::release_store(&_owner, (void*)NULL);   // drop the lock
-    OrderAccess::storeload();                      // See if we need to wake a successor
+    // Uses a storeload to separate release_store(owner) from the
+    // successor check. The try_set_owner_from() below uses cmpxchg() so
+    // we get the fence down there.
+    release_clear_owner(Self);
+    OrderAccess::storeload();
+
     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
       return;
     }
     // Other threads are blocked trying to acquire the lock.
 

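A sketch of release_clear_owner() itself; per the rev 57561 note, the trailing barrier was deliberately refactored back into the call sites, which is why the storeload() above (and the fence() in ExitEpilog below) remain outside the helper:

  inline void ObjectMonitor::release_clear_owner(void* old_value) {
    // Drop the lock with release semantics: loads and stores in the
    // critical section must not float past the store that clears _owner.
    // Ordering against subsequent loads (successor check, unpark) is
    // left to the caller via storeload()/fence().
    DEBUG_ONLY(void* prev = Atomic::load(&_owner);)
    assert(prev == old_value, "unexpected prior owner");
    Atomic::release_store(&_owner, (void*)NULL);
  }
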
@@ -957,11 +957,11 @@
     // Only the current lock owner can manipulate the EntryList or
     // drain _cxq, so we need to reacquire the lock.  If we fail
     // to reacquire the lock the responsibility for ensuring succession
     // falls to the new owner.
     //
-    if (!Atomic::replace_if_null(&_owner, THREAD)) {
+    if (try_set_owner_from(NULL, Self) != NULL) {
       return;
     }
 
     guarantee(_owner == THREAD, "invariant");
 

@@ -1090,12 +1090,13 @@
   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
   // out-of-scope (non-extant).
   Wakee  = NULL;
 
   // Drop the lock
-  Atomic::release_store(&_owner, (void*)NULL);
-  OrderAccess::fence();                               // ST _owner vs LD in unpark()
+  // Uses a fence to separate release_store(owner) from the LD in unpark().
+  release_clear_owner(Self);
+  OrderAccess::fence();
 
   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
   Trigger->unpark();
 
   // Maintain stats and report events to JVMTI

@@ -1117,14 +1118,15 @@
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;
 
   assert(InitDone, "Unexpectedly not initialized");
 
-  if (THREAD != _owner) {
-    if (THREAD->is_lock_owned ((address)_owner)) {
+  void* cur = Atomic::load(&_owner);
+  if (THREAD != cur) {
+    if (THREAD->is_lock_owned((address)cur)) {
       assert(_recursions == 0, "internal state error");
-      _owner = THREAD;   // Convert from basiclock addr to Thread addr
+      set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
       _recursions = 0;
     }
   }
 
   guarantee(Self == _owner, "complete_exit not owner");

@@ -1165,15 +1167,16 @@
 // Returns true if the specified thread owns the ObjectMonitor.
 // Otherwise returns false and throws IllegalMonitorStateException
 // (IMSE). If there is a pending exception and the specified thread
 // is not the owner, that exception will be replaced by the IMSE.
 bool ObjectMonitor::check_owner(Thread* THREAD) {
-  if (_owner == THREAD) {
+  void* cur = Atomic::load(&_owner);
+  if (cur == THREAD) {
     return true;
   }
-  if (THREAD->is_lock_owned((address)_owner)) {
-    _owner = THREAD;  // convert from BasicLock addr to Thread addr
+  if (THREAD->is_lock_owned((address)cur)) {
+    set_owner_from_BasicLock(cur, THREAD);  // Convert from BasicLock* to Thread*.
     _recursions = 0;
     return true;
   }
   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
              "current thread is not owner", false);

@@ -1678,11 +1681,11 @@
     // the spin without prejudice or apply a "penalty" to the
     // spin count-down variable "ctr", reducing it by 100, say.
 
     Thread * ox = (Thread *) _owner;
     if (ox == NULL) {
-      ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
+      ox = (Thread*)try_set_owner_from(NULL, Self);
       if (ox == NULL) {
         // The CAS succeeded -- this thread acquired ownership
         // Take care of some bookkeeping to exit spin state.
         if (_succ == Self) {
           _succ = NULL;