
src/hotspot/share/runtime/objectMonitor.cpp

rev 47400 : [mq]: cmpxchg_ptr
rev 47401 : [mq]: cmpxchg_if_null
rev 47406 : [mq]: assembler_cmpxchg

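The hunks below replace the untyped Atomic::cmpxchg_ptr with typed Atomic::cmpxchg calls, and rewrite the compare-against-NULL pattern as Atomic::cmpxchg_if_null. A minimal sketch of the API shape these call sites appear to assume is given here; it is illustrative only, written against std::atomic rather than HotSpot's Atomic class, and the names and signatures are assumptions, not the real declarations.

#include <atomic>
#include <cstddef>

namespace sketch {
  // Assumed shape: compare *dest with compare_value; if equal, install
  // exchange_value.  Always return the value that was observed in *dest.
  template <typename T>
  T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
    dest->compare_exchange_strong(compare_value, exchange_value);
    return compare_value;   // on failure this holds the observed value
  }

  // Assumed shape: install value only if *dest is currently NULL; report
  // success as a bool, so callers can write "if (cmpxchg_if_null(...))".
  template <typename T>
  bool cmpxchg_if_null(T* value, std::atomic<T*>* dest) {
    return cmpxchg<T*>(value, dest, (T*)NULL) == NULL;
  }
}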
@@ -247,11 +247,11 @@
 void ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
-  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
+  void * cur = Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL);
   if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
     return;

@@ -404,11 +404,11 @@
 // Callers must compensate as needed.
 
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+  if (Atomic::cmpxchg_if_null((void*)Self, &_owner)) {
     // Either guarantee _recursions == 0 or set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
     return 1;
   }

@@ -474,11 +474,11 @@
   // Note that spinning tends to reduce the rate at which threads
   // enqueue and dequeue on EntryList|cxq.
   ObjectWaiter * nxt;
   for (;;) {
     node._next = nxt = _cxq;
-    if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break;
+    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
 
     // Interference - the CAS failed because _cxq changed.  Just retry.
     // As an optional optimization we retry the lock.
     if (TryLock (Self) > 0) {
       assert(_succ != Self, "invariant");

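The enqueue loop above is a standard lock-free push onto the head of a singly linked list. A standalone sketch of the same pattern follows, using illustrative names and std::atomic instead of HotSpot's Atomic; it is not the HotSpot code itself.

#include <atomic>

struct Node { Node* next; };

void push(std::atomic<Node*>* head, Node* node) {
  for (;;) {
    Node* old = head->load();
    node->next = old;     // link ourselves in front of the current head
    // Succeed only if the head is still what we read; otherwise another
    // thread interfered and we simply retry with the refreshed head.
    if (head->compare_exchange_weak(old, node)) break;
  }
}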
@@ -512,11 +512,11 @@
   // -- the checker -- parked on a timer.
 
   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+    Atomic::cmpxchg_if_null(Self, &_Responsible);
   }
 
   // The lock might have been released while this thread was occupied queueing
   // itself onto _cxq.  To close the race and avoid "stranding" and
   // progress-liveness failure we must resample-retry _owner before parking.

@@ -536,11 +536,11 @@
 
     if (TryLock(Self) > 0) break;
     assert(_owner != Self, "invariant");
 
     if ((SyncFlags & 2) && _Responsible == NULL) {
-      Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
+      Atomic::cmpxchg_if_null(Self, &_Responsible);
     }
 
     // park self
     if (_Responsible == Self || (SyncFlags & 1)) {
       TEVENT(Inflated enter - park TIMED);

@@ -793,11 +793,11 @@
     // and then unlink Self from EntryList.  We have to drain eventually,
     // so it might as well be now.
 
     ObjectWaiter * v = _cxq;
     assert(v != NULL, "invariant");
-    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+    if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
       // The CAS above can fail from interference IFF a "RAT" arrived.
       // In that case Self must be in the interior and can no longer be
       // at the head of cxq.
       if (v == SelfNode) {
         assert(_cxq != v, "invariant");

@@ -945,11 +945,11 @@
       // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
       // store into a _dummy variable.  That store is not needed, but can result
       // in massive wasteful coherency traffic on classic SMP systems.
       // Instead, I use release_store(), which is implemented as just a simple
       // ST on x64, x86 and SPARC.
-      OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+      OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
       OrderAccess::storeload();                        // See if we need to wake a successor
       if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
         TEVENT(Inflated exit - simple egress);
         return;
       }

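The exit fast path above follows a store-then-fence-then-load idiom: drop the lock with a release store, force a StoreLoad barrier, and only then re-examine the wait queues, so the exiting thread cannot miss a waiter that enqueued itself after seeing the lock held. A minimal sketch of that idiom, with illustrative field names and std::atomic standing in for OrderAccess:

#include <atomic>

std::atomic<void*> owner;       // non-NULL while the lock is held
std::atomic<void*> entry_list;  // stand-ins for _EntryList and _cxq
std::atomic<void*> cxq;
std::atomic<void*> succ;        // stand-in for _succ

void exit_fast_path() {
  owner.store(nullptr, std::memory_order_release);      // drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
  if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
      succ.load() != nullptr) {
    return;  // no waiters, or a successor is already being handled
  }
  // ...otherwise fall through to the slow path that wakes a successor
}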
@@ -990,17 +990,17 @@
       // Only the current lock owner can manipulate the EntryList or
       // drain _cxq, so we need to reacquire the lock.  If we fail
       // to reacquire the lock the responsibility for ensuring succession
       // falls to the new owner.
       //
-      if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+      if (!Atomic::cmpxchg_if_null((void*)THREAD, &_owner)) {
         return;
       }
       TEVENT(Exit - Reacquired);
     } else {
       if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-        OrderAccess::release_store_ptr(&_owner, NULL);   // drop the lock
+        OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
         OrderAccess::storeload();
         // Ratify the previously observed values.
         if (_cxq == NULL || _succ != NULL) {
           TEVENT(Inflated exit - simple egress);
           return;

@@ -1015,11 +1015,11 @@
         //     we either restart/rerun the exit operation, or simply
         //     fall-through into the code below which wakes a successor.
         // B.  If the elements forming the EntryList|cxq are TSM
         //     we could simply unpark() the lead thread and return
         //     without having set _succ.
-        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+        if (!Atomic::cmpxchg_if_null((void*)THREAD, &_owner)) {
           TEVENT(Inflated exit - reacquired succeeded);
           return;
         }
         TEVENT(Inflated exit - reacquired failed);
       } else {

@@ -1050,11 +1050,11 @@
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap(&cxq, NULL)
       w = _cxq;
       for (;;) {
         assert(w != NULL, "Invariant");
-        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
         if (u == w) break;
         w = u;
       }
       assert(w != NULL, "invariant");
 

@@ -1091,11 +1091,11 @@
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap(&cxq, NULL)
       w = _cxq;
       for (;;) {
         assert(w != NULL, "Invariant");
-        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+        ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
         if (u == w) break;
         w = u;
       }
       assert(w != NULL, "invariant");
 

@@ -1144,11 +1144,11 @@
     // Drain _cxq into EntryList - bulk transfer.
     // First, detach _cxq.
     // The following loop is tantamount to: w = swap(&cxq, NULL)
     for (;;) {
       assert(w != NULL, "Invariant");
-      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w);
+      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
       if (u == w) break;
       w = u;
     }
     TEVENT(Inflated exit - drain cxq into EntryList);
 
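The three drain hunks above all use the same detach idiom: CAS NULL into _cxq until it sticks, which the comment notes is tantamount to an atomic swap. A sketch of that idiom in isolation, reusing the illustrative Node type from the push sketch:

Node* detach_all(std::atomic<Node*>* cxq) {
  Node* w = cxq->load();
  for (;;) {
    // Try to claim the whole chain by swinging the head to NULL.  On
    // failure, compare_exchange_weak refreshes w with the current head.
    if (cxq->compare_exchange_weak(w, nullptr)) return w;
  }
  // Equivalently: return cxq->exchange(nullptr);
}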

@@ -1277,11 +1277,11 @@
   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
   // out-of-scope (non-extant).
   Wakee  = NULL;
 
   // Drop the lock
-  OrderAccess::release_store_ptr(&_owner, NULL);
+  OrderAccess::release_store(&_owner, (void*)NULL);
   OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
   if (SafepointSynchronize::do_call_back()) {
     TEVENT(unpark before SAFEPOINT);
   }

@@ -1686,22 +1686,22 @@
       } else {
         iterator->TState = ObjectWaiter::TS_CXQ;
         for (;;) {
           ObjectWaiter * front = _cxq;
           iterator->_next = front;
-          if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) {
+          if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
             break;
           }
         }
       }
     } else if (policy == 3) {      // append to cxq
       iterator->TState = ObjectWaiter::TS_CXQ;
       for (;;) {
         ObjectWaiter * tail = _cxq;
         if (tail == NULL) {
           iterator->_next = NULL;
-          if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) {
+          if (Atomic::cmpxchg_if_null(iterator, &_cxq)) {
             break;
           }
         } else {
           while (tail->_next != NULL) tail = tail->_next;
           tail->_next = iterator;

@@ -1978,11 +1978,11 @@
     // the spin without prejudice or apply a "penalty" to the
     // spin count-down variable "ctr", reducing it by 100, say.
 
     Thread * ox = (Thread *) _owner;
     if (ox == NULL) {
-      ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+      ox = (Thread*)Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL);
       if (ox == NULL) {
         // The CAS succeeded -- this thread acquired ownership
         // Take care of some bookkeeping to exit spin state.
         if (sss && _succ == Self) {
           _succ = NULL;