
src/hotspot/share/utilities/lockFreeStack.hpp

@@ -63,11 +63,11 @@
     T* cur = top();
     T* old;
     do {
       old = cur;
       set_next(*last, cur);
-      cur = Atomic::cmpxchg(first, &_top, cur);
+      cur = Atomic::cmpxchg(&_top, cur, first);
     } while (old != cur);
   }
 
   // Noncopyable.
   LockFreeStack(const LockFreeStack&);

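The hunk above is the list-prepend path: the new chain is linked in front of the observed top, then published with a compare-and-swap, retrying if another thread changed _top in the meantime. The change itself only reorders the Atomic::cmpxchg arguments to destination-first: (&_top, expected value cur, new value first). A minimal sketch of the same retry pattern using std::atomic instead of HotSpot's Atomic; the Node, top, and prepend names are illustrative assumptions, not part of this header:

    #include <atomic>

    struct Node { Node* _next; };        // illustrative element type
    std::atomic<Node*> top{nullptr};     // stands in for _top

    // Prepend the chain [first .. last], retrying until the CAS succeeds.
    void prepend(Node* first, Node* last) {
      Node* cur = top.load();
      do {
        last->_next = cur;               // relink the tail to the observed top
      } while (!top.compare_exchange_weak(cur, first));  // cur is refreshed on failure
    }
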
@@ -89,11 +89,11 @@
       T* new_top = NULL;
       if (result != NULL) {
         new_top = next(*result);
       }
       // CAS even on empty pop, for consistent membar behavior.
-      result = Atomic::cmpxchg(new_top, &_top, result);
+      result = Atomic::cmpxchg(&_top, result, new_top);
     } while (result != old);
     if (result != NULL) {
       set_next(*result, NULL);
     }
     return result;

@@ -101,11 +101,11 @@
 
   // Atomically exchange the list of elements with NULL, returning the old
   // list of elements.  Acts as a full memory barrier.
   // postcondition: empty()
   T* pop_all() {
-    return Atomic::xchg((T*)NULL, &_top);
+    return Atomic::xchg(&_top, (T*)NULL);
   }
 
   // Atomically adds value to the top of this stack.  Acts as a full
   // memory barrier.
   void push(T& value) {

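pop_all needs no retry loop: a single atomic exchange detaches the whole list and leaves the stack empty, so only the Atomic::xchg argument order changes. A sketch with std::atomic (same illustrative names as above); the caller then walks the returned chain through the next pointers:

    // Detach and return the entire chain; the stack is empty afterwards.
    Node* pop_all_sketch() {
      return top.exchange(nullptr);
    }

    // Example use: drain the stack and visit every element.
    //   for (Node* n = pop_all_sketch(); n != nullptr; n = n->_next) { ... }
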
@@ -168,10 +168,10 @@
   // Set the entry following value to new_next in the list used by the
   // specialized LockFreeStack class.  Not thread-safe; in particular,
   // if value is in an instance of this specialization of LockFreeStack,
   // there must be no concurrent push or pop operations on that stack.
   static void set_next(T& value, T* new_next) {
-    Atomic::store(new_next, next_ptr(value));
+    Atomic::store(next_ptr(value), new_next);
   }
 };
 
 #endif // SHARE_UTILITIES_LOCKFREESTACK_HPP
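
For set_next the comment above already rules out concurrent push or pop on the affected stack, so a plain Atomic::store suffices; the change again just moves the destination to the first argument. A hedged sketch of the kind of element type such a specialization is built around, exposing the address of its embedded next field through an accessor; the TestEntry name is an assumption for illustration, not part of this header:

    class TestEntry {
      TestEntry* volatile _next;
    public:
      // Accessor with the shape next_ptr(value) expects: it hands the stack
      // the address of this element's embedded next field.
      static TestEntry* volatile* next_ptr(TestEntry& e) { return &e._next; }
    };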