src/hotspot/share/gc/shared/ptrQueue.cpp

@@ -148,11 +148,11 @@
     node = BufferNode::allocate(_buffer_size);
   } else {
     // Decrement count after getting buffer from free list.  This, along
     // with incrementing count before adding to free list, ensures count
     // never underflows.
-    size_t count = Atomic::sub(1u, &_free_count);
+    size_t count = Atomic::sub(&_free_count, 1u);
     assert((count + 1) != 0, "_free_count underflow");
   }
   return node;
 }
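
Note on the hunk above: the updated Atomic API takes the destination pointer first and the operand second, and Atomic::sub returns the value after the decrement, which is why the assert checks (count + 1) != 0 rather than count != 0. A minimal standalone analogue of this decrement-with-underflow-check, sketched with std::atomic (illustrative names, not HotSpot code):

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    std::atomic<size_t> free_count{1};

    // fetch_sub returns the value before the decrement, so the underflow
    // check here is "old != 0"; HotSpot's Atomic::sub returns the value
    // after the decrement, hence the equivalent "(count + 1) != 0" above.
    void take_from_free_list() {
      size_t old_count = free_count.fetch_sub(1);
      assert(old_count != 0 && "free_count underflow");
    }

    int main() {
      take_from_free_list();  // free_count: 1 -> 0, old_count == 1
      return 0;
    }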
 

@@ -180,11 +180,11 @@
   // the allocation rate and the release rate are going to be fairly
   // similar, due to how the buffers are used.
   const size_t trigger_transfer = 10;
 
   // Add to pending list. Update count first so no underflow in transfer.
-  size_t pending_count = Atomic::add(1u, &_pending_count);
+  size_t pending_count = Atomic::add(&_pending_count, 1u);
   _pending_list.push(*node);
   if (pending_count > trigger_transfer) {
     try_transfer_pending();
   }
 }
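
The comment in this hunk is load-bearing: the count is bumped before the node is published, so a concurrent try_transfer_pending that claims the node has already observed the increment, and its matching subtraction cannot underflow. A sketch of that discipline with std::atomic (push_to_pending_list and try_transfer_pending are stand-ins for the HotSpot internals, not real API):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> pending_count{0};
    const size_t trigger_transfer = 10;  // same threshold as above

    void push_to_pending_list() { /* stand-in for _pending_list.push */ }
    void try_transfer_pending() { /* stand-in for the transfer below */ }

    // Increment first, publish second: any transfer that can see the node
    // on the pending list is guaranteed to also see the incremented count.
    void release_node() {
      size_t count = pending_count.fetch_add(1) + 1;  // new value, like Atomic::add
      push_to_pending_list();
      if (count > trigger_transfer) {
        try_transfer_pending();
      }
    }

    int main() {
      release_node();
      return 0;
    }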

@@ -195,11 +195,11 @@
 // transfer, false if blocked from doing so by some other thread's
 // in-progress transfer.
 bool BufferNode::Allocator::try_transfer_pending() {
   // Attempt to claim the lock.
   if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
-      Atomic::cmpxchg(true, &_transfer_lock, false)) {
+      Atomic::cmpxchg(&_transfer_lock, false, true)) {
     return false;
   }
   // Have the lock; perform the transfer.
 
   // Claim all the pending nodes.
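
The lock claim above is a classic test-and-test-and-set: a plain load first to skip the CAS when it would likely fail, then a single CAS to take the lock. Note that HotSpot's Atomic::cmpxchg returns the value previously at the destination, so a result of true means another thread already held the lock. An equivalent sketch with std::atomic:

    #include <atomic>

    std::atomic<bool> transfer_lock{false};

    // Test-and-test-and-set: the relaxed load avoids hammering the cache
    // line with failing CASes; the CAS then claims the lock atomically.
    bool try_claim_transfer_lock() {
      if (transfer_lock.load(std::memory_order_relaxed)) {
        return false;  // lock appears held; skip the CAS
      }
      bool expected = false;
      return transfer_lock.compare_exchange_strong(expected, true);
    }

    // Matches the Atomic::release_store(&_transfer_lock, false) below:
    // the release store publishes the transfer's writes to the next owner.
    void release_transfer_lock() {
      transfer_lock.store(false, std::memory_order_release);
    }

    int main() {
      if (try_claim_transfer_lock()) {
        release_transfer_lock();
      }
      return 0;
    }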

@@ -210,18 +210,18 @@
     size_t count = 1;
     for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
       last = next;
       ++count;
     }
-    Atomic::sub(count, &_pending_count);
+    Atomic::sub(&_pending_count, count);
 
     // Wait for any in-progress pops, to avoid ABA for them.
     GlobalCounter::write_synchronize();
 
     // Add synchronized nodes to _free_list.
     // Update count first so no underflow in allocate().
-    Atomic::add(count, &_free_count);
+    Atomic::add(&_free_count, count);
     _free_list.prepend(*first, *last);
     log_trace(gc, ptrqueue, freelist)
              ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
   }
   Atomic::release_store(&_transfer_lock, false);
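
GlobalCounter::write_synchronize gives the transfer an RCU-style grace period: it returns only after every pop that was in flight when the nodes were claimed has finished, so a claimed node cannot be pushed back and recycled under a reader still dereferencing it (the ABA hazard the comment mentions). A deliberately simplified analogue using a single reader count (HotSpot's real mechanism is an epoch-based per-thread counter, not this):

    #include <atomic>
    #include <thread>

    std::atomic<int> active_readers{0};

    // Pops bracket themselves as readers.
    void reader_enter() { active_readers.fetch_add(1); }
    void reader_exit()  { active_readers.fetch_sub(1); }

    // The writer waits until all in-flight readers have drained before the
    // claimed nodes may be handed out again -- the grace period.
    void write_synchronize_analogue() {
      while (active_readers.load(std::memory_order_acquire) != 0) {
        std::this_thread::yield();
      }
    }

    int main() {
      reader_enter();
      reader_exit();
      write_synchronize_analogue();  // no readers in flight; returns at once
      return 0;
    }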

@@ -234,11 +234,11 @@
   for ( ; removed < remove_goal; ++removed) {
     BufferNode* node = _free_list.pop();
     if (node == NULL) break;
     BufferNode::deallocate(node);
   }
-  size_t new_count = Atomic::sub(removed, &_free_count);
+  size_t new_count = Atomic::sub(&_free_count, removed);
   log_debug(gc, ptrqueue, freelist)
            ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
             name(), removed, new_count);
   return removed;
 }
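
The trim loop above pops at most remove_goal nodes and stops early if the free list empties; Atomic::sub again returns the updated count, which is what gets logged. A single-threaded sketch of the same shape (the real list is a lock-free stack, and the names here are illustrative):

    #include <cstddef>
    #include <stack>

    // Pop up to remove_goal nodes, deallocate each, and report how many
    // were actually removed; an empty list ends the loop early, mirroring
    // the NULL check above.
    size_t reduce_free_list_sketch(std::stack<int*>& free_list,
                                   size_t remove_goal) {
      size_t removed = 0;
      for ( ; removed < remove_goal; ++removed) {
        if (free_list.empty()) break;
        delete free_list.top();
        free_list.pop();
      }
      return removed;
    }

    int main() {
      std::stack<int*> free_list;
      free_list.push(new int(0));
      reduce_free_list_sketch(free_list, 2);  // removes 1 node, then stops
      return 0;
    }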

@@ -256,6 +256,5 @@
 }
 
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
   _allocator->release(node);
 }
-