
src/hotspot/share/gc/shared/oopStorage.cpp

@@ -142,11 +142,11 @@
 size_t OopStorage::ActiveArray::block_count_acquire() const {
   return Atomic::load_acquire(&_block_count);
 }
 
 void OopStorage::ActiveArray::increment_refcount() const {
-  int new_value = Atomic::add(1, &_refcount);
+  int new_value = Atomic::add(&_refcount, 1);
   assert(new_value >= 1, "negative refcount %d", new_value - 1);
 }
 
 bool OopStorage::ActiveArray::decrement_refcount() const {
   int new_value = Atomic::sub(1, &_refcount);
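
The hunk above only swaps the Atomic::add parameter order (destination first, operand second); the refcount behavior is unchanged. As a point of comparison, here is a minimal standalone sketch of the same fetch-and-add refcount pattern written with C++ std::atomic instead of HotSpot's Atomic class; the RefCounted name and the memory orderings are illustrative assumptions, not part of this change:

    #include <atomic>
    #include <cassert>

    struct RefCounted {
      mutable std::atomic<int> _refcount{0};

      void increment_refcount() const {
        // fetch_add returns the previous value; Atomic::add above returns the
        // updated value, hence the "+ 1" to mirror the assert on new_value.
        int new_value = _refcount.fetch_add(1, std::memory_order_relaxed) + 1;
        assert(new_value >= 1 && "negative refcount");
      }

      bool decrement_refcount() const {
        int new_value = _refcount.fetch_sub(1, std::memory_order_acq_rel) - 1;
        assert(new_value >= 0 && "negative refcount");
        return new_value == 0;   // true when the last reference was released
      }
    };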

@@ -1008,11 +1008,11 @@
   size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
   // Atomic::add with possible overshoot.  This can perform better
   // than a CAS loop on some platforms when there is contention.
   // We can cope with the uncertainty by recomputing start/end from
   // the result of the add, and dealing with potential overshoot.
-  size_t end = Atomic::add(step, &_next_block);
+  size_t end = Atomic::add(&_next_block, step);
   // _next_block may have changed, so recompute start from result of add.
   start = end - step;
   // _next_block may have changed so much that end has overshot.
   end = MIN2(end, _block_count);
   // _next_block may have changed so much that even start has overshot.
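
The comment in this hunk explains the claiming scheme: an unconditional fetch-and-add may overshoot the total block count under contention, and the code copes by recomputing the start from the add's result and clamping both ends. Below is a minimal standalone sketch of that pattern using C++ std::atomic; SegmentClaimer and its members are hypothetical names used only for illustration, not the OopStorage API:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    struct SegmentClaimer {
      std::atomic<size_t> _next_block{0};
      const size_t _block_count;

      explicit SegmentClaimer(size_t block_count) : _block_count(block_count) {}

      // Claim up to 'step' blocks; returns false once everything is claimed.
      bool claim(size_t step, size_t* start, size_t* end) {
        // Unconditional fetch-and-add, as in the hunk above: can be cheaper
        // than a CAS loop under contention, but may run past _block_count.
        size_t e = _next_block.fetch_add(step, std::memory_order_relaxed) + step;
        size_t s = e - step;              // recompute start from the add's result
        e = std::min(e, _block_count);    // end may have overshot
        s = std::min(s, _block_count);    // even start may have overshot
        if (s >= e) return false;         // nothing left to claim
        *start = s;
        *end = e;
        return true;
      }
    };

A caller would invoke claim(step, &start, &end) in a loop, processing blocks [start, end) until it returns false.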