--- old/src/hotspot/share/gc/shared/oopStorage.cpp	2019-11-21 11:57:23.963213514 +0100
+++ new/src/hotspot/share/gc/shared/oopStorage.cpp	2019-11-21 11:57:23.479205338 +0100
@@ -144,12 +144,12 @@
 }
 
 void OopStorage::ActiveArray::increment_refcount() const {
-  int new_value = Atomic::add(1, &_refcount);
+  int new_value = Atomic::add(&_refcount, 1);
   assert(new_value >= 1, "negative refcount %d", new_value - 1);
 }
 
 bool OopStorage::ActiveArray::decrement_refcount() const {
-  int new_value = Atomic::sub(1, &_refcount);
+  int new_value = Atomic::sub(&_refcount, 1);
   assert(new_value >= 0, "negative refcount %d", new_value);
   return new_value == 0;
 }
@@ -307,7 +307,7 @@
     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
     unsigned index = count_trailing_zeros(~allocated);
     uintx new_value = allocated | bitmask_for_index(index);
-    uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
+    uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
     if (fetched == allocated) {
       return get_pointer(index); // CAS succeeded; return entry for index.
     }
@@ -595,7 +595,7 @@
   while (true) {
     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
     uintx new_value = old_allocated ^ releasing;
-    uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
+    uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
     if (fetched == old_allocated) break; // Successful update.
     old_allocated = fetched;             // Retry with updated bitmask.
   }
@@ -614,12 +614,12 @@
   // then someone else has made such a claim and the deferred update has not
   // yet been processed and will include our change, so we don't need to do
   // anything further.
-  if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
+  if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
     // Successfully claimed.  Push, with self-loop for end-of-list.
     Block* head = owner->_deferred_updates;
     while (true) {
       _deferred_updates_next = (head == NULL) ? this : head;
-      Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
+      Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
       if (fetched == head) break; // Successful update.
       head = fetched;             // Retry with updated head.
     }
@@ -651,7 +651,7 @@
     // Try atomic pop of block from list.
     Block* tail = block->deferred_updates_next();
     if (block == tail) tail = NULL; // Handle self-loop end marker.
-    Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
+    Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
     if (fetched == block) break; // Update successful.
     block = fetched;             // Retry with updated block.
   }
@@ -724,7 +724,7 @@
     }
     // Release the contiguous entries that are in block.
     block->release_entries(releasing, this);
-    Atomic::sub(count, &_allocation_count);
+    Atomic::sub(&_allocation_count, count);
   }
 }
 
@@ -825,7 +825,7 @@
   // Set the request flag false and return its old value.
   // Needs to be atomic to avoid dropping a concurrent request.
   // Can't use Atomic::xchg, which may not support bool.
-  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
+  return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
 }
 
 // Record that cleanup is needed, without notifying the Service thread.
@@ -1010,7 +1010,7 @@
   // than a CAS loop on some platforms when there is contention.
   // We can cope with the uncertainty by recomputing start/end from
   // the result of the add, and dealing with potential overshoot.
-  size_t end = Atomic::add(step, &_next_block);
+  size_t end = Atomic::add(&_next_block, step);
   // _next_block may have changed, so recompute start from result of add.
   start = end - step;
   // _next_block may have changed so much that end has overshot.
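
Note on the final hunk: it is the one place where the argument reorder touches non-trivial reasoning, since the code depends on Atomic::add returning the updated value and then repairing any overshoot, as its comments describe. Below is a minimal standalone sketch of that idiom written against std::atomic rather than HotSpot's Atomic class; claim_range, g_next_block, and limit are illustrative names, not identifiers from oopStorage.cpp.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <utility>

// Stand-in for _next_block: a shared counter of the next unclaimed index.
static std::atomic<std::size_t> g_next_block{0};

// Claim up to 'step' consecutive indices below 'limit'. Returns a
// half-open [start, end) range, possibly empty if the counter has
// already overshot 'limit'.
std::pair<std::size_t, std::size_t> claim_range(std::size_t step,
                                                std::size_t limit) {
  // fetch_add returns the pre-add value; adding 'step' back yields the
  // post-add value, matching Atomic::add(&_next_block, step), which
  // returns the updated counter.
  std::size_t end =
      g_next_block.fetch_add(step, std::memory_order_relaxed) + step;
  // Recompute start from the result of the add; other threads may have
  // bumped the counter concurrently.
  std::size_t start = end - step;
  // Deal with potential overshoot by clamping both ends to the limit.
  start = std::min(start, limit);
  end = std::min(end, limit);
  return {start, end};
}

As in the patched code above, no CAS retry loop is needed: each thread receives a unique slice of the index space, and slices that land past the limit simply clamp to an empty range.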