--- old/src/hotspot/cpu/arm/stubGenerator_arm.cpp 2019-11-21 11:52:47.798568666 +0100 +++ new/src/hotspot/cpu/arm/stubGenerator_arm.cpp 2019-11-21 11:52:47.566564877 +0100 @@ -437,7 +437,7 @@ // for which we do not support MP and so membars are not necessary. This ARMv5 code will // be removed in the future. - // Support for jint Atomic::add(jint add_value, volatile jint *dest) + // Support for jint Atomic::add(volatile jint *dest, jint add_value) // // Arguments : // --- old/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp 2019-11-21 11:52:48.234575789 +0100 +++ new/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp 2019-11-21 11:52:48.026572391 +0100 @@ -679,7 +679,7 @@ } - // Support for jint Atomic::add(jint add_value, volatile jint* dest). + // Support for jint Atomic::add(volatile jint* dest, jint add_value). // // Arguments: // --- old/src/hotspot/os/bsd/os_bsd.cpp 2019-11-21 11:52:48.754584284 +0100 +++ new/src/hotspot/os/bsd/os_bsd.cpp 2019-11-21 11:52:48.494580037 +0100 @@ -1894,7 +1894,7 @@ } char buf[PATH_MAX + 1]; - int num = Atomic::add(1, &cnt); + int num = Atomic::add(&cnt, 1); snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d", os::get_temp_directory(), os::current_process_id(), num); @@ -3264,7 +3264,7 @@ while (processor_id < 0) { if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) { - Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1); + Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1); } processor_id = Atomic::load(&mapping[apic_id]); } --- old/src/hotspot/os/linux/os_linux.cpp 2019-11-21 11:52:49.278592844 +0100 +++ new/src/hotspot/os/linux/os_linux.cpp 2019-11-21 11:52:49.014588532 +0100 @@ -2813,7 +2813,7 @@ } char buf[PATH_MAX+1]; - int num = Atomic::add(1, &cnt); + int num = Atomic::add(&cnt, 1); snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d", os::get_temp_directory(), os::current_process_id(), num); --- old/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp 2019-11-21 11:52:49.826601796 +0100 +++ new/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp 2019-11-21 11:52:49.574597680 +0100 @@ -96,13 +96,13 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -127,8 +127,8 @@ template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp 2019-11-21 11:52:50.262608919 +0100 +++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp 2019-11-21 11:52:50.006604737 +0100 @@ -31,13 +31,13 @@ struct Atomic::PlatformAdd : Atomic::FetchAndAdd > { - template - D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const; + template + D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I 
add_value, atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -92,8 +92,8 @@ #ifdef AMD64 template<> -template -inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp 2019-11-21 11:52:50.698616041 +0100 +++ new/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp 2019-11-21 11:52:50.442611859 +0100 @@ -163,22 +163,22 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); #ifdef ARM - return add_using_helper(arm_add_and_fetch, add_value, dest); + return add_using_helper(arm_add_and_fetch, dest, add_value); #else #ifdef M68K - return add_using_helper(m68k_add_and_fetch, add_value, dest); + return add_using_helper(m68k_add_and_fetch, dest, add_value); #else return __sync_add_and_fetch(dest, add_value); #endif // M68K @@ -186,8 +186,8 @@ } template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp 2019-11-21 11:52:51.126623033 +0100 +++ new/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp 2019-11-21 11:52:50.870618851 +0100 @@ -36,8 +36,8 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const { + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE); FULL_MEM_BARRIER; return res; --- old/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp 2019-11-21 11:52:51.550629960 +0100 +++ new/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp 2019-11-21 11:52:51.294625778 +0100 @@ -70,17 +70,17 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); - return add_using_helper(os::atomic_add_func, add_value, dest); + return add_using_helper(os::atomic_add_func, dest, add_value); } --- old/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp 2019-11-21 11:52:51.978636952 +0100 +++ new/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp 2019-11-21 11:52:51.722632770 +0100 @@ -96,13 +96,13 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I 
add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -127,8 +127,8 @@ template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp 2019-11-21 11:52:52.406643944 +0100 +++ new/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp 2019-11-21 11:52:52.154639826 +0100 @@ -78,13 +78,13 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -137,8 +137,8 @@ template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp 2019-11-21 11:52:52.846651131 +0100 +++ new/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp 2019-11-21 11:52:52.586646884 +0100 @@ -31,13 +31,13 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -59,8 +59,8 @@ } template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp 2019-11-21 11:52:53.274658123 +0100 +++ new/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp 2019-11-21 11:52:53.018653941 +0100 @@ -31,13 +31,13 @@ struct Atomic::PlatformAdd : Atomic::FetchAndAdd > { - template - D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == 
sizeof(D)); @@ -93,8 +93,8 @@ #ifdef AMD64 template<> -template -inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp 2019-11-21 11:52:53.698665050 +0100 +++ new/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp 2019-11-21 11:52:53.442660868 +0100 @@ -34,13 +34,13 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -49,8 +49,8 @@ } template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp 2019-11-21 11:52:54.118671911 +0100 +++ new/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp 2019-11-21 11:52:53.866667794 +0100 @@ -30,8 +30,8 @@ // Implement ADD using a CAS loop. template struct Atomic::PlatformAdd { - template - inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const { + template + inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const { D old_value = *dest; while (true) { D new_value = old_value + add_value; --- old/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp 2019-11-21 11:52:54.538678771 +0100 +++ new/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp 2019-11-21 11:52:54.282674590 +0100 @@ -44,14 +44,14 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; // Not using add_using_helper; see comment for cmpxchg. template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -62,8 +62,8 @@ // Not using add_using_helper; see comment for cmpxchg. 
template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); --- old/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il 2019-11-21 11:52:54.962685699 +0100 +++ new/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il 2019-11-21 11:52:54.706681517 +0100 @@ -49,7 +49,7 @@ orq %rdx, %rax .end - // Support for jint Atomic::add(jint add_value, volatile jint* dest) + // Support for jint Atomic::add(volatile jint* dest, jint add_value) .inline _Atomic_add,2 movl %edi, %eax // save add_value for return lock @@ -57,7 +57,7 @@ addl %edi, %eax .end - // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest) + // Support for jlong Atomic::add(volatile jlong* dest, jlong add_value) .inline _Atomic_add_long,2 movq %rdi, %rax // save add_value for return lock --- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:52:55.386692624 +0100 +++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:52:55.134688507 +0100 @@ -57,23 +57,23 @@ struct Atomic::PlatformAdd : Atomic::AddAndFetch > { - template - D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; }; #ifdef AMD64 template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_using_helper(os::atomic_add_func, add_value, dest); + return add_using_helper(os::atomic_add_func, dest, add_value); } template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_using_helper(os::atomic_add_long_func, add_value, dest); + return add_using_helper(os::atomic_add_long_func, dest, add_value); } #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \ @@ -111,8 +111,8 @@ #else // !AMD64 template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, +template +inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); --- old/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp 2019-11-21 11:52:55.814699617 +0100 +++ new/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp 2019-11-21 11:52:55.554695370 +0100 @@ -50,7 +50,7 @@ } void ClassLoaderDataGraph::inc_instance_classes(size_t count) { - Atomic::add(count, &_num_instance_classes); + Atomic::add(&_num_instance_classes, count); } void ClassLoaderDataGraph::dec_instance_classes(size_t count) { @@ -59,7 +59,7 @@ } void ClassLoaderDataGraph::inc_array_classes(size_t count) { - Atomic::add(count, &_num_array_classes); + Atomic::add(&_num_array_classes, count); } void ClassLoaderDataGraph::dec_array_classes(size_t count) { --- old/src/hotspot/share/classfile/stringTable.cpp 2019-11-21 11:52:56.234706477 +0100 +++ new/src/hotspot/share/classfile/stringTable.cpp 2019-11-21 11:52:55.974702231 +0100 @@ -214,11 +214,11 @@ } size_t StringTable::item_added() { - return Atomic::add((size_t)1, &_items_count); + return 
Atomic::add(&_items_count, (size_t)1); } size_t StringTable::add_items_to_clean(size_t ndead) { - size_t total = Atomic::add((size_t)ndead, &_uncleaned_items_count); + size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead); log_trace(stringtable)( "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT, _uncleaned_items_count, ndead, total); @@ -226,7 +226,7 @@ } void StringTable::item_removed() { - Atomic::add((size_t)-1, &_items_count); + Atomic::add(&_items_count, (size_t)-1); } double StringTable::get_load_factor() { --- old/src/hotspot/share/classfile/symbolTable.cpp 2019-11-21 11:52:56.626712881 +0100 +++ new/src/hotspot/share/classfile/symbolTable.cpp 2019-11-21 11:52:56.414709418 +0100 @@ -724,7 +724,7 @@ bdt.done(jt); } - Atomic::add(stdc._processed, &_symbols_counted); + Atomic::add(&_symbols_counted, stdc._processed); log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT, stdd._deleted, stdc._processed); --- old/src/hotspot/share/compiler/compileBroker.cpp 2019-11-21 11:52:57.054719873 +0100 +++ new/src/hotspot/share/compiler/compileBroker.cpp 2019-11-21 11:52:56.798715691 +0100 @@ -1479,14 +1479,14 @@ assert(!is_osr, "can't be osr"); // Adapters, native wrappers and method handle intrinsics // should be generated always. - return Atomic::add(1, &_compilation_id); + return Atomic::add(&_compilation_id, 1); } else if (CICountOSR && is_osr) { - id = Atomic::add(1, &_osr_compilation_id); + id = Atomic::add(&_osr_compilation_id, 1); if (CIStartOSR <= id && id < CIStopOSR) { return id; } } else { - id = Atomic::add(1, &_compilation_id); + id = Atomic::add(&_compilation_id, 1); if (CIStart <= id && id < CIStop) { return id; } @@ -1498,7 +1498,7 @@ #else // CICountOSR is a develop flag and set to 'false' by default. In a product built, // only _compilation_id is incremented. - return Atomic::add(1, &_compilation_id); + return Atomic::add(&_compilation_id, 1); #endif } --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2019-11-21 11:52:57.542727844 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2019-11-21 11:52:57.282723598 +0100 @@ -4226,7 +4226,7 @@ HeapRegion* r = g1h->region_at(region_idx); assert(!g1h->is_on_master_free_list(r), "sanity"); - Atomic::add(r->rem_set()->occupied_locked(), &_rs_length); + Atomic::add(&_rs_length, r->rem_set()->occupied_locked()); if (!is_young) { g1h->hot_card_cache()->reset_card_counts(r); @@ -4290,7 +4290,7 @@ // Claim serial work. if (_serial_work_claim == 0) { - jint value = Atomic::add(1, &_serial_work_claim) - 1; + jint value = Atomic::add(&_serial_work_claim, 1) - 1; if (value == 0) { double serial_time = os::elapsedTime(); do_serial_work(); @@ -4305,7 +4305,7 @@ bool has_non_young_time = false; while (true) { - size_t end = Atomic::add(chunk_size(), &_parallel_work_claim); + size_t end = Atomic::add(&_parallel_work_claim, chunk_size()); size_t cur = end - chunk_size(); if (cur >= _num_work_items) { --- old/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp 2019-11-21 11:52:58.066736405 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp 2019-11-21 11:52:57.814732288 +0100 @@ -112,7 +112,7 @@ // Claim a new chunk, returning its bounds [from, to[. 
void claim_chunk(uint& from, uint& to) { - uint result = Atomic::add(_chunk_size, &_cur_claim_idx); + uint result = Atomic::add(&_cur_claim_idx, _chunk_size); assert(_max_size > result - 1, "Array too small, is %u should be %u with chunk size %u.", _max_size, result, _chunk_size); @@ -214,8 +214,8 @@ void update_totals(uint num_regions, size_t reclaimable_bytes) { if (num_regions > 0) { assert(reclaimable_bytes > 0, "invariant"); - Atomic::add(num_regions, &_num_regions_added); - Atomic::add(reclaimable_bytes, &_reclaimable_bytes_added); + Atomic::add(&_num_regions_added, num_regions); + Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes); } else { assert(reclaimable_bytes == 0, "invariant"); } --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2019-11-21 11:52:58.494743397 +0100 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2019-11-21 11:52:58.234739149 +0100 @@ -207,7 +207,7 @@ return NULL; } - size_t cur_idx = Atomic::add(1u, &_hwm) - 1; + size_t cur_idx = Atomic::add(&_hwm, 1u) - 1; if (cur_idx >= _chunk_capacity) { return NULL; } @@ -280,7 +280,7 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) { assert_at_safepoint(); - size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1; + size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1; assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions); assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end)); @@ -308,7 +308,7 @@ return NULL; } - size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1; + size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1; if (claimed_index < _num_root_regions) { return &_root_regions[claimed_index]; } @@ -1121,7 +1121,7 @@ virtual void work(uint worker_id) { G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl); _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id); - Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild); + Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild()); } uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; } --- old/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp 2019-11-21 11:52:58.986751433 +0100 +++ new/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp 2019-11-21 11:52:58.734747317 +0100 @@ -29,17 +29,17 @@ #include "runtime/atomic.hpp" inline void G1EvacStats::add_direct_allocated(size_t value) { - Atomic::add(value, &_direct_allocated); + Atomic::add(&_direct_allocated, value); } inline void G1EvacStats::add_region_end_waste(size_t value) { - Atomic::add(value, &_region_end_waste); + Atomic::add(&_region_end_waste, value); Atomic::inc(&_regions_filled); } inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) { - Atomic::add(used, &_failure_used); - Atomic::add(waste, &_failure_waste); + Atomic::add(&_failure_used, used); + Atomic::add(&_failure_waste, waste); } #endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP --- old/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp 2019-11-21 11:52:59.414758425 +0100 +++ new/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp 2019-11-21 11:52:59.158754243 +0100 @@ -101,7 +101,7 @@ // Adjust the weak roots. - if (Atomic::add(1u, &_references_done) == 1u) { // First incr claims task. + if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task. 
G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust); } --- old/src/hotspot/share/gc/g1/g1HotCardCache.cpp 2019-11-21 11:52:59.846765483 +0100 +++ new/src/hotspot/share/gc/g1/g1HotCardCache.cpp 2019-11-21 11:52:59.590761301 +0100 @@ -68,7 +68,7 @@ return card_ptr; } // Otherwise, the card is hot. - size_t index = Atomic::add(1u, &_hot_cache_idx) - 1; + size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1; size_t masked_index = index & (_hot_cache_size - 1); CardValue* current_ptr = _hot_cache[masked_index]; @@ -91,8 +91,8 @@ assert(!use_cache(), "cache should be disabled"); while (_hot_cache_par_claimed_idx < _hot_cache_size) { - size_t end_idx = Atomic::add(_hot_cache_par_chunk_size, - &_hot_cache_par_claimed_idx); + size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx, + _hot_cache_par_chunk_size); size_t start_idx = end_idx - _hot_cache_par_chunk_size; // The current worker has successfully claimed the chunk [start_idx..end_idx) end_idx = MIN2(end_idx, _hot_cache_size); --- old/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp 2019-11-21 11:53:00.230771756 +0100 +++ new/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp 2019-11-21 11:53:00.014768227 +0100 @@ -261,7 +261,7 @@ virtual void work(uint worker_id) { size_t const actual_chunk_size = MAX2(chunk_size(), _page_size); while (true) { - char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size; + char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size; if (touch_addr < _start_addr || touch_addr >= _end_addr) { break; } --- old/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp 2019-11-21 11:53:00.650778616 +0100 +++ new/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp 2019-11-21 11:53:00.394774434 +0100 @@ -129,7 +129,7 @@ void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) { assert(_collecting, "precondition"); - Atomic::add(buffer_size() - node->index(), &_entry_count); + Atomic::add(&_entry_count, buffer_size() - node->index()); _list.push(*node); update_tail(node); } @@ -139,7 +139,7 @@ const G1BufferNodeList from = src->take_all_completed_buffers(); if (from._head != NULL) { assert(from._tail != NULL, "invariant"); - Atomic::add(from._entry_count, &_entry_count); + Atomic::add(&_entry_count, from._entry_count); _list.prepend(*from._head, *from._tail); update_tail(from._tail); } --- old/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp 2019-11-21 11:53:01.062785346 +0100 +++ new/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp 2019-11-21 11:53:00.822781426 +0100 @@ -46,7 +46,7 @@ inline void G1RegionMarkStatsCache::evict(uint idx) { G1RegionMarkStatsCacheEntry* cur = &_cache[idx]; if (cur->_stats._live_words != 0) { - Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words); + Atomic::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words); } cur->clear(); } --- old/src/hotspot/share/gc/g1/g1RemSet.cpp 2019-11-21 11:53:01.474792076 +0100 +++ new/src/hotspot/share/gc/g1/g1RemSet.cpp 2019-11-21 11:53:01.218787895 +0100 @@ -179,7 +179,7 @@ bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false; if (marked_as_dirty) { - uint allocated = Atomic::add(1u, &_cur_idx) - 1; + uint allocated = Atomic::add(&_cur_idx, 1u) - 1; _buffer[allocated] = region; } } @@ -255,7 +255,7 @@ void work(uint worker_id) { while (_cur_dirty_regions < _regions->size()) { - uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length; + uint next = Atomic::add(&_cur_dirty_regions, 
_chunk_length) - _chunk_length; uint max = MIN2(next + _chunk_length, _regions->size()); for (uint i = next; i < max; i++) { @@ -447,7 +447,7 @@ uint claim_cards_to_scan(uint region, uint increment) { assert(region < _max_regions, "Tried to access invalid region %u", region); - return Atomic::add(increment, &_card_table_scan_state[region]) - increment; + return Atomic::add(&_card_table_scan_state[region], increment) - increment; } void add_dirty_region(uint const region) { --- old/src/hotspot/share/gc/parallel/parMarkBitMap.cpp 2019-11-21 11:53:01.922799395 +0100 +++ new/src/hotspot/share/gc/parallel/parMarkBitMap.cpp 2019-11-21 11:53:01.670795279 +0100 @@ -90,7 +90,7 @@ bool end_bit_ok = _end_bits.par_set_bit(end_bit); assert(end_bit_ok, "concurrency problem"); DEBUG_ONLY(Atomic::inc(&mark_bitmap_count)); - DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size)); + DEBUG_ONLY(Atomic::add(&mark_bitmap_size, size)); return true; } return false; --- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-11-21 11:53:02.362806582 +0100 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-11-21 11:53:02.094802205 +0100 @@ -532,7 +532,7 @@ const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize; DEBUG_ONLY(Atomic::inc(&add_obj_count);) - DEBUG_ONLY(Atomic::add(len, &add_obj_size);) + DEBUG_ONLY(Atomic::add(&add_obj_size, len);) if (beg_region == end_region) { // All in one region. @@ -2449,7 +2449,7 @@ } bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) { - uint claimed = Atomic::add(1u, &_counter) - 1; // -1 is so that we start with zero + uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero if (claimed < _insert_index) { reference = _backing_array[claimed]; return true; --- old/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2019-11-21 11:53:02.866814817 +0100 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2019-11-21 11:53:02.610810635 +0100 @@ -536,7 +536,7 @@ { assert(_dc_and_los < dc_claimed, "already claimed"); assert(_dc_and_los >= dc_one, "count would go negative"); - Atomic::add(dc_mask, &_dc_and_los); + Atomic::add(&_dc_and_los, dc_mask); } inline HeapWord* ParallelCompactData::RegionData::data_location() const @@ -576,7 +576,7 @@ inline void ParallelCompactData::RegionData::add_live_obj(size_t words) { assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); - Atomic::add(static_cast(words), &_dc_and_los); + Atomic::add(&_dc_and_los, static_cast(words)); } inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr) --- old/src/hotspot/share/gc/shared/oopStorage.cpp 2019-11-21 11:53:03.310822069 +0100 +++ new/src/hotspot/share/gc/shared/oopStorage.cpp 2019-11-21 11:53:03.058817952 +0100 @@ -144,7 +144,7 @@ } void OopStorage::ActiveArray::increment_refcount() const { - int new_value = Atomic::add(1, &_refcount); + int new_value = Atomic::add(&_refcount, 1); assert(new_value >= 1, "negative refcount %d", new_value - 1); } @@ -1010,7 +1010,7 @@ // than a CAS loop on some platforms when there is contention. // We can cope with the uncertainty by recomputing start/end from // the result of the add, and dealing with potential overshoot. - size_t end = Atomic::add(step, &_next_block); + size_t end = Atomic::add(&_next_block, step); // _next_block may have changed, so recompute start from result of add. start = end - step; // _next_block may have changed so much that end has overshot. 
--- old/src/hotspot/share/gc/shared/plab.inline.hpp 2019-11-21 11:53:03.766829518 +0100 +++ new/src/hotspot/share/gc/shared/plab.inline.hpp 2019-11-21 11:53:03.510825337 +0100 @@ -43,19 +43,19 @@ } void PLABStats::add_allocated(size_t v) { - Atomic::add(v, &_allocated); + Atomic::add(&_allocated, v); } void PLABStats::add_unused(size_t v) { - Atomic::add(v, &_unused); + Atomic::add(&_unused, v); } void PLABStats::add_wasted(size_t v) { - Atomic::add(v, &_wasted); + Atomic::add(&_wasted, v); } void PLABStats::add_undo_wasted(size_t v) { - Atomic::add(v, &_undo_wasted); + Atomic::add(&_undo_wasted, v); } #endif // SHARE_GC_SHARED_PLAB_INLINE_HPP --- old/src/hotspot/share/gc/shared/preservedMarks.cpp 2019-11-21 11:53:04.178836249 +0100 +++ new/src/hotspot/share/gc/shared/preservedMarks.cpp 2019-11-21 11:53:03.918832001 +0100 @@ -55,7 +55,7 @@ restore(); // Only do the atomic add if the size is > 0. if (stack_size > 0) { - Atomic::add(stack_size, total_size_addr); + Atomic::add(total_size_addr, stack_size); } } --- old/src/hotspot/share/gc/shared/ptrQueue.cpp 2019-11-21 11:53:04.606843240 +0100 +++ new/src/hotspot/share/gc/shared/ptrQueue.cpp 2019-11-21 11:53:04.350839058 +0100 @@ -182,7 +182,7 @@ const size_t trigger_transfer = 10; // Add to pending list. Update count first so no underflow in transfer. - size_t pending_count = Atomic::add(1u, &_pending_count); + size_t pending_count = Atomic::add(&_pending_count, 1u); _pending_list.push(*node); if (pending_count > trigger_transfer) { try_transfer_pending(); @@ -219,7 +219,7 @@ // Add synchronized nodes to _free_list. // Update count first so no underflow in allocate(). - Atomic::add(count, &_free_count); + Atomic::add(&_free_count, count); _free_list.prepend(*first, *last); log_trace(gc, ptrqueue, freelist) ("Transferred %s pending to free: " SIZE_FORMAT, name(), count); @@ -258,4 +258,3 @@ void PtrQueueSet::deallocate_buffer(BufferNode* node) { _allocator->release(node); } - --- old/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp 2019-11-21 11:53:05.026850101 +0100 +++ new/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp 2019-11-21 11:53:04.774845985 +0100 @@ -246,7 +246,7 @@ void ReferenceProcessorPhaseTimes::add_ref_cleared(ReferenceType ref_type, size_t count) { ASSERT_REF_TYPE(ref_type); - Atomic::add(count, &_ref_cleared[ref_type_2_index(ref_type)]); + Atomic::add(&_ref_cleared[ref_type_2_index(ref_type)], count); } void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) { --- old/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp 2019-11-21 11:53:05.454857093 +0100 +++ new/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp 2019-11-21 11:53:05.198852911 +0100 @@ -32,7 +32,7 @@ volatile size_t StringDedupQueue::_claimed_index = 0; size_t StringDedupQueue::claim() { - return Atomic::add(size_t(1), &_claimed_index) - 1; + return Atomic::add(&_claimed_index, size_t(1)) - 1; } void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) { --- old/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp 2019-11-21 11:53:05.878864019 +0100 +++ new/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp 2019-11-21 11:53:05.622859837 +0100 @@ -589,7 +589,7 @@ } size_t StringDedupTable::claim_table_partition(size_t partition_size) { - return Atomic::add(partition_size, &_claimed_index) - partition_size; + return Atomic::add(&_claimed_index, partition_size) - partition_size; } void StringDedupTable::verify() { --- 
old/src/hotspot/share/gc/shared/workgroup.cpp 2019-11-21 11:53:06.306871011 +0100 +++ new/src/hotspot/share/gc/shared/workgroup.cpp 2019-11-21 11:53:06.054866894 +0100 @@ -153,7 +153,7 @@ // Wait for the coordinator to dispatch a task. _start_semaphore->wait(); - uint num_started = Atomic::add(1u, &_started); + uint num_started = Atomic::add(&_started, 1u); // Subtract one to get a zero-indexed worker id. uint worker_id = num_started - 1; --- old/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp 2019-11-21 11:53:06.678877087 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp 2019-11-21 11:53:06.474873755 +0100 @@ -264,7 +264,7 @@ size_t max = (size_t)list->length(); while (_claimed < max) { - size_t cur = Atomic::add(stride, &_claimed) - stride; + size_t cur = Atomic::add(&_claimed, stride) - stride; size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; --- old/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp 2019-11-21 11:53:07.090883818 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp 2019-11-21 11:53:06.834879635 +0100 @@ -593,7 +593,7 @@ void ShenandoahControlThread::pacing_notify_alloc(size_t words) { assert(ShenandoahPacing, "should only call when pacing is enabled"); - Atomic::add(words, &_allocs_seen); + Atomic::add(&_allocs_seen, words); } void ShenandoahControlThread::set_forced_counters_update(bool value) { --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2019-11-21 11:53:07.526890940 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2019-11-21 11:53:07.266886692 +0100 @@ -620,7 +620,7 @@ } void ShenandoahHeap::increase_used(size_t bytes) { - Atomic::add(bytes, &_used); + Atomic::add(&_used, bytes); } void ShenandoahHeap::set_used(size_t bytes) { @@ -633,7 +633,7 @@ } void ShenandoahHeap::increase_allocated(size_t bytes) { - Atomic::add(bytes, &_bytes_allocated_since_gc_start); + Atomic::add(&_bytes_allocated_since_gc_start, bytes); } void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { @@ -1350,7 +1350,7 @@ size_t max = _heap->num_regions(); while (_index < max) { - size_t cur = Atomic::add(stride, &_index) - stride; + size_t cur = Atomic::add(&_index, stride) - stride; size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp 2019-11-21 11:53:08.022899042 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp 2019-11-21 11:53:07.762894795 +0100 @@ -49,7 +49,7 @@ inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() { - size_t new_index = Atomic::add((size_t) 1, &_index); + size_t new_index = Atomic::add(&_index, (size_t) 1); // get_region() provides the bounds-check and returns NULL on OOB. 
return _heap->get_region(new_index - 1); } --- old/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2019-11-21 11:53:08.462906229 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2019-11-21 11:53:08.206902048 +0100 @@ -687,7 +687,7 @@ } void ShenandoahHeapRegion::record_pin() { - Atomic::add((size_t)1, &_critical_pins); + Atomic::add(&_critical_pins, (size_t)1); } void ShenandoahHeapRegion::record_unpin() { --- old/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp 2019-11-21 11:53:08.902913417 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp 2019-11-21 11:53:08.642909170 +0100 @@ -103,7 +103,7 @@ } inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { - size_t new_live_data = Atomic::add(s, &_live_data); + size_t new_live_data = Atomic::add(&_live_data, s); #ifdef ASSERT size_t live_bytes = new_live_data * HeapWordSize; size_t used_bytes = used(); --- old/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp 2019-11-21 11:53:09.334920474 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp 2019-11-21 11:53:09.074916227 +0100 @@ -132,7 +132,7 @@ } void BinaryMagnitudeSeq::add(size_t val) { - Atomic::add(val, &_sum); + Atomic::add(&_sum, val); int mag = log2_intptr(val) + 1; @@ -147,7 +147,7 @@ mag = BitsPerSize_t - 1; } - Atomic::add((size_t)1, &_mags[mag]); + Atomic::add(&_mags[mag], (size_t)1); } size_t BinaryMagnitudeSeq::level(int level) const { --- old/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp 2019-11-21 11:53:09.770927597 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp 2019-11-21 11:53:09.506923284 +0100 @@ -223,7 +223,7 @@ } intptr_t tax = MAX2(1, words * Atomic::load(&_tax_rate)); - Atomic::add(tax, &_budget); + Atomic::add(&_budget, tax); } intptr_t ShenandoahPacer::epoch() { --- old/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp 2019-11-21 11:53:10.166934066 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp 2019-11-21 11:53:09.950930536 +0100 @@ -47,13 +47,13 @@ inline void ShenandoahPacer::report_internal(size_t words) { assert(ShenandoahPacing, "Only be here when pacing is enabled"); STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); - Atomic::add((intptr_t)words, &_budget); + Atomic::add(&_budget, (intptr_t)words); } inline void ShenandoahPacer::report_progress_internal(size_t words) { assert(ShenandoahPacing, "Only be here when pacing is enabled"); STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); - Atomic::add((intptr_t)words, &_progress); + Atomic::add(&_progress, (intptr_t)words); } #endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP --- old/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp 2019-11-21 11:53:10.582940861 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp 2019-11-21 11:53:10.330936744 +0100 @@ -304,7 +304,7 @@ return NULL; } - jint index = Atomic::add(1, &_claimed_index); + jint index = Atomic::add(&_claimed_index, 1); if (index <= size) { return GenericTaskQueueSet::queue((uint)index - 1); --- old/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp 2019-11-21 11:53:11.010947852 +0100 +++ new/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp 2019-11-21 11:53:10.754943671 +0100 @@ -139,7 +139,7 @@ // skip break; case ShenandoahVerifier::_verify_liveness_complete: - Atomic::add((uint) obj->size(), &_ld[obj_reg->region_number()]); + Atomic::add(&_ld[obj_reg->region_number()], (uint) obj->size()); // fallthrough for fast failure for un-live regions: 
case ShenandoahVerifier::_verify_liveness_conservative: check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), @@ -479,7 +479,7 @@ } } - Atomic::add(processed, &_processed); + Atomic::add(&_processed, processed); } }; @@ -518,7 +518,7 @@ _options); while (true) { - size_t v = Atomic::add(1u, &_claimed) - 1; + size_t v = Atomic::add(&_claimed, 1u) - 1; if (v < _heap->num_regions()) { ShenandoahHeapRegion* r = _heap->get_region(v); if (!r->is_humongous() && !r->is_trash()) { @@ -538,7 +538,7 @@ if (_heap->complete_marking_context()->is_marked((oop)obj)) { verify_and_follow(obj, stack, cl, &processed); } - Atomic::add(processed, &_processed); + Atomic::add(&_processed, processed); } virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) { @@ -571,7 +571,7 @@ } } - Atomic::add(processed, &_processed); + Atomic::add(&_processed, processed); } void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) { --- old/src/hotspot/share/gc/z/zArray.inline.hpp 2019-11-21 11:53:11.462955235 +0100 +++ new/src/hotspot/share/gc/z/zArray.inline.hpp 2019-11-21 11:53:11.206951054 +0100 @@ -101,7 +101,7 @@ template inline bool ZArrayIteratorImpl::next(T* elem) { if (parallel) { - const size_t next = Atomic::add(1u, &_next) - 1u; + const size_t next = Atomic::add(&_next, 1u) - 1u; if (next < _array->size()) { *elem = _array->at(next); return true; --- old/src/hotspot/share/gc/z/zLiveMap.inline.hpp 2019-11-21 11:53:11.846961508 +0100 +++ new/src/hotspot/share/gc/z/zLiveMap.inline.hpp 2019-11-21 11:53:11.638958111 +0100 @@ -121,8 +121,8 @@ } inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) { - Atomic::add(objects, &_live_objects); - Atomic::add(bytes, &_live_bytes); + Atomic::add(&_live_objects, objects); + Atomic::add(&_live_bytes, bytes); } inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const { --- old/src/hotspot/share/gc/z/zMarkStackAllocator.cpp 2019-11-21 11:53:12.270968434 +0100 +++ new/src/hotspot/share/gc/z/zMarkStackAllocator.cpp 2019-11-21 11:53:12.014964252 +0100 @@ -110,8 +110,8 @@ // Increment top before end to make sure another // thread can't steal out newly expanded space. - addr = Atomic::add(size, &_top) - size; - Atomic::add(expand_size, &_end); + addr = Atomic::add(&_top, size) - size; + Atomic::add(&_end, expand_size); return addr; } --- old/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp 2019-11-21 11:53:12.694975360 +0100 +++ new/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp 2019-11-21 11:53:12.438971179 +0100 @@ -37,7 +37,7 @@ } inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) { - Atomic::add(1u, nworking_stage); + Atomic::add(nworking_stage, 1u); } inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) { --- old/src/hotspot/share/gc/z/zNMethodTableIteration.cpp 2019-11-21 11:53:13.118982287 +0100 +++ new/src/hotspot/share/gc/z/zNMethodTableIteration.cpp 2019-11-21 11:53:12.862978105 +0100 @@ -58,7 +58,7 @@ // Claim table partition. Each partition is currently sized to span // two cache lines. This number is just a guess, but seems to work well. 
const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry); - const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size); + const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size); const size_t partition_end = MIN2(partition_start + partition_size, _size); if (partition_start == partition_end) { // End of table --- old/src/hotspot/share/gc/z/zObjectAllocator.cpp 2019-11-21 11:53:13.546989279 +0100 +++ new/src/hotspot/share/gc/z/zObjectAllocator.cpp 2019-11-21 11:53:13.286985031 +0100 @@ -63,7 +63,7 @@ ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags); if (page != NULL) { // Increment used bytes - Atomic::add(size, _used.addr()); + Atomic::add(_used.addr(), size); } return page; @@ -71,7 +71,7 @@ void ZObjectAllocator::undo_alloc_page(ZPage* page) { // Increment undone bytes - Atomic::add(page->size(), _undone.addr()); + Atomic::add(_undone.addr(), page->size()); ZHeap::heap()->undo_alloc_page(page); } --- old/src/hotspot/share/gc/z/zRelocationSet.inline.hpp 2019-11-21 11:53:13.986996466 +0100 +++ new/src/hotspot/share/gc/z/zRelocationSet.inline.hpp 2019-11-21 11:53:13.726992218 +0100 @@ -38,7 +38,7 @@ if (parallel) { if (_next < nforwardings) { - const size_t next = Atomic::add(1u, &_next) - 1u; + const size_t next = Atomic::add(&_next, 1u) - 1u; if (next < nforwardings) { *forwarding = _relocation_set->_forwardings[next]; return true; --- old/src/hotspot/share/gc/z/zStat.cpp 2019-11-21 11:53:14.411003392 +0100 +++ new/src/hotspot/share/gc/z/zStat.cpp 2019-11-21 11:53:14.150999145 +0100 @@ -761,8 +761,8 @@ // void ZStatSample(const ZStatSampler& sampler, uint64_t value) { ZStatSamplerData* const cpu_data = sampler.get(); - Atomic::add(1u, &cpu_data->_nsamples); - Atomic::add(value, &cpu_data->_sum); + Atomic::add(&cpu_data->_nsamples, 1u); + Atomic::add(&cpu_data->_sum, value); uint64_t max = cpu_data->_max; for (;;) { @@ -787,14 +787,14 @@ void ZStatInc(const ZStatCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); - const uint64_t value = Atomic::add(increment, &cpu_data->_counter); + const uint64_t value = Atomic::add(&cpu_data->_counter, increment); ZTracer::tracer()->report_stat_counter(counter, increment, value); } void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); - Atomic::add(increment, &cpu_data->_counter); + Atomic::add(&cpu_data->_counter, increment); } // --- old/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp 2019-11-21 11:53:14.867010841 +0100 +++ new/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp 2019-11-21 11:53:14.615006724 +0100 @@ -92,11 +92,11 @@ MultiThreadedRefCounter() : _refs(0) {} void inc() const { - Atomic::add(1, &_refs); + Atomic::add(&_refs, 1); } bool dec() const { - return 0 == Atomic::add((-1), &_refs); + return 0 == Atomic::add(&_refs, (-1)); } int current() const { --- old/src/hotspot/share/logging/logOutputList.cpp 2019-11-21 11:53:15.303017963 +0100 +++ new/src/hotspot/share/logging/logOutputList.cpp 2019-11-21 11:53:15.035013585 +0100 @@ -30,13 +30,13 @@ #include "utilities/globalDefinitions.hpp" jint LogOutputList::increase_readers() { - jint result = Atomic::add(1, &_active_readers); + jint result = Atomic::add(&_active_readers, 1); assert(_active_readers > 0, "Ensure we have consistent state"); return result; } jint LogOutputList::decrease_readers() { - jint result = Atomic::add(-1, &_active_readers); + 
jint result = Atomic::add(&_active_readers, -1); assert(result >= 0, "Ensure we have consistent state"); return result; } --- old/src/hotspot/share/memory/metaspace.cpp 2019-11-21 11:53:15.747025216 +0100 +++ new/src/hotspot/share/memory/metaspace.cpp 2019-11-21 11:53:15.487020969 +0100 @@ -394,7 +394,7 @@ } static void inc_stat_atomically(volatile size_t* pstat, size_t words) { - Atomic::add(words, pstat); + Atomic::add(pstat, words); } static void dec_stat_atomically(volatile size_t* pstat, size_t words) { --- old/src/hotspot/share/memory/universe.cpp 2019-11-21 11:53:16.199032599 +0100 +++ new/src/hotspot/share/memory/universe.cpp 2019-11-21 11:53:15.943028417 +0100 @@ -580,7 +580,7 @@ int next; if ((_preallocated_out_of_memory_error_avail_count > 0) && SystemDictionary::Throwable_klass()->is_initialized()) { - next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count); + next = (int)Atomic::add(&_preallocated_out_of_memory_error_avail_count, -1); assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt"); } else { next = -1; --- old/src/hotspot/share/oops/klass.cpp 2019-11-21 11:53:16.643039852 +0100 +++ new/src/hotspot/share/oops/klass.cpp 2019-11-21 11:53:16.387035670 +0100 @@ -710,7 +710,7 @@ } int Klass::atomic_incr_biased_lock_revocation_count() { - return (int) Atomic::add(1, &_biased_lock_revocation_count); + return (int) Atomic::add(&_biased_lock_revocation_count, 1); } // Unless overridden, jvmti_class_status has no flags set. --- old/src/hotspot/share/prims/resolvedMethodTable.cpp 2019-11-21 11:53:17.091047171 +0100 +++ new/src/hotspot/share/prims/resolvedMethodTable.cpp 2019-11-21 11:53:16.835042989 +0100 @@ -327,7 +327,7 @@ } void ResolvedMethodTable::inc_dead_counter(size_t ndead) { - size_t total = Atomic::add(ndead, &_uncleaned_items_count); + size_t total = Atomic::add(&_uncleaned_items_count, ndead); log_trace(membername, table)( "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT, _uncleaned_items_count, ndead, total); --- old/src/hotspot/share/runtime/atomic.hpp 2019-11-21 11:53:17.531054357 +0100 +++ new/src/hotspot/share/runtime/atomic.hpp 2019-11-21 11:53:17.275050175 +0100 @@ -100,8 +100,8 @@ // Atomically add to a location. Returns updated value. add*() provide: // add-value-to-dest - template - inline static D add(I add_value, D volatile* dest, + template + inline static D add(D volatile* dest, I add_value, atomic_memory_order order = memory_order_conservative); template @@ -224,7 +224,7 @@ // Dispatch handler for add. Provides type-based validity checking // and limited conversions around calls to the platform-specific // implementation layer provided by PlatformAdd. - template + template struct AddImpl; // Platform-specific implementation of add. Support for sizes of 4 @@ -239,7 +239,7 @@ // - platform_add is an object of type PlatformAdd. // // Then - // platform_add(add_value, dest) + // platform_add(dest, add_value) // must be a valid expression, returning a result convertible to D. // // No definition is provided; all platforms must explicitly define @@ -259,12 +259,12 @@ // otherwise, addend is add_value. // // FetchAndAdd requires the derived class to provide - // fetch_and_add(addend, dest) + // fetch_and_add(dest, addend) // atomically adding addend to the value of dest, and returning the // old value. 
// // AddAndFetch requires the derived class to provide - // add_and_fetch(addend, dest) + // add_and_fetch(dest, addend) // atomically adding addend to the value of dest, and returning the // new value. // @@ -286,8 +286,8 @@ // function. No scaling of add_value is performed when D is a pointer // type, so this function can be used to implement the support function // required by AddAndFetch. - template - static D add_using_helper(Fn fn, I add_value, D volatile* dest); + template + static D add_using_helper(Fn fn, D volatile* dest, I add_value); // Dispatch handler for cmpxchg. Provides type-based validity // checking and limited conversions around calls to the @@ -517,21 +517,21 @@ template struct Atomic::FetchAndAdd { - template - D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D operator()(D volatile* dest, I add_value, atomic_memory_order order) const; }; template struct Atomic::AddAndFetch { - template - D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; + template + D operator()(D volatile* dest, I add_value, atomic_memory_order order) const; }; template inline void Atomic::inc(D volatile* dest, atomic_memory_order order) { STATIC_ASSERT(IsPointer::value || IsIntegral::value); typedef typename Conditional::value, ptrdiff_t, D>::type I; - Atomic::add(I(1), dest, order); + Atomic::add(dest, I(1), order); } template @@ -540,7 +540,7 @@ typedef typename Conditional::value, ptrdiff_t, D>::type I; // Assumes two's complement integer representation. #pragma warning(suppress: 4146) - Atomic::add(I(-1), dest, order); + Atomic::add(dest, I(-1), order); } template @@ -557,7 +557,7 @@ AddendType addend = sub_value; // Assumes two's complement integer representation. #pragma warning(suppress: 4146) // In case AddendType is not signed. 
- return Atomic::add(-addend, dest, order); + return Atomic::add(dest, -addend, order); } // Define the class before including platform file, which may specialize @@ -678,68 +678,68 @@ StoreImpl >()(p, v); } -template -inline D Atomic::add(I add_value, D volatile* dest, +template +inline D Atomic::add(D volatile* dest, I add_value, atomic_memory_order order) { - return AddImpl()(add_value, dest, order); + return AddImpl()(dest, add_value, order); } -template +template struct Atomic::AddImpl< - I, D, + D, I, typename EnableIf::value && IsIntegral::value && (sizeof(I) <= sizeof(D)) && (IsSigned::value == IsSigned::value)>::type> { - D operator()(I add_value, D volatile* dest, atomic_memory_order order) const { + D operator()(D volatile* dest, I add_value, atomic_memory_order order) const { D addend = add_value; - return PlatformAdd()(addend, dest, order); + return PlatformAdd()(dest, addend, order); } }; -template +template struct Atomic::AddImpl< - I, P*, + P*, I, typename EnableIf::value && (sizeof(I) <= sizeof(P*))>::type> { - P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const { + P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*)); STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*)); typedef typename Conditional::value, intptr_t, uintptr_t>::type CI; CI addend = add_value; - return PlatformAdd()(addend, dest, order); + return PlatformAdd()(dest, addend, order); } }; template -template -inline D Atomic::FetchAndAdd::operator()(I add_value, D volatile* dest, +template +inline D Atomic::FetchAndAdd::operator()(D volatile* dest, I add_value, atomic_memory_order order) const { I addend = add_value; // If D is a pointer type P*, scale by sizeof(P). if (IsPointer::value) { addend *= sizeof(typename RemovePointer::type); } - D old = static_cast(this)->fetch_and_add(addend, dest, order); + D old = static_cast(this)->fetch_and_add(dest, addend, order); return old + add_value; } template -template -inline D Atomic::AddAndFetch::operator()(I add_value, D volatile* dest, +template +inline D Atomic::AddAndFetch::operator()(D volatile* dest, I add_value, atomic_memory_order order) const { // If D is a pointer type P*, scale by sizeof(P). 
if (IsPointer::value) { add_value *= sizeof(typename RemovePointer::type); } - return static_cast(this)->add_and_fetch(add_value, dest, order); + return static_cast(this)->add_and_fetch(dest, add_value, order); } -template -inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) { +template +inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) { return PrimitiveConversions::cast( fn(PrimitiveConversions::cast(add_value), reinterpret_cast(dest))); --- old/src/hotspot/share/runtime/os.cpp 2019-11-21 11:53:17.995061937 +0100 +++ new/src/hotspot/share/runtime/os.cpp 2019-11-21 11:53:17.739057756 +0100 @@ -668,7 +668,7 @@ if ((cur_malloc_words + words) > MallocMaxTestWords) { return true; } - Atomic::add(words, &cur_malloc_words); + Atomic::add(&cur_malloc_words, words); } return false; } --- old/src/hotspot/share/runtime/threadSMR.cpp 2019-11-21 11:53:18.455069451 +0100 +++ new/src/hotspot/share/runtime/threadSMR.cpp 2019-11-21 11:53:18.199065269 +0100 @@ -134,7 +134,7 @@ // 'inline' functions first so the definitions are before first use: inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) { - Atomic::add(add_value, &_deleted_thread_times); + Atomic::add(&_deleted_thread_times, add_value); } inline void ThreadsSMRSupport::inc_deleted_thread_cnt() { --- old/src/hotspot/share/runtime/threadSMR.inline.hpp 2019-11-21 11:53:18.911076900 +0100 +++ new/src/hotspot/share/runtime/threadSMR.inline.hpp 2019-11-21 11:53:18.655072719 +0100 @@ -56,7 +56,7 @@ // they are called by public inline update_tlh_stats() below: inline void ThreadsSMRSupport::add_tlh_times(uint add_value) { - Atomic::add(add_value, &_tlh_times); + Atomic::add(&_tlh_times, add_value); } inline void ThreadsSMRSupport::inc_tlh_cnt() { --- old/src/hotspot/share/services/mallocSiteTable.hpp 2019-11-21 11:53:19.339083892 +0100 +++ new/src/hotspot/share/services/mallocSiteTable.hpp 2019-11-21 11:53:19.087079774 +0100 @@ -153,7 +153,7 @@ // Acquire shared lock. // Return true if shared access is granted. inline bool sharedLock() { - jint res = Atomic::add(1, _lock); + jint res = Atomic::add(_lock, 1); if (res < 0) { Atomic::dec(_lock); return false; --- old/src/hotspot/share/services/mallocTracker.hpp 2019-11-21 11:53:19.771090948 +0100 +++ new/src/hotspot/share/services/mallocTracker.hpp 2019-11-21 11:53:19.519086831 +0100 @@ -55,7 +55,7 @@ inline void allocate(size_t sz) { Atomic::inc(&_count); if (sz > 0) { - Atomic::add(sz, &_size); + Atomic::add(&_size, sz); DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size)); } DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);) @@ -72,7 +72,7 @@ inline void resize(long sz) { if (sz != 0) { - Atomic::add(size_t(sz), &_size); + Atomic::add(&_size, size_t(sz)); DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);) } } --- old/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp 2019-11-21 11:53:20.207098070 +0100 +++ new/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp 2019-11-21 11:53:19.951093889 +0100 @@ -53,7 +53,7 @@ // Returns true if you succeeded to claim the range start -> (stop-1). 
   bool claim(size_t* start, size_t* stop) {
-    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
+    size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
     if (claimed >= _stop_task) {
       return false;
     }
   }
--- old/src/hotspot/share/utilities/globalCounter.cpp 2019-11-21 11:53:20.631104996 +0100
+++ new/src/hotspot/share/utilities/globalCounter.cpp 2019-11-21 11:53:20.375100815 +0100
@@ -59,7 +59,7 @@
 void GlobalCounter::write_synchronize() {
   assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0,
          "must be outside a critcal section");
   // Atomic::add must provide fence since we have storeload dependency.
-  uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter);
+  uintx gbl_cnt = Atomic::add(&_global_counter._counter, COUNTER_INCREMENT);
   // Do all RCU threads.
   CounterThreadCheck ctc(gbl_cnt);
--- old/src/hotspot/share/utilities/singleWriterSynchronizer.cpp 2019-11-21 11:53:21.051111857 +0100
+++ new/src/hotspot/share/utilities/singleWriterSynchronizer.cpp 2019-11-21 11:53:20.799107740 +0100
@@ -44,7 +44,7 @@
 // synchronization have exited that critical section.
 void SingleWriterSynchronizer::synchronize() {
   // Side-effect in assert balanced by debug-only dec at end.
-  assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
+  assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
   // We don't know anything about the muxing between this invocation
   // and invocations in other threads. We must start with the latest
   // _enter polarity, else we could clobber the wrong _exit value on
--- old/src/hotspot/share/utilities/singleWriterSynchronizer.hpp 2019-11-21 11:53:21.479118848 +0100
+++ new/src/hotspot/share/utilities/singleWriterSynchronizer.hpp 2019-11-21 11:53:21.223114666 +0100
@@ -89,11 +89,11 @@
 };

 inline uint SingleWriterSynchronizer::enter() {
-  return Atomic::add(2u, &_enter);
+  return Atomic::add(&_enter, 2u);
 }

 inline void SingleWriterSynchronizer::exit(uint enter_value) {
-  uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
+  uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
   // If this exit completes a synchronize request, wakeup possibly
   // waiting synchronizer. Read of _waiting_for must follow the _exit
   // update.
--- old/src/hotspot/share/utilities/waitBarrier_generic.cpp 2019-11-21 11:53:21.843124794 +0100
+++ new/src/hotspot/share/utilities/waitBarrier_generic.cpp 2019-11-21 11:53:21.639121461 +0100
@@ -82,13 +82,13 @@
     OrderAccess::fence();
     return;
   }
-  Atomic::add(1, &_barrier_threads);
+  Atomic::add(&_barrier_threads, 1);
   if (barrier_tag != 0 && barrier_tag == _barrier_tag) {
-    Atomic::add(1, &_waiters);
+    Atomic::add(&_waiters, 1);
     _sem_barrier.wait();
     // We help out with posting, but we need to do so before we decrement the
     // _barrier_threads otherwise we might wake threads up in next wait.
     GenericWaitBarrier::wake_if_needed();
   }
-  Atomic::add(-1, &_barrier_threads);
+  Atomic::add(&_barrier_threads, -1);
 }
--- old/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp 2019-11-21 11:53:22.251131458 +0100
+++ new/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp 2019-11-21 11:53:21.999127343 +0100
@@ -114,7 +114,7 @@
       ThreadBlockInVM tbiv(this); // Safepoint check.
     }
     tty->print_cr("%u allocations: " SIZE_FORMAT, _thread_number, _allocations);
-    Atomic::add(_allocations, _total_allocations);
+    Atomic::add(_total_allocations, _allocations);
   }
 };
--- old/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp 2019-11-21 11:53:22.679138450 +0100
+++ new/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp 2019-11-21 11:53:22.419134203 +0100
@@ -157,7 +157,7 @@
       ThreadBlockInVM tbiv(this); // Safepoint check.
     }
     tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
-    Atomic::add(_allocations, _total_allocations);
+    Atomic::add(_total_allocations, _allocations);
   }
 };
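Illustrative note (not part of the patch): every call site above moves from Atomic::add(add_value, dest) to Atomic::add(dest, add_value), and the platform layer keeps two helper conventions, fetch_and_add (returns the old value) and add_and_fetch (returns the new value), both now destination-first. The sketch below is a self-contained standard-C++ analogue using std::atomic and hypothetical free functions, not HotSpot's Atomic class, showing the two return-value conventions under the destination-first argument order.

#include <atomic>
#include <cassert>

// Hypothetical stand-ins for the fetch_and_add / add_and_fetch contracts,
// written against std::atomic so the example compiles on its own.
template<typename D, typename I>
D fetch_and_add(std::atomic<D>* dest, I add_value) {
  // Atomically adds add_value to *dest, returning the value *before* the add.
  return dest->fetch_add(static_cast<D>(add_value));
}

template<typename D, typename I>
D add_and_fetch(std::atomic<D>* dest, I add_value) {
  // Atomically adds add_value to *dest, returning the value *after* the add.
  return dest->fetch_add(static_cast<D>(add_value)) + static_cast<D>(add_value);
}

int main() {
  std::atomic<int> counter{41};
  assert(fetch_and_add(&counter, 1) == 41);  // old value; counter is now 42
  assert(add_and_fetch(&counter, 1) == 43);  // new value; counter is now 43
  return 0;
}

Atomic::add itself is documented in the atomic.hpp hunk above as returning the updated value (add-and-fetch semantics), which is why claim-style call sites in the patch, such as Atomic::add(&_hwm, 1u) - 1, subtract the addend again to recover the pre-increment value.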