--- old/src/hotspot/cpu/arm/stubGenerator_arm.cpp	2019-11-21 11:54:01.151785235 +0100
+++ new/src/hotspot/cpu/arm/stubGenerator_arm.cpp	2019-11-21 11:54:00.915781242 +0100
@@ -487,7 +487,7 @@
     return start;
   }
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
+  // Support for jint Atomic::xchg(volatile jint *dest, jint exchange_value)
   //
   // Arguments :
   //
--- old/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	2019-11-21 11:54:01.587792611 +0100
+++ new/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	2019-11-21 11:54:01.383789159 +0100
@@ -585,7 +585,7 @@
     return start;
   }
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+  // Support for jint Atomic::xchg(volatile jint* dest, jint exchange_value).
   //
   // Arguments:
   //
--- old/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	2019-11-21 11:54:02.059800595 +0100
+++ new/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	2019-11-21 11:54:01.855797143 +0100
@@ -430,7 +430,7 @@
 
   //----------------------------------------------------------------------------------------------------
-  // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
+  // Support for int32_t Atomic::xchg(volatile int32_t* dest, int32_t exchange_value)
   //
   // xchg exists as far back as 8086, lock needed for MP only
   // Stack layout immediately after call:
--- old/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2019-11-21 11:54:02.563809120 +0100
+++ new/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2019-11-21 11:54:02.307804789 +0100
@@ -552,7 +552,7 @@
     return start;
   }
 
-  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
+  // Support for jint atomic::xchg(volatile jint* dest, jint exchange_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -571,7 +571,7 @@
     return start;
   }
 
-  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
+  // Support for intptr_t atomic::xchg_long(volatile jlong* dest, jlong exchange_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
--- old/src/hotspot/os/posix/os_posix.cpp	2019-11-21 11:54:03.107818322 +0100
+++ new/src/hotspot/os/posix/os_posix.cpp	2019-11-21 11:54:02.851813992 +0100
@@ -1998,7 +1998,7 @@
   // but only in the correctly written condition checking loops of ObjectMonitor,
   // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
 
-  if (Atomic::xchg(1, &_event) >= 0) return;
+  if (Atomic::xchg(&_event, 1) >= 0) return;
 
   int status = pthread_mutex_lock(_mutex);
   assert_status(status == 0, status, "mutex_lock");
@@ -2046,7 +2046,7 @@
   // Return immediately if a permit is available.
   // We depend on Atomic::xchg() having full barrier semantics
   // since we are doing a lock-free update to _counter.
-  if (Atomic::xchg(0, &_counter) > 0) return;
+  if (Atomic::xchg(&_counter, 0) > 0) return;
 
   Thread* thread = Thread::current();
   assert(thread->is_Java_thread(), "Must be JavaThread");
--- old/src/hotspot/os/solaris/os_solaris.cpp	2019-11-21 11:54:03.575826239 +0100
+++ new/src/hotspot/os/solaris/os_solaris.cpp	2019-11-21 11:54:03.319821909 +0100
@@ -4797,7 +4797,7 @@
   // from the first park() call after an unpark() call which will help
   // shake out uses of park() and unpark() without condition variables.
 
-  if (Atomic::xchg(1, &_Event) >= 0) return;
+  if (Atomic::xchg(&_Event, 1) >= 0) return;
 
   // If the thread associated with the event was parked, wake it.
   // Wait for the thread assoc with the PlatformEvent to vacate.
@@ -4896,7 +4896,7 @@
   // Return immediately if a permit is available.
   // We depend on Atomic::xchg() having full barrier semantics
   // since we are doing a lock-free update to _counter.
-  if (Atomic::xchg(0, &_counter) > 0) return;
+  if (Atomic::xchg(&_counter, 0) > 0) return;
 
   // Optional fast-exit: Check interrupt before trying to wait
   Thread* thread = Thread::current();
--- old/src/hotspot/os/windows/os_windows.cpp	2019-11-21 11:54:04.119835441 +0100
+++ new/src/hotspot/os/windows/os_windows.cpp	2019-11-21 11:54:03.859831042 +0100
@@ -5236,7 +5236,7 @@
   // from the first park() call after an unpark() call which will help
   // shake out uses of park() and unpark() without condition variables.
 
-  if (Atomic::xchg(1, &_Event) >= 0) return;
+  if (Atomic::xchg(&_Event, 1) >= 0) return;
 
   ::SetEvent(_ParkHandle);
 }
--- old/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	2019-11-21 11:54:04.647844372 +0100
+++ new/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	2019-11-21 11:54:04.395840109 +0100
@@ -153,8 +153,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -192,8 +192,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
--- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	2019-11-21 11:54:05.083851747 +0100
+++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	2019-11-21 11:54:04.831847485 +0100
@@ -51,8 +51,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
@@ -107,8 +107,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
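Note: the x86 ports above implement PlatformXchg<4> with the xchgl instruction. For reference, a minimal self-contained sketch of that idiom — the function name and test harness are illustrative, not part of the patch:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the xchgl idiom used by the bsd_x86/linux_x86 headers above.
    // xchg with a memory operand is always locked on x86, so the exchange is
    // atomic without an explicit "lock" prefix; the "memory" clobber acts as
    // a compiler barrier.
    inline int32_t example_xchg_4(int32_t volatile* dest, int32_t exchange_value) {
      __asm__ volatile ("xchgl (%2),%0"
                        : "=r" (exchange_value)             // returns old *dest
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
      return exchange_value;
    }

    int main() {
      int32_t v = 1;
      int32_t old = example_xchg_4(&v, 2);
      std::printf("old=%d new=%d\n", old, v);  // old=1 new=2
      return 0;
    }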
--- old/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	2019-11-21 11:54:05.491858649 +0100
+++ new/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	2019-11-21 11:54:05.263854792 +0100
@@ -197,15 +197,15 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-  return xchg_using_helper<int32_t>(arm_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int32_t>(arm_lock_test_and_set, dest, exchange_value);
 #else
 #ifdef M68K
-  return xchg_using_helper<int32_t>(m68k_lock_test_and_set, exchange_value, dest);
+  return xchg_using_helper<int32_t>(m68k_lock_test_and_set, dest, exchange_value);
 #else
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
@@ -224,8 +224,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
--- old/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	2019-11-21 11:54:05.907865686 +0100
+++ new/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	2019-11-21 11:54:05.651861355 +0100
@@ -46,8 +46,8 @@
 
 template<size_t byte_size>
 template<typename T>
-inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
-                                                     T volatile* dest,
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+                                                     T exchange_value,
                                                      atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
--- old/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	2019-11-21 11:54:06.287872113 +0100
+++ new/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	2019-11-21 11:54:06.079868595 +0100
@@ -86,11 +86,11 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
-  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
 }
--- old/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	2019-11-21 11:54:06.651878270 +0100
+++ new/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	2019-11-21 11:54:06.451874887 +0100
@@ -153,8 +153,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -192,8 +192,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
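Note: the linux_aarch64 port above delegates to the GCC/Clang __atomic builtin rather than hand-written assembly. A self-contained sketch of that call, under the same release-exchange-plus-fence scheme (the wrapper name is illustrative):

    #include <cstdint>

    // __atomic_exchange_n atomically stores exchange_value and returns the
    // previous value. The aarch64 port pairs a RELEASE exchange with a
    // trailing full fence to get the conservative ordering HotSpot expects.
    inline int64_t example_exchange(int64_t volatile* dest, int64_t exchange_value) {
      int64_t res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
      __atomic_thread_fence(__ATOMIC_SEQ_CST);  // stands in for FULL_MEM_BARRIER
      return res;
    }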
--- old/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	2019-11-21 11:54:07.071885375 +0100
+++ new/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	2019-11-21 11:54:06.819881112 +0100
@@ -208,8 +208,8 @@
 // replacement succeeded.
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
@@ -232,8 +232,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
--- old/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	2019-11-21 11:54:07.499892615 +0100
+++ new/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	2019-11-21 11:54:07.239888216 +0100
@@ -83,8 +83,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv = exchange_value;
@@ -98,8 +98,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv = exchange_value;
--- old/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	2019-11-21 11:54:07.919899719 +0100
+++ new/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	2019-11-21 11:54:07.671895524 +0100
@@ -51,8 +51,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
@@ -108,7 +108,7 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
--- old/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	2019-11-21 11:54:08.351907026 +0100
+++ new/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	2019-11-21 11:54:08.095902696 +0100
@@ -59,8 +59,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
@@ -78,8 +78,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
--- old/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	2019-11-21 11:54:08.783914333 +0100
+++ new/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	2019-11-21 11:54:08.523909936 +0100
@@ -45,8 +45,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "swap [%2],%0"
@@ -58,8 +58,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old_value = *dest;
--- old/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	2019-11-21 11:54:09.215921641 +0100
+++ new/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	2019-11-21 11:54:08.959917310 +0100
@@ -74,8 +74,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
@@ -87,8 +87,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
--- old/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il	2019-11-21 11:54:09.647928948 +0100
+++ new/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il	2019-11-21 11:54:09.391924617 +0100
@@ -65,13 +65,13 @@
       addq     %rdi, %rax
       .end
 
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+  // Support for jint Atomic::xchg(volatile jint* dest, jint exchange_value).
       .inline _Atomic_xchg,2
       xchgl    (%rsi), %edi
       movl     %edi, %eax
       .end
 
-  // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
+  // Support for jlong Atomic::xchg(volatile jlong* dest, jlong exchange_value).
      .inline _Atomic_xchg_long,2
      xchgq    (%rsi), %rdi
      movq     %rdi, %rax
      .end
--- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2019-11-21 11:54:10.071936119 +0100
+++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2019-11-21 11:54:09.819931857 +0100
@@ -79,11 +79,11 @@
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
   template<>                                                            \
   template<typename T>                                                  \
-  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
-                                                      T volatile* dest, \
+  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
+                                                      T exchange_value, \
                                                       atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
-    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+    return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
   }
 
 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
@@ -127,8 +127,8 @@
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
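Note: as the zero-port comments put it, __sync_lock_test_and_set is "a bizarrely named atomic exchange operation". A hedged sketch of the behavior those ports rely on (the wrapper name is illustrative):

    #include <cstdint>

    // __sync_lock_test_and_set(dest, val) atomically stores val and returns
    // the previous value, but only guarantees acquire semantics; the zero
    // ports therefore follow it with __sync_synchronize() for a full barrier.
    inline intptr_t example_zero_xchg(intptr_t volatile* dest, intptr_t exchange_value) {
      intptr_t result = __sync_lock_test_and_set(dest, exchange_value);
      __sync_synchronize();
      return result;
    }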
--- old/src/hotspot/share/code/nmethod.cpp	2019-11-21 11:54:10.511943562 +0100
+++ new/src/hotspot/share/code/nmethod.cpp	2019-11-21 11:54:10.251939165 +0100
@@ -1900,7 +1900,7 @@
          extract_state(_oops_do_mark_link) == claim_strong_request_tag,
          "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
 
-  nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+  nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
   // Self-loop if needed.
   if (old_head == NULL) {
     old_head = this;
@@ -1917,7 +1917,7 @@
 void nmethod::oops_do_add_to_list_as_strong_done() {
   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
 
-  nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+  nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
   // Self-loop if needed.
   if (old_head == NULL) {
     old_head = this;
--- old/src/hotspot/share/compiler/compileBroker.hpp	2019-11-21 11:54:11.015952087 +0100
+++ new/src/hotspot/share/compiler/compileBroker.hpp	2019-11-21 11:54:10.763947825 +0100
@@ -350,7 +350,7 @@
   static void disable_compilation_forever() {
     UseCompiler               = false;
     AlwaysCompileLoopMethods  = false;
-    Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
+    Atomic::xchg(&_should_compile_new_jobs, jint(shutdown_compilation));
   }
 
   static bool is_compilation_disabled_forever() {
--- old/src/hotspot/share/gc/shared/barrierSet.hpp	2019-11-21 11:54:11.439959259 +0100
+++ new/src/hotspot/share/gc/shared/barrierSet.hpp	2019-11-21 11:54:11.187954996 +0100
@@ -221,13 +221,13 @@
   }
 
   template <typename T>
-  static T atomic_xchg_in_heap(T new_value, T* addr) {
-    return Raw::atomic_xchg(new_value, addr);
+  static T atomic_xchg_in_heap(T* addr, T new_value) {
+    return Raw::atomic_xchg(addr, new_value);
   }
 
   template <typename T>
-  static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
-    return Raw::atomic_xchg_at(new_value, base, offset);
+  static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
+    return Raw::atomic_xchg_at(base, offset, new_value);
   }
 
   template <typename T>
@@ -270,12 +270,12 @@
   }
 
   template <typename T>
-  static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
-    return Raw::oop_atomic_xchg(new_value, addr);
+  static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+    return Raw::oop_atomic_xchg(addr, new_value);
   }
 
-  static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-    return Raw::oop_atomic_xchg_at(new_value, base, offset);
+  static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+    return Raw::oop_atomic_xchg_at(base, offset, new_value);
   }
 
   template <typename T>
@@ -302,8 +302,8 @@
   }
 
   template <typename T>
-  static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
-    return Raw::oop_atomic_xchg(new_value, addr);
+  static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+    return Raw::oop_atomic_xchg(addr, new_value);
   }
 
   // Clone barrier support
--- old/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	2019-11-21 11:54:11.883966769 +0100
+++ new/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	2019-11-21 11:54:11.627962439 +0100
@@ -81,7 +81,7 @@
   template <typename T>
   static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
   template <typename T>
-  static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+  static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
 
   template <typename T>
   static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -94,8 +94,8 @@
     oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
   }
 
-  static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-    return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+  static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+    return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
   }
 
   static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
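Note: the nmethod hunks above are the classic lock-free "claim and prepend" idiom, now spelled with the destination first. A minimal generic sketch with std::atomic (types and names are illustrative):

    #include <atomic>

    // Publish a node as the new list head and link it to the previous head,
    // as nmethod::oops_do_add_to_list_as_strong_done does with Atomic::xchg.
    struct Node {
      std::atomic<Node*> next{nullptr};
    };

    inline void prepend(std::atomic<Node*>& head, Node* n) {
      // std::atomic<T>::exchange takes only the new value, so it naturally
      // matches the new Atomic::xchg(dest, new_value) argument order.
      Node* old_head = head.exchange(n);  // sequentially consistent by default
      // As in the nmethod code, the link is written after publication; that
      // is safe only because readers are excluded while the list is built.
      n->next.store(old_head);
    }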
--- old/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	2019-11-21 11:54:12.271973332 +0100
+++ new/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	2019-11-21 11:54:12.063969813 +0100
@@ -80,10 +80,10 @@
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
-oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+oop_atomic_xchg_in_heap(T* addr, oop new_value) {
   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
   bs->template write_ref_field_pre<decorators>(addr);
-  oop result = Raw::oop_atomic_xchg(new_value, addr);
+  oop result = Raw::oop_atomic_xchg(addr, new_value);
   bs->template write_ref_field_post<decorators>(addr, new_value);
   return result;
 }
--- old/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	2019-11-21 11:54:12.699980572 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	2019-11-21 11:54:12.443976241 +0100
@@ -145,7 +145,7 @@
   static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);
 
   template <typename T>
-  static oop oop_atomic_xchg_in_heap_impl(oop new_value, T* addr);
+  static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);
 
 public:
   // Heap oop accesses. These accessors get resolved when
@@ -164,8 +164,8 @@
   static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
 
   template <typename T>
-  static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
-  static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+  static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+  static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
 
   template <typename T>
   static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -187,7 +187,7 @@
   static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
 
   template <typename T>
-  static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+  static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
 };
--- old/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	2019-11-21 11:54:13.135987946 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	2019-11-21 11:54:12.879983616 +0100
@@ -145,8 +145,8 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
-  oop previous = Raw::oop_atomic_xchg(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+  oop previous = Raw::oop_atomic_xchg(addr, new_value);
   if (previous != NULL) {
     return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
   } else {
@@ -156,9 +156,9 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(oop new_value, T* addr) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(T* addr, oop new_value) {
   ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-  oop result = oop_atomic_xchg_not_in_heap(new_value, addr);
+  oop result = oop_atomic_xchg_not_in_heap(addr, new_value);
   const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
   if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
       ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
@@ -169,15 +169,15 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
-  oop result = oop_atomic_xchg_in_heap_impl(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+  oop result = oop_atomic_xchg_in_heap_impl(addr, new_value);
   keep_alive_if_weak(addr, result);
   return result;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-  oop result = oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+  oop result = oop_atomic_xchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
   keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
   return result;
 }
--- old/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	2019-11-21 11:54:13.571995320 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	2019-11-21 11:54:13.315990991 +0100
@@ -92,7 +92,7 @@
     bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
 
     // This control loop iteration have seen this much allocations.
-    size_t allocs_seen = Atomic::xchg(0, &_allocs_seen);
+    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);
 
     // Choose which GC mode to run in. The block below should select a single mode.
     GCMode mode = none;
--- old/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	2019-11-21 11:54:14.020002898 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	2019-11-21 11:54:13.763998568 +0100
@@ -191,7 +191,7 @@
 void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
   size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-  Atomic::xchg((intptr_t)initial, &_budget);
+  Atomic::xchg(&_budget, (intptr_t)initial);
   Atomic::store(&_tax_rate, tax_rate);
   Atomic::inc(&_epoch);
 }
--- old/src/hotspot/share/gc/z/zBarrierSet.hpp	2019-11-21 11:54:14.452010205 +0100
+++ new/src/hotspot/share/gc/z/zBarrierSet.hpp	2019-11-21 11:54:14.196005875 +0100
@@ -74,8 +74,8 @@
     static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
 
     template <typename T>
-    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
-    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+    static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+    static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
 
     template <typename T>
     static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -94,7 +94,7 @@
     static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
 
     template <typename T>
-    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
   };
 };
--- old/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	2019-11-21 11:54:14.880017445 +0100
+++ new/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	2019-11-21 11:54:14.624013114 +0100
@@ -155,20 +155,20 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
   verify_decorators_present<ON_STRONG_OOP_REF>();
   verify_decorators_absent<AS_NO_KEEPALIVE>();
 
-  const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
+  const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value);
   return ZBarrier::load_barrier_on_oop(o);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
   verify_decorators_present<ON_STRONG_OOP_REF>();
   verify_decorators_absent<AS_NO_KEEPALIVE>();
 
-  const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+  const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value);
   return ZBarrier::load_barrier_on_oop(o);
 }
 
@@ -231,11 +231,11 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
   verify_decorators_present<ON_STRONG_OOP_REF>();
   verify_decorators_absent<AS_NO_KEEPALIVE>();
 
-  return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
+  return Raw::oop_atomic_xchg_not_in_heap(addr, new_value);
 }
 
 #endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
--- old/src/hotspot/share/gc/z/zReferenceProcessor.cpp	2019-11-21 11:54:15.312024752 +0100
+++ new/src/hotspot/share/gc/z/zReferenceProcessor.cpp	2019-11-21 11:54:15.060020489 +0100
@@ -316,7 +316,7 @@
 
   // Prepend discovered references to internal pending list
   if (*list != NULL) {
-    *p = Atomic::xchg(*list, _pending_list.addr());
+    *p = Atomic::xchg(_pending_list.addr(), *list);
     if (*p == NULL) {
       // First to prepend to list, record tail
      _pending_list_tail = p;
--- old/src/hotspot/share/gc/z/zStat.cpp	2019-11-21 11:54:15.756032261 +0100
+++ new/src/hotspot/share/gc/z/zStat.cpp	2019-11-21 11:54:15.496027863 +0100
@@ -424,9 +424,9 @@
   for (uint32_t i = 0; i < ncpus; i++) {
     ZStatSamplerData* const cpu_data = get_cpu_local<ZStatSamplerData>(i);
     if (cpu_data->_nsamples > 0) {
-      const uint64_t nsamples = Atomic::xchg((uint64_t)0, &cpu_data->_nsamples);
-      const uint64_t sum = Atomic::xchg((uint64_t)0, &cpu_data->_sum);
-      const uint64_t max = Atomic::xchg((uint64_t)0, &cpu_data->_max);
+      const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0);
+      const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0);
+      const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0);
       all._nsamples += nsamples;
       all._sum += sum;
       if (all._max < max) {
@@ -459,7 +459,7 @@
   const uint32_t ncpus = ZCPU::count();
   for (uint32_t i = 0; i < ncpus; i++) {
     ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
-    counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+    counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
   }
 
   ZStatSample(_sampler, counter);
@@ -481,7 +481,7 @@
   const uint32_t ncpus = ZCPU::count();
   for (uint32_t i = 0; i < ncpus; i++) {
     ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
-    all._counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+    all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
   }
 
   return all;
--- old/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp	2019-11-21 11:54:16.220040110 +0100
+++ new/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp	2019-11-21 11:54:15.964035780 +0100
@@ -139,7 +139,7 @@
 
 int JfrPostBox::collect() {
   // get pending and reset to 0
-  const int messages = Atomic::xchg(0, &_messages);
+  const int messages = Atomic::xchg(&_messages, 0);
   if (check_waiters(messages)) {
     _has_waiters = true;
     assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_read_serial is protected by JfrMsg_lock");
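Note: zStat and JfrPostBox both drain concurrently-updated counters by exchanging in zero, so concurrent increments are never lost, only deferred to the next drain. The pattern in isolation (names are illustrative):

    #include <atomic>
    #include <cstdint>

    // Atomically take the accumulated value and leave 0 behind, mirroring
    // Atomic::xchg(&cpu_data->_counter, (uint64_t)0) above.
    inline uint64_t drain(std::atomic<uint64_t>& counter) {
      return counter.exchange(0);  // full barrier with the default ordering
    }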
--- old/src/hotspot/share/memory/universe.cpp	2019-11-21 11:54:16.648047348 +0100
+++ new/src/hotspot/share/memory/universe.cpp	2019-11-21 11:54:16.396043086 +0100
@@ -511,7 +511,7 @@
 
 oop Universe::swap_reference_pending_list(oop list) {
   assert_pll_locked(is_locked);
-  return Atomic::xchg(list, &_reference_pending_list);
+  return Atomic::xchg(&_reference_pending_list, list);
 }
 
 #undef assert_pll_locked
--- old/src/hotspot/share/oops/access.hpp	2019-11-21 11:54:17.096054926 +0100
+++ new/src/hotspot/share/oops/access.hpp	2019-11-21 11:54:16.844050664 +0100
@@ -171,9 +171,9 @@
   }
 
   template <typename T>
-  static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
     verify_primitive_decorators<atomic_xchg_mo_decorators>();
-    return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
+    return AccessInternal::atomic_xchg_at<decorators>(base, offset, new_value);
   }
 
   // Oop heap accesses
@@ -200,11 +200,11 @@
   }
 
   template <typename T>
-  static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  static inline T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
     verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
     typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
     OopType new_oop_value = new_value;
-    return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
+    return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, new_oop_value);
   }
 
   // Clone an object from src to dst
@@ -233,9 +233,9 @@
   }
 
   template <typename P, typename T>
-  static inline T atomic_xchg(T new_value, P* addr) {
+  static inline T atomic_xchg(P* addr, T new_value) {
     verify_primitive_decorators<atomic_xchg_mo_decorators>();
-    return AccessInternal::atomic_xchg<decorators>(new_value, addr);
+    return AccessInternal::atomic_xchg<decorators>(addr, new_value);
   }
 
   // Oop accesses
@@ -263,11 +263,11 @@
   }
 
   template <typename P, typename T>
-  static inline T oop_atomic_xchg(T new_value, P* addr) {
+  static inline T oop_atomic_xchg(P* addr, T new_value) {
     verify_oop_decorators<atomic_xchg_mo_decorators>();
     typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
     OopType new_oop_value = new_value;
-    return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
+    return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(addr, new_oop_value);
   }
 
   static oop resolve(oop obj) {
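Note: the *_at entry points in access.hpp name a field as a (base, offset) pair; the dispatch layers below resolve that pair to a raw address and then perform the plain exchange. A rough standalone reduction — field_addr stands in for AccessInternal::oop_field_addr, and casting raw memory to std::atomic is only legitimate in a sketch:

    #include <atomic>
    #include <cstddef>

    template <typename T>
    std::atomic<T>* field_addr(void* base, ptrdiff_t offset) {
      return reinterpret_cast<std::atomic<T>*>(
          reinterpret_cast<char*>(base) + offset);
    }

    // Location first, new value last -- the order this patch establishes.
    template <typename T>
    T atomic_xchg_at(void* base, ptrdiff_t offset, T new_value) {
      return field_addr<T>(base, offset)->exchange(new_value);
    }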
--- old/src/hotspot/share/oops/access.inline.hpp	2019-11-21 11:54:17.540062436 +0100
+++ new/src/hotspot/share/oops/access.inline.hpp	2019-11-21 11:54:17.280058039 +0100
@@ -90,16 +90,16 @@
   template <class GCBarrierType, DecoratorSet decorators>
   struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
     template <typename T>
-    static T access_barrier(T new_value, void* addr) {
-      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
+    static T access_barrier(void* addr, T new_value) {
+      return GCBarrierType::atomic_xchg_in_heap(reinterpret_cast<T*>(addr), new_value);
     }
 
-    static oop oop_access_barrier(oop new_value, void* addr) {
+    static oop oop_access_barrier(void* addr, oop new_value) {
       typedef typename HeapOopType<decorators>::type OopType;
       if (HasDecorator<decorators, IN_HEAP>::value) {
-        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+        return GCBarrierType::oop_atomic_xchg_in_heap(reinterpret_cast<OopType*>(addr), new_value);
       } else {
-        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+        return GCBarrierType::oop_atomic_xchg_not_in_heap(reinterpret_cast<OopType*>(addr), new_value);
       }
     }
   };
@@ -171,12 +171,12 @@
   template <class GCBarrierType, DecoratorSet decorators>
   struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
     template <typename T>
-    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
-      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
+    static T access_barrier(oop base, ptrdiff_t offset, T new_value) {
+      return GCBarrierType::atomic_xchg_in_heap_at(base, offset, new_value);
     }
 
-    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
-      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+    static oop oop_access_barrier(oop base, ptrdiff_t offset, oop new_value) {
+      return GCBarrierType::oop_atomic_xchg_in_heap_at(base, offset, new_value);
     }
   };
@@ -323,17 +323,17 @@
   }
 
   template <DecoratorSet decorators, typename T>
-  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(void* addr, T new_value) {
     func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
     _atomic_xchg_func = function;
-    return function(new_value, addr);
+    return function(addr, new_value);
  }
 
  template <DecoratorSet decorators, typename T>
-  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value) {
     func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
     _atomic_xchg_at_func = function;
-    return function(new_value, base, offset);
+    return function(base, offset, new_value);
  }
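Note: RuntimeDispatch::atomic_xchg_init above is resolve-on-first-use dispatch: the cached function pointer initially points at an init stub, and the first call resolves the real barrier, installs it, and forwards the arguments. The pattern in isolation (names and the stand-in implementation are illustrative):

    #include <cstdio>

    using xchg_func_t = int (*)(volatile int* dest, int new_value);

    static int real_xchg(volatile int* dest, int new_value) {
      int old = *dest;   // non-atomic stand-in for the resolved barrier
      *dest = new_value;
      return old;
    }

    static int xchg_init(volatile int* dest, int new_value);
    static xchg_func_t xchg_func = xchg_init;  // starts at the init stub

    static int xchg_init(volatile int* dest, int new_value) {
      xchg_func = real_xchg;              // cache the resolved function
      return real_xchg(dest, new_value);  // forward the triggering call
    }

    int main() {
      volatile int v = 1;
      std::printf("%d\n", xchg_func(&v, 2));  // resolves, prints 1
      std::printf("%d\n", xchg_func(&v, 3));  // cached path, prints 2
      return 0;
    }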
--- old/src/hotspot/share/oops/accessBackend.hpp	2019-11-21 11:54:17.984069946 +0100
+++ new/src/hotspot/share/oops/accessBackend.hpp	2019-11-21 11:54:17.724065548 +0100
@@ -103,12 +103,12 @@
   typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
   typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
   typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
-  typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);
+  typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
 
   typedef T (*load_func_t)(void* addr);
   typedef void (*store_func_t)(void* addr, T value);
   typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
-  typedef T (*atomic_xchg_func_t)(T new_value, void* addr);
+  typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
 
   typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
@@ -303,7 +303,7 @@
   template <DecoratorSet ds, typename T>
   static typename EnableIf<
     HasDecorator<ds, MO_SEQ_CST>::value, T>::type
-  atomic_xchg_internal(T new_value, void* addr);
+  atomic_xchg_internal(void* addr, T new_value);
 
   // The following *_locked mechanisms serve the purpose of handling atomic operations
   // that are larger than a machine can handle, and then possibly opt for using
@@ -324,14 +324,14 @@
   template <DecoratorSet ds, typename T>
   static inline typename EnableIf<
     !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_xchg_maybe_locked(T new_value, void* addr) {
-    return atomic_xchg_internal<ds>(new_value, addr);
+  atomic_xchg_maybe_locked(void* addr, T new_value) {
+    return atomic_xchg_internal<ds>(addr, new_value);
   }
 
   template <DecoratorSet ds, typename T>
   static typename EnableIf<
     AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_xchg_maybe_locked(T new_value, void* addr);
+  atomic_xchg_maybe_locked(void* addr, T new_value);
 
 public:
   template <typename T>
@@ -350,8 +350,8 @@
   }
 
   template <typename T>
-  static inline T atomic_xchg(T new_value, void* addr) {
-    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
+  static inline T atomic_xchg(void* addr, T new_value) {
+    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
   }
 
   template <typename T>
@@ -375,9 +375,9 @@
   static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
 
   template <typename T>
-  static T oop_atomic_xchg(T new_value, void* addr);
+  static T oop_atomic_xchg(void* addr, T new_value);
   template <typename T>
-  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
+  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);
 
   template <typename T>
   static void store_at(oop base, ptrdiff_t offset, T value) {
@@ -395,8 +395,8 @@
   }
 
   template <typename T>
-  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    return atomic_xchg(new_value, field_addr(base, offset));
+  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+    return atomic_xchg(field_addr(base, offset), new_value);
   }
 
   template <typename T>
@@ -539,10 +539,10 @@
     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
     static func_t _atomic_xchg_func;
 
-    static T atomic_xchg_init(T new_value, void* addr);
+    static T atomic_xchg_init(void* addr, T new_value);
 
-    static inline T atomic_xchg(T new_value, void* addr) {
-      return _atomic_xchg_func(new_value, addr);
+    static inline T atomic_xchg(void* addr, T new_value) {
+      return _atomic_xchg_func(addr, new_value);
     }
   };
@@ -551,10 +551,10 @@
     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
     static func_t _atomic_xchg_at_func;
 
-    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
 
-    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return _atomic_xchg_at_func(new_value, base, offset);
+    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+      return _atomic_xchg_at_func(base, offset, new_value);
     }
   };
@@ -838,56 +838,56 @@
   template <DecoratorSet decorators, typename T>
   inline static typename EnableIf<
     HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
     typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
     if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-      return Raw::oop_atomic_xchg(new_value, addr);
+      return Raw::oop_atomic_xchg(addr, new_value);
     } else {
-      return Raw::atomic_xchg(new_value, addr);
+      return Raw::atomic_xchg(addr, new_value);
     }
   }
 
   template <DecoratorSet decorators, typename T>
   inline static typename EnableIf<
     HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
     if (UseCompressedOops) {
       const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
     } else {
       const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
     }
   }
 
   template <DecoratorSet decorators, typename T>
   inline static typename EnableIf<
     !HasDecorator<decorators, AS_RAW>::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
-      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
    } else {
-      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
    }
  }
 
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value, T>::type
-  atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+    return atomic_xchg<decorators>(field_addr(base, offset), new_value);
  }
 
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
-  atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
-      return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+      return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
    } else {
-      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
    }
  }
@@ -1045,29 +1045,29 @@
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+  inline T atomic_xchg_reduce_types(T* addr, T new_value) {
     const DecoratorSet expanded_decorators = decorators;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+  inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+  inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
   }
 
   template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+  inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
   }
 
   template <DecoratorSet decorators, typename T>
@@ -1224,19 +1224,19 @@
   }
 
   template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_xchg(T new_value, P* addr) {
+  inline T atomic_xchg(P* addr, T new_value) {
     verify_types<decorators, T>();
     typedef typename Decay<P>::type DecayedP;
     typedef typename Decay<T>::type DecayedT;
     DecayedT new_decayed_value = new_value;
     // atomic_xchg is only available in SEQ_CST flavour.
     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
-    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                         const_cast<DecayedP*>(addr));
+    return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
+                                                         new_decayed_value);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
     verify_types<decorators, T>();
     typedef typename Decay<T>::type DecayedT;
     DecayedT new_decayed_value = new_value;
@@ -1244,7 +1244,7 @@
     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                              (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                               INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
-    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
   }
 
   template <DecoratorSet decorators, typename T>
--- old/src/hotspot/share/oops/accessBackend.inline.hpp	2019-11-21 11:54:18.464078064 +0100
+++ new/src/hotspot/share/oops/accessBackend.inline.hpp	2019-11-21 11:54:18.212073802 +0100
@@ -103,17 +103,17 @@
 
 template <DecoratorSet decorators>
 template <DecoratorSet ds, typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
   Encoded encoded_new = encode(new_value);
-  Encoded encoded_result = atomic_xchg<ds>(encoded_new, reinterpret_cast<Encoded*>(addr));
+  Encoded encoded_result = atomic_xchg<ds>(reinterpret_cast<Encoded*>(addr), encoded_new);
   return decode(encoded_result);
 }
 
 template <DecoratorSet decorators>
 template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-  return oop_atomic_xchg(new_value, field_addr(base, offset));
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+  return oop_atomic_xchg(field_addr(base, offset), new_value);
 }
 
 template <DecoratorSet decorators>
@@ -203,9 +203,9 @@
 template <DecoratorSet ds, typename T>
 inline typename EnableIf<
   HasDecorator<ds, MO_SEQ_CST>::value, T>::type
-RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
-  return Atomic::xchg(new_value,
-                      reinterpret_cast<volatile T*>(addr));
+RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
+  return Atomic::xchg(reinterpret_cast<volatile T*>(addr),
+                      new_value);
 }
 
 // For platforms that do not have native support for wide atomics,
@@ -216,9 +216,9 @@
 template <DecoratorSet ds, typename T>
 inline typename EnableIf<
   AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<decorators>::atomic_xchg_maybe_locked(T new_value, void* addr) {
+RawAccessBarrier<decorators>::atomic_xchg_maybe_locked(void* addr, T new_value) {
   if (!AccessInternal::wide_atomic_needs_locking()) {
-    return atomic_xchg_internal<ds>(new_value, addr);
+    return atomic_xchg_internal<ds>(addr, new_value);
   } else {
     AccessInternal::AccessLocker access_lock;
     volatile T* p = reinterpret_cast<volatile T*>(addr);
--- old/src/hotspot/share/prims/jni.cpp	2019-11-21 11:54:18.904085507 +0100
+++ new/src/hotspot/share/prims/jni.cpp	2019-11-21 11:54:18.640081041 +0100
@@ -3811,9 +3811,9 @@
 #if defined(ZERO) && defined(ASSERT)
   {
     jint a = 0xcafebabe;
-    jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
+    jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);
     void *c = &a;
-    void *d = Atomic::xchg(&b, &c);
+    void *d = Atomic::xchg(&c, &b);
     assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
     assert(c == &b && d == &a, "Atomic::xchg() works");
   }
@@ -3829,10 +3829,10 @@
   // We use Atomic::xchg rather than Atomic::add/dec since on some platforms
   // the add/dec implementations are dependent on whether we are running
   // on a multiprocessor Atomic::xchg does not have this problem.
-  if (Atomic::xchg(1, &vm_created) == 1) {
+  if (Atomic::xchg(&vm_created, 1) == 1) {
     return JNI_EEXIST;   // already created, or create attempt in progress
   }
-  if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
+  if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) {
     return JNI_ERR;  // someone tried and failed and retry not allowed.
   }
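Note: the atomic.hpp hunks below are the root of the change — the primary template, the pointer specialization, and the PrimitiveConversions path all now take the destination first. The resulting call shape, sketched with std::atomic for both flavours the jni.cpp sanity check above exercises (values are illustrative):

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> a{0xcafe};
      int b = a.exchange(0xbeef);   // like Atomic::xchg(&a, 0xbeef)
      assert(b == 0xcafe && a.load() == 0xbeef);

      int x = 0, y = 0;
      std::atomic<int*> p{&x};
      int* old = p.exchange(&y);    // pointer flavour, same argument order
      assert(old == &x && p.load() == &y);
      return 0;
    }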
--- old/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:54:19.412094099 +0100
+++ new/src/hotspot/share/runtime/atomic.hpp	2019-11-21 11:54:19.160089837 +0100
@@ -132,8 +132,8 @@
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static D xchg(T exchange_value, volatile D* dest,
+  template<typename D, typename T>
+  inline static D xchg(volatile D* dest, T exchange_value,
                        atomic_memory_order order = memory_order_conservative);
 
   // Performs atomic compare of *dest and compare_value, and exchanges
@@ -341,7 +341,7 @@
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
   // PlatformXchg.
-  template<typename T, typename D, typename Enable = void>
+  template<typename D, typename T, typename Enable = void>
   struct XchgImpl;
 
   // Platform-specific implementation of xchg. Support for sizes
@@ -353,11 +353,11 @@
   // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
   //
   // Then
-  //   platform_xchg(exchange_value, dest)
+  //   platform_xchg(dest, exchange_value)
   // must be a valid expression, returning a result convertible to T.
   //
   // A default definition is provided, which declares a function template
-  //   T operator()(T, T volatile*, T, atomic_memory_order) const
+  //   T operator()(T volatile*, T, atomic_memory_order) const
   //
   // For each required size, a platform must either provide an
   // appropriate definition of that function, or must entirely
@@ -373,8 +373,8 @@
   // helper function.
   template<typename Type, typename Fn, typename T>
   static T xchg_using_helper(Fn fn,
-                             T exchange_value,
-                             T volatile* dest);
+                             T volatile* dest,
+                             T exchange_value);
 };
 
@@ -593,8 +593,8 @@
 template<size_t byte_size>
 struct Atomic::PlatformXchg {
   template<typename T>
-  T operator()(T exchange_value,
-               T volatile* dest,
+  T operator()(T volatile* dest,
+               T exchange_value,
                atomic_memory_order order) const;
 };
 
@@ -891,9 +891,9 @@
 template<typename T>
 struct Atomic::XchgImpl<
   T, T,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
     // Forward to the platform handler for the size of T.
-    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
+    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
   }
 };
 
@@ -902,15 +902,15 @@
 // The exchange_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // exchange_value in the destination.
-template<typename T, typename D>
+template<typename D, typename T>
 struct Atomic::XchgImpl<
-  T*, D*,
+  D*, T*,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
+  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* new_value = exchange_value;
-    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
+    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
   }
 };
 
@@ -926,30 +926,31 @@
 template<typename T>
 struct Atomic::XchgImpl<
   T, T,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
     return Translator::recover(
-      xchg(Translator::decay(exchange_value),
-           reinterpret_cast<Decayed volatile*>(dest),
+      xchg(reinterpret_cast<Decayed volatile*>(dest),
+           Translator::decay(exchange_value),
            order));
   }
 };
 
 template<typename Type, typename Fn, typename T>
 inline T Atomic::xchg_using_helper(Fn fn,
-                                   T exchange_value,
-                                   T volatile* dest) {
+                                   T volatile* dest,
+                                   T exchange_value) {
   STATIC_ASSERT(sizeof(Type) == sizeof(T));
+  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
   return PrimitiveConversions::cast<T>(
     fn(PrimitiveConversions::cast<Type>(exchange_value),
        reinterpret_cast<Type volatile*>(dest)));
 }
 
-template<typename T, typename D>
-inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
-  return XchgImpl<T, D>()(exchange_value, dest, order);
+template<typename D, typename T>
+inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
+  return XchgImpl<D, T>()(dest, exchange_value, order);
 }
 
 #endif // SHARE_RUNTIME_ATOMIC_HPP
--- old/src/hotspot/share/runtime/synchronizer.cpp	2019-11-21 11:54:19.868101811 +0100
+++ new/src/hotspot/share/runtime/synchronizer.cpp	2019-11-21 11:54:19.612097482 +0100
@@ -993,7 +993,7 @@
   // of active monitors passes the specified threshold.
   // TODO: assert thread state is reasonable
 
-  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
+  if (ForceMonitorScavenge == 0 && Atomic::xchg(&ForceMonitorScavenge, 1) == 0) {
     // Induce a 'null' safepoint to scavenge monitors
     // Must VM_Operation instance be heap allocated as the op will be enqueue and posted
     // to the VMthread and have a lifespan longer than that of this activation record.
--- old/src/hotspot/share/runtime/threadSMR.cpp	2019-11-21 11:54:20.340109794 +0100
+++ new/src/hotspot/share/runtime/threadSMR.cpp	2019-11-21 11:54:20.084105464 +0100
@@ -170,7 +170,7 @@
 }
 
 inline ThreadsList* ThreadsSMRSupport::xchg_java_thread_list(ThreadsList* new_list) {
-  return (ThreadsList*)Atomic::xchg(new_list, &_java_thread_list);
+  return (ThreadsList*)Atomic::xchg(&_java_thread_list, new_list);
 }
 
 // Hash table of pointers found by a scan. Used for collecting hazard
--- old/src/hotspot/share/utilities/lockFreeStack.hpp	2019-11-21 11:54:20.796117507 +0100
+++ new/src/hotspot/share/utilities/lockFreeStack.hpp	2019-11-21 11:54:20.536113109 +0100
@@ -103,7 +103,7 @@
   // list of elements. Acts as a full memory barrier.
   // postcondition: empty()
   T* pop_all() {
-    return Atomic::xchg((T*)NULL, &_top);
+    return Atomic::xchg(&_top, (T*)NULL);
   }
 
   // Atomically adds value to the top of this stack. Acts as a full
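Closing note: the last hunk reorders the exchange inside LockFreeStack::pop_all(), which detaches the whole stack in a single full-barrier exchange. A usage sketch with std::atomic (types are illustrative):

    #include <atomic>
    #include <cstdio>

    struct Node { int value; Node* next; };

    int main() {
      Node c{3, nullptr}, b{2, &c}, a{1, &b};
      std::atomic<Node*> top{&a};
      // pop_all(): swap NULL into the top pointer; the caller now owns the
      // detached chain and can walk it without further synchronization.
      Node* list = top.exchange(nullptr);
      for (Node* n = list; n != nullptr; n = n->next) {
        std::printf("%d\n", n->value);
      }
      return 0;
    }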