--- old/src/hotspot/os/bsd/os_bsd.cpp 2019-11-21 11:51:58.745767293 +0100
+++ new/src/hotspot/os/bsd/os_bsd.cpp 2019-11-21 11:51:58.313760235 +0100
@@ -3264,7 +3264,7 @@
 
   while (processor_id < 0) {
     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
-      Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
+      Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1);
    }
    processor_id = Atomic::load(&mapping[apic_id]);
  }
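Note: the hunk above is the lazy APIC-id-to-processor-id mapping in os::processor_id(). A slot holds -1 while unclaimed; the thread that wins the cmpxchg from -1 to -2 allocates the next id and publishes it with a plain atomic store, while losing threads spin on the load until the winner's store becomes visible. A minimal standalone analogue using std::atomic (names and the fixed table size are illustrative, not from the JDK sources):

```cpp
#include <atomic>

static std::atomic<int> mapping[256];   // one slot per APIC id
static std::atomic<int> next_processor_id{0};

void init_mapping() {                   // slots start out unclaimed
  for (auto& m : mapping) m.store(-1);
}

int processor_id_for(int apic_id) {
  int id = mapping[apic_id].load();
  while (id < 0) {
    int unclaimed = -1;
    if (mapping[apic_id].compare_exchange_strong(unclaimed, -2)) {
      // We won the claim: allocate the next id and publish it.
      mapping[apic_id].store(next_processor_id.fetch_add(1));
    }
    id = mapping[apic_id].load();       // losers spin until the winner publishes
  }
  return id;
}
```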
--- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp 2019-11-21 11:51:59.425778403 +0100
+++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp 2019-11-21 11:51:58.965770887 +0100
@@ -161,8 +161,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value),
                     reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgb (%2),%0"
                       : "=q" (v)
                       : "0" (v), "r" (p)
@@ -185,7 +185,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgw (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -197,7 +197,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgl (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -210,7 +210,7 @@
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgq (%2), %0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
--- old/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp 2019-11-21 11:52:00.021788140 +0100
+++ new/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp 2019-11-21 11:51:59.589781082 +0100
@@ -276,8 +276,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value),
                     reinterpret_cast<volatile int64_t*>(dest));
 }
--- old/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp 2019-11-21 11:52:00.685798989 +0100
+++ new/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp 2019-11-21 11:52:00.185790820 +0100
@@ -88,14 +88,14 @@
 
 template<size_t byte_size>
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
 };
 
 template<size_t byte_size>
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
 };
 
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
--- old/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp 2019-11-21 11:52:01.281808726 +0100
+++ new/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp 2019-11-21 11:52:00.849801668 +0100
@@ -54,8 +54,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
--- old/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp 2019-11-21 11:52:01.937819444 +0100
+++ new/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp 2019-11-21 11:52:01.445811405 +0100
@@ -161,8 +161,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value),
                     reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgb (%2),%0"
                       : "=q" (v)
                       : "0" (v), "r" (p)
@@ -185,7 +185,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgw (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -197,7 +197,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgl (%2),%0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
@@ -210,7 +210,7 @@
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile (  "xchgq (%2), %0"
                       : "=r" (v)
                       : "0" (v), "r" (p)
--- old/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp 2019-11-21 11:52:02.537829248 +0100
+++ new/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp 2019-11-21 11:52:02.113822319 +0100
@@ -122,8 +122,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value),
                     reinterpret_cast<volatile int64_t*>(dest));
 }
--- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:52:03.137839050 +0100
+++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:52:02.709832057 +0100
@@ -213,8 +213,8 @@
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   volatile T* src = &store_value;
   __asm {
@@ -234,7 +234,7 @@
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov al, v;
@@ -247,7 +247,7 @@
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov ax, v;
@@ -260,7 +260,7 @@
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov eax, v;
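Note: the platform files above change for two distinct reasons. On 32-bit x86 an ordinary 64-bit store is not atomic, so the PlatformStore<8> specializations funnel the value through a single wide move (_Atomic_move_long on BSD/Linux, fild/fistp on Windows). Separately, the RELEASE_X_FENCE ordered stores use xchg, which on x86 is implicitly locked, so one instruction provides both the store and the trailing full fence. A sketch of that idiom, assuming GCC/Clang inline-asm syntax on x86 (illustrative only):

```cpp
// Release-store-with-fence via xchg, mirroring the
// PlatformOrderedStore<4, RELEASE_X_FENCE> specializations above.
inline void release_store_fence_u32(volatile unsigned* p, unsigned v) {
  __asm__ volatile("xchgl (%2),%0"
                   : "=r"(v)        // v is clobbered with the old value
                   : "0"(v), "r"(p)
                   : "memory");     // also acts as a compiler barrier
}
```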
--- old/src/hotspot/share/classfile/symbolTable.cpp 2019-11-21 11:52:03.729848722 +0100
+++ new/src/hotspot/share/classfile/symbolTable.cpp 2019-11-21 11:52:03.309841859 +0100
@@ -189,8 +189,8 @@
   }
 }
 
-void SymbolTable::reset_has_items_to_clean() { Atomic::store(false, &_has_items_to_clean); }
-void SymbolTable::mark_has_items_to_clean()  { Atomic::store(true, &_has_items_to_clean); }
+void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
+void SymbolTable::mark_has_items_to_clean()  { Atomic::store(&_has_items_to_clean, true); }
 bool SymbolTable::has_items_to_clean()       { return Atomic::load(&_has_items_to_clean); }
 
 void SymbolTable::item_added() {
--- old/src/hotspot/share/code/compiledMethod.cpp 2019-11-21 11:52:04.397859635 +0100
+++ new/src/hotspot/share/code/compiledMethod.cpp 2019-11-21 11:52:03.913851728 +0100
@@ -615,7 +615,7 @@
     if (md != NULL && md->is_method()) {
       Method* method = static_cast<Method*>(md);
       if (!method->method_holder()->is_loader_alive()) {
-        Atomic::store((Method*)NULL, r->metadata_addr());
+        Atomic::store(r->metadata_addr(), (Method*)NULL);
 
         if (!r->metadata_is_immediate()) {
           r->fix_metadata_relocation();
--- old/src/hotspot/share/code/dependencyContext.cpp 2019-11-21 11:52:05.009869634 +0100
+++ new/src/hotspot/share/code/dependencyContext.cpp 2019-11-21 11:52:04.581862642 +0100
@@ -300,7 +300,7 @@
 
 // Relaxed accessors
 void DependencyContext::set_dependencies(nmethodBucket* b) {
-  Atomic::store(b, _dependency_context_addr);
+  Atomic::store(_dependency_context_addr, b);
 }
 
 nmethodBucket* DependencyContext::dependencies() {
@@ -313,7 +313,7 @@
 void DependencyContext::cleaning_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be");
   uint64_t epoch = ++_cleaning_epoch_monotonic;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // The epilogue marks the end of dependency context cleanup by the GC,
@@ -323,7 +323,7 @@
 // was called. That allows dependency contexts to be cleaned concurrently.
 void DependencyContext::cleaning_end() {
   uint64_t epoch = 0;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // This function skips over nmethodBuckets in the list corresponding to
@@ -358,7 +358,7 @@
 }
 
 void nmethodBucket::set_next(nmethodBucket* b) {
-  Atomic::store(b, &_next);
+  Atomic::store(&_next, b);
 }
 
 nmethodBucket* nmethodBucket::purge_list_next() {
@@ -366,5 +366,5 @@
 }
 
 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
-  Atomic::store(b, &_purge_list_next);
+  Atomic::store(&_purge_list_next, b);
 }
--- old/src/hotspot/share/code/nmethod.cpp 2019-11-21 11:52:05.601879305 +0100
+++ new/src/hotspot/share/code/nmethod.cpp 2019-11-21 11:52:05.173872313 +0100
@@ -315,7 +315,7 @@
 }
 
 void ExceptionCache::set_next(ExceptionCache *ec) {
-  Atomic::store(ec, &_next);
+  Atomic::store(&_next, ec);
 }
 
 //-----------------------------------------------------------------------------
--- old/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp 2019-11-21 11:52:06.345891460 +0100
+++ new/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp 2019-11-21 11:52:05.841883227 +0100
@@ -55,7 +55,7 @@
 }
 
 void G1BlockOffsetTable::set_offset_array_raw(size_t index, u_char offset) {
-  Atomic::store(offset, &_offset_array[index]);
+  Atomic::store(&_offset_array[index], offset);
 }
 
 void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
--- old/src/hotspot/share/gc/shared/satbMarkQueue.cpp 2019-11-21 11:52:06.929901002 +0100
+++ new/src/hotspot/share/gc/shared/satbMarkQueue.cpp 2019-11-21 11:52:06.505894076 +0100
@@ -329,7 +329,7 @@
 #endif // PRODUCT
 
 void SATBMarkQueueSet::abandon_completed_buffers() {
-  Atomic::store(size_t(0), &_count_and_process_flag);
+  Atomic::store(&_count_and_process_flag, size_t(0));
   BufferNode* buffers_to_delete = _list.pop_all();
   while (buffers_to_delete != NULL) {
     BufferNode* bn = buffers_to_delete;
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2019-11-21 11:52:07.525910740 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp 2019-11-21 11:52:07.101903813 +0100
@@ -305,7 +305,7 @@
 }
 
 void ShenandoahHeapRegion::clear_live_data() {
-  Atomic::release_store_fence(&_live_data, 0);
+  Atomic::release_store_fence(&_live_data, (size_t)0);
 }
 
 void ShenandoahHeapRegion::reset_alloc_metadata() {
--- old/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp 2019-11-21 11:52:08.173921327 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp 2019-11-21 11:52:07.701913615 +0100
@@ -178,12 +178,12 @@
 size_t ShenandoahPacer::update_and_get_progress_history() {
   if (_progress == -1) {
     // First initialization, report some prior
-    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
     return (size_t) (_heap->max_capacity() * 0.1);
   } else {
     // Record history, and reply historical data
     _progress_history->add(_progress);
-    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
     return (size_t) (_progress_history->avg() * HeapWordSize);
   }
 }
@@ -192,7 +192,7 @@
   size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
   Atomic::xchg((intptr_t)initial, &_budget);
-  Atomic::store(tax_rate, &_tax_rate);
+  Atomic::store(&_tax_rate, tax_rate);
   Atomic::inc(&_epoch);
 }
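Note: several stored values above carry explicit casts — size_t(0), (size_t)0, (intptr_t)PACING_PROGRESS_ZERO, and (Method*)NULL earlier. The integral StoreImpl specialization (see the atomic.hpp hunks below) is StoreImpl<T, T, ...>, so the value must have exactly the destination's type; a bare 0 would deduce to int and find no matching specialization, and the same reasoning makes (Method*)NULL necessary for pointer destinations. A toy version of the deduction rule (simplified; not the real dispatch):

```cpp
#include <cstddef>

template <typename T>
void store(T volatile* dest, T value) {  // value and destination share one T
  *dest = value;
}

volatile size_t _live_data = 0;

void demo() {
  store(&_live_data, (size_t)0);  // OK: T = size_t on both sides
  // store(&_live_data, 0);       // error: T deduces to size_t and to int
}
```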
--- old/src/hotspot/share/gc/z/zForwarding.inline.hpp 2019-11-21 11:52:08.833932109 +0100
+++ new/src/hotspot/share/gc/z/zForwarding.inline.hpp 2019-11-21 11:52:08.341924071 +0100
@@ -54,7 +54,7 @@
 }
 
 inline void ZForwarding::set_pinned() {
-  Atomic::store(true, &_pinned);
+  Atomic::store(&_pinned, true);
 }
 
 inline bool ZForwarding::inc_refcount() {
--- old/src/hotspot/share/gc/z/zLock.inline.hpp 2019-11-21 11:52:09.473942565 +0100
+++ new/src/hotspot/share/gc/z/zLock.inline.hpp 2019-11-21 11:52:09.005934920 +0100
@@ -53,7 +53,7 @@
 
   if (owner != thread) {
     _lock.lock();
-    Atomic::store(thread, &_owner);
+    Atomic::store(&_owner, thread);
   }
 
   _count++;
@@ -66,7 +66,7 @@
   _count--;
 
   if (_count == 0) {
-    Atomic::store((Thread*)NULL, &_owner);
+    Atomic::store(&_owner, (Thread*)NULL);
     _lock.unlock();
   }
 }
--- old/src/hotspot/share/gc/z/zMark.cpp 2019-11-21 11:52:10.057952106 +0100
+++ new/src/hotspot/share/gc/z/zMark.cpp 2019-11-21 11:52:09.633945179 +0100
@@ -487,7 +487,7 @@
 
   // Flush before termination
   if (!try_flush(&_work_nterminateflush)) {
     // No more work available, skip further flush attempts
-    Atomic::store(false, &_work_terminateflush);
+    Atomic::store(&_work_terminateflush, false);
   }
 
   // Don't terminate, regardless of whether we successfully
--- old/src/hotspot/share/gc/z/zNMethod.cpp 2019-11-21 11:52:10.709962759 +0100
+++ new/src/hotspot/share/gc/z/zNMethod.cpp 2019-11-21 11:52:10.233954982 +0100
@@ -258,7 +258,7 @@
   volatile bool _failed;
 
   void set_failed() {
-    Atomic::store(true, &_failed);
+    Atomic::store(&_failed, true);
   }
 
   void unlink(nmethod* nm) {
--- old/src/hotspot/share/memory/allocation.inline.hpp 2019-11-21 11:52:11.357973346 +0100
+++ new/src/hotspot/share/memory/allocation.inline.hpp 2019-11-21 11:52:10.881965569 +0100
@@ -41,7 +41,7 @@
   *dest += add_value;
 #else
   julong value = Atomic::load(dest);
-  Atomic::store(value + add_value, dest);
+  Atomic::store(dest, value + add_value);
 #endif
 }
 #endif
--- old/src/hotspot/share/oops/accessBackend.inline.hpp 2019-11-21 11:52:11.945982952 +0100
+++ new/src/hotspot/share/oops/accessBackend.inline.hpp 2019-11-21 11:52:11.521976024 +0100
@@ -174,7 +174,7 @@
 inline typename EnableIf<
   HasDecorator<ds, MO_RELAXED>::value>::type
 RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
-  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
+  Atomic::store(reinterpret_cast<volatile T*>(addr), value);
 }
 
 template <DecoratorSet decorators>
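Note: the zLock.inline.hpp hunks above are ZReentrantLock::lock()/unlock(). The relaxed read of _owner outside the lock is safe because it can only observe the current thread's identity if that thread stored it itself, i.e. if it already holds the lock; any other value just routes through the slow path. A simplified analogue using standard primitives (illustrative; not HotSpot's ZLock):

```cpp
#include <atomic>
#include <mutex>
#include <thread>

class ReentrantLock {
  std::mutex                   _lock;
  std::atomic<std::thread::id> _owner{std::thread::id()};
  int                          _count = 0;  // only touched while owning _lock

public:
  void lock() {
    const std::thread::id self = std::this_thread::get_id();
    if (_owner.load(std::memory_order_relaxed) != self) {
      _lock.lock();                                       // first acquisition
      _owner.store(self, std::memory_order_relaxed);
    }
    _count++;                                             // recursive entry
  }

  void unlock() {
    if (--_count == 0) {
      _owner.store(std::thread::id(), std::memory_order_relaxed);
      _lock.unlock();
    }
  }
};
```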
--- old/src/hotspot/share/oops/klass.cpp 2019-11-21 11:52:12.621993996 +0100
+++ new/src/hotspot/share/oops/klass.cpp 2019-11-21 11:52:12.121985828 +0100
@@ -410,7 +410,7 @@
   // Does not need release semantics. If used by cleanup, it will link to
   // already safely published data, and if used by inserts, will be published
   // safely using cmpxchg.
-  Atomic::store(s, &_next_sibling);
+  Atomic::store(&_next_sibling, s);
 }
 
 void Klass::append_to_sibling_list() {
--- old/src/hotspot/share/oops/methodData.hpp 2019-11-21 11:52:13.222003798 +0100
+++ new/src/hotspot/share/oops/methodData.hpp 2019-11-21 11:52:12.793996806 +0100
@@ -2244,7 +2244,7 @@
     _rtm_state = (int)rstate;
   }
   void atomic_set_rtm_state(RTMState rstate) {
-    Atomic::store((int)rstate, &_rtm_state);
+    Atomic::store(&_rtm_state, (int)rstate);
   }
 
   static int rtm_state_offset_in_bytes() {
--- old/src/hotspot/share/oops/oop.inline.hpp 2019-11-21 11:52:13.862014254 +0100
+++ new/src/hotspot/share/oops/oop.inline.hpp 2019-11-21 11:52:13.430007197 +0100
@@ -61,7 +61,7 @@
 }
 
 void oopDesc::set_mark_raw(markWord m) {
-  Atomic::store(m, &_mark);
+  Atomic::store(&_mark, m);
 }
 
 void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
--- old/src/hotspot/share/prims/jni.cpp 2019-11-21 11:52:14.458023991 +0100
+++ new/src/hotspot/share/prims/jni.cpp 2019-11-21 11:52:14.030016999 +0100
@@ -3689,7 +3689,7 @@
   intptr_t *a = (intptr_t *) jni_functions();
   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
   for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
-    Atomic::store(*b++, a++);
+    Atomic::store(a++, *b++);
   }
 }
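Note: the atomic.hpp section below is the heart of the change. Atomic::store used to take (store_value, dest), while release_store and release_store_fence already took (dest, store_value); the patch flips store — and the template parameter order, D before T — so that every store reads destination-first, like an assignment, and matches Atomic::load's destination-first shape. In use (the _state field is assumed for illustration):

```cpp
#include "runtime/atomic.hpp"  // HotSpot-internal header

volatile int _state = 0;

void demo() {
  // Old order (before this patch): Atomic::store(42, &_state);
  Atomic::store(&_state, 42);                // relaxed store, dest first
  Atomic::release_store(&_state, 43);        // already dest-first before the patch
  Atomic::release_store_fence(&_state, 44);  // ditto
  int s = Atomic::load(&_state);             // loads were always dest-first
  (void)s;
}
```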
--- old/src/hotspot/share/runtime/atomic.hpp 2019-11-21 11:52:15.138035102 +0100
+++ new/src/hotspot/share/runtime/atomic.hpp 2019-11-21 11:52:14.706028043 +0100
@@ -79,13 +79,13 @@
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static void store(T store_value, volatile D* dest);
+  template<typename D, typename T>
+  inline static void store(volatile D* dest, T store_value);
 
-  template<typename T, typename D>
+  template<typename D, typename T>
   inline static void release_store(volatile D* dest, T store_value);
 
-  template<typename T, typename D>
+  template<typename D, typename T>
   inline static void release_store_fence(volatile D* dest, T store_value);
 
   // Atomically load from a location
@@ -168,7 +168,7 @@
   // Dispatch handler for store.  Provides type-based validity
   // checking and limited conversions around calls to the platform-
   // specific implementation layer provided by PlatformOp.
-  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  template<typename D, typename T, typename PlatformOp, typename Enable = void>
   struct StoreImpl;
 
   // Platform-specific implementation of store.  Support for sizes
@@ -450,9 +450,9 @@
   PlatformOp,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     // Forward to the platform handler for the size of T.
-    PlatformOp()(new_value, dest);
+    PlatformOp()(dest, new_value);
   }
 };
 
@@ -461,16 +461,16 @@
 // The new_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
 struct Atomic::StoreImpl<
-  T*, D*,
+  D*, T*,
   PlatformOp,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  void operator()(T* new_value, D* volatile* dest) const {
+  void operator()(D* volatile* dest, T* new_value) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* value = new_value;
-    PlatformOp()(value, dest);
+    PlatformOp()(dest, value);
   }
 };
 
@@ -486,12 +486,12 @@
   PlatformOp,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
-    PlatformOp()(Translator::decay(new_value),
-                 reinterpret_cast<Decayed volatile*>(dest));
+    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+                 Translator::decay(new_value));
   }
 };
 
@@ -504,8 +504,8 @@
 template<size_t byte_size>
 struct Atomic::PlatformStore {
   template<typename T>
-  void operator()(T new_value,
-                  T volatile* dest) const {
+  void operator()(T volatile* dest,
+                  T new_value) const {
     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
     (void)const_cast<T&>(*dest = new_value);
   }
 };
@@ -654,28 +654,28 @@
   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }
 
-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
-  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 }
 
 template<size_t byte_size, ScopedFenceType type>
 struct Atomic::PlatformOrderedStore {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     ScopedFence<type> f((void*)p);
-    Atomic::store(v, p);
+    Atomic::store(p, v);
   }
 };
 
-template<typename T, typename D>
+template<typename D, typename T>
 inline void Atomic::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 }
 
-template<typename T, typename D>
+template<typename D, typename T>
 inline void Atomic::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }
 
 template<typename I, typename D>
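Note: StoreImpl dispatches on the (D, T) pair to one of the three specializations shown above — integral/enum types with T identical to D, pointer types where T* converts to D* (derived-to-base, adding cv-qualifiers), and types registered through PrimitiveConversions::Translate (such as markWord, seen in earlier hunks), which are decayed to a same-sized integral before reaching the platform store. By example (the Base/Derived types are illustrative, not from the JDK):

```cpp
#include "runtime/atomic.hpp"  // HotSpot-internal header

class Base {};
class Derived : public Base {};

volatile int   _counter = 0;
Base* volatile _base    = nullptr;

void demo(Derived* d) {
  Atomic::store(&_counter, 1);  // integral path: T must equal D exactly
  Atomic::store(&_base, d);     // pointer path: Derived* converts to Base*
  // Translated types (PrimitiveConversions::Translate<T>::value), e.g.
  // markWord, take the third path: decayed to a same-sized integral.
}
```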
--- old/src/hotspot/share/runtime/basicLock.hpp 2019-11-21 11:52:15.826046342 +0100
+++ new/src/hotspot/share/runtime/basicLock.hpp 2019-11-21 11:52:15.322038107 +0100
@@ -40,7 +40,7 @@
   }
 
   void set_displaced_header(markWord header) {
-    Atomic::store(header, &_displaced_header);
+    Atomic::store(&_displaced_header, header);
   }
 
   void print_on(outputStream* st) const;
--- old/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-11-21 11:52:16.506057450 +0100
+++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp 2019-11-21 11:52:15.994049086 +0100
@@ -44,7 +44,7 @@
 }
 
 inline void ObjectMonitor::set_header(markWord hdr) {
-  Atomic::store(hdr, &_header);
+  Atomic::store(&_header, hdr);
 }
 
 inline jint ObjectMonitor::waiters() const {
@@ -63,7 +63,7 @@
   assert(_object != NULL, "must be non-NULL");
   assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
 
-  Atomic::store(markWord::zero(), &_header);
+  Atomic::store(&_header, markWord::zero());
   _object = NULL;
 }
--- old/src/hotspot/share/services/attachListener.hpp 2019-11-21 11:52:17.146067906 +0100
+++ new/src/hotspot/share/services/attachListener.hpp 2019-11-21 11:52:16.662060000 +0100
@@ -86,7 +86,7 @@
 
  public:
   static void set_state(AttachListenerState new_state) {
-    Atomic::store(new_state, &_state);
+    Atomic::store(&_state, new_state);
   }
 
   static AttachListenerState get_state() {
@@ -103,7 +103,7 @@
   }
 
   static void set_initialized() {
-    Atomic::store(AL_INITIALIZED, &_state);
+    Atomic::store(&_state, AL_INITIALIZED);
   }
 
   // indicates if this VM supports attach-on-demand
--- old/src/hotspot/share/utilities/lockFreeStack.hpp 2019-11-21 11:52:17.798078558 +0100
+++ new/src/hotspot/share/utilities/lockFreeStack.hpp 2019-11-21 11:52:17.306070520 +0100
@@ -170,7 +170,7 @@
   // if value is in an instance of this specialization of LockFreeStack,
   // there must be no concurrent push or pop operations on that stack.
   static void set_next(T& value, T* new_next) {
-    Atomic::store(new_next, next_ptr(value));
+    Atomic::store(next_ptr(value), new_next);
   }
 };
--- old/src/hotspot/share/utilities/vmError.cpp 2019-11-21 11:52:18.430088884 +0100
+++ new/src/hotspot/share/utilities/vmError.cpp 2019-11-21 11:52:17.954081108 +0100
@@ -399,7 +399,7 @@
 
 void VMError::record_reporting_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_reporting_start_time);
+  Atomic::store(&_reporting_start_time, now);
 }
 
 jlong VMError::get_reporting_start_time() {
@@ -408,7 +408,7 @@
 
 void VMError::record_step_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_step_start_time);
+  Atomic::store(&_step_start_time, now);
 }
 
 jlong VMError::get_step_start_time() {
@@ -416,7 +416,7 @@
 }
 
 void VMError::clear_step_start_time() {
-  return Atomic::store((jlong)0, &_step_start_time);
+  return Atomic::store(&_step_start_time, (jlong)0);
 }
 
 void VMError::report(outputStream* st, bool _verbose) {
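Note: the LockFreeStack comment above spells out the precondition for the relaxed set_next: the node must not be concurrently pushed or popped. In a Treiber-style stack the next pointer is normally written while the node is still thread-private, and the node is only published by a CAS on the top pointer — the same "not yet shared" condition. A minimal std::atomic sketch of that publication pattern (not HotSpot's LockFreeStack):

```cpp
#include <atomic>

struct Node { Node* next = nullptr; };

std::atomic<Node*> _top{nullptr};

void push(Node* n) {
  Node* old_top = _top.load(std::memory_order_relaxed);
  do {
    n->next = old_top;  // plain store: n is not visible to other threads yet
  } while (!_top.compare_exchange_weak(old_top, n,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}
```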