--- old/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	2019-11-21 11:16:59.463397958 +0100
+++ new/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	2019-11-21 11:16:58.971390060 +0100
@@ -32,10 +32,6 @@
 // Note that memory_order_conservative requires a full barrier after atomic stores.
 // See https://patchwork.kernel.org/patch/3575821/
 
-#define FULL_MEM_BARRIER __sync_synchronize()
-#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@@ -81,4 +77,25 @@
   }
 }
 
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+};
+
 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
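
For reference, a minimal standalone sketch (not part of the patch) of the acquire/release pairing that the new PlatformOrderedLoad/PlatformOrderedStore specializations rely on, using the same GCC/Clang generic __atomic builtins. The publisher/consumer functions and globals below are illustrative names, not HotSpot code:

  #include <cstdio>

  volatile int g_value = 0;
  volatile int g_ready = 0;

  void publisher() {
    int v = 42;
    __atomic_store(const_cast<int*>(&g_value), &v, __ATOMIC_RELAXED);
    int r = 1;
    // Release store: makes the write to g_value visible before any
    // thread can observe g_ready == 1 (mirrors the RELEASE_X case).
    __atomic_store(const_cast<int*>(&g_ready), &r, __ATOMIC_RELEASE);
  }

  void consumer() {
    int r;
    // Acquire load: pairs with the release store above
    // (mirrors the X_ACQUIRE case).
    __atomic_load(const_cast<int*>(&g_ready), &r, __ATOMIC_ACQUIRE);
    if (r == 1) {
      int v;
      __atomic_load(const_cast<int*>(&g_value), &v, __ATOMIC_RELAXED);
      printf("%d\n", v);  // prints 42 once the flag is observed
    }
  }

The const_cast calls mirror the patch itself, which strips volatile before handing the pointer to the builtins; the builtins then provide the required ordering themselves.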