
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

*** 34,67 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
      D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
      FULL_MEM_BARRIER;
      return res;
    }
  };

  template<size_t byte_size>
  template<typename T>
! inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
!                                                      T volatile* dest,
                                                       atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(T));
    T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<size_t byte_size>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
!                                                         T volatile* dest,
                                                          T compare_value,
                                                          atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(T));
    if (order == memory_order_relaxed) {
      T value = compare_value;
      __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
--- 34,67 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename D, typename I>
!   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
      D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
      FULL_MEM_BARRIER;
      return res;
    }
  };

  template<size_t byte_size>
  template<typename T>
! inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
!                                                      T exchange_value,
                                                       atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(T));
    T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<size_t byte_size>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                          T compare_value,
+                                                         T exchange_value,
                                                          atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(T));
    if (order == memory_order_relaxed) {
      T value = compare_value;
      __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
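
The hunk above only reorders parameters: dest now comes first, matching the Atomic::add(dest, value) / Atomic::xchg(dest, value) / Atomic::cmpxchg(dest, compare, exchange) call shape. The implementation is untouched: the read-modify-write is issued with release semantics and then followed by FULL_MEM_BARRIER, giving conservatively strong ordering regardless of the requested order (cmpxchg alone has a relaxed fast path, visible at the end of the hunk). A minimal standalone sketch of that pattern follows, assuming GCC/Clang __atomic builtins; the free function name add_and_fetch and the FULL_MEM_BARRIER macro here are illustrative stand-ins, not HotSpot's API (HotSpot's FULL_MEM_BARRIER emits a dmb ish on AArch64, which __atomic_thread_fence(__ATOMIC_SEQ_CST) also produces).

#include <cstdint>
#include <cstdio>

// Stand-in for HotSpot's FULL_MEM_BARRIER (a full "dmb ish" on AArch64).
#define FULL_MEM_BARRIER __atomic_thread_fence(__ATOMIC_SEQ_CST)

// Hypothetical free function mirroring the dest-first parameter order the
// patch introduces: the RMW is done with release semantics, then a full
// barrier upgrades it to sequentially consistent behavior.
template <typename D, typename I>
D add_and_fetch(D volatile* dest, I add_value) {
  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

int main() {
  volatile int64_t counter = 41;
  long long v = (long long)add_and_fetch(&counter, (int64_t)1);
  std::printf("%lld\n", v);  // prints 42
  return 0;
}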
*** 86,101 ****
  template<size_t byte_size>
  struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
  };

  template<size_t byte_size>
  struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
  };

  #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
--- 86,101 ----
  template<size_t byte_size>
  struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
  };

  template<size_t byte_size>
  struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
  };

  #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
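
Again the only change is the parameter order (p before v). For reference, a standalone sketch of the two store flavors, assuming GCC/Clang __atomic builtins; the names release_store / release_store_fence mirror the pair used in the hunk, and __atomic_thread_fence(__ATOMIC_SEQ_CST) stands in for OrderAccess::fence(). The const_cast mirrors the HotSpot code exactly.

#include <cstdint>

// RELEASE_X flavor: a plain release store; on AArch64 this can compile
// to a single "stlr" instruction.
template <typename T>
void release_store(volatile T* p, T v) {
  __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE);
}

// RELEASE_X_FENCE flavor: the same release store, trailed by a full fence
// so the store is also ordered against subsequent loads.
template <typename T>
void release_store_fence(volatile T* p, T v) {
  release_store(p, v);
  __atomic_thread_fence(__ATOMIC_SEQ_CST);  // stand-in for OrderAccess::fence()
}

int main() {
  volatile int32_t flag = 0;
  release_store(&flag, (int32_t)1);
  release_store_fence(&flag, (int32_t)2);
  return 0;
}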