src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp

*** 52,63 ****
      (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T store_value,
!                                                  T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    (*os::atomic_store_long_func)(
      PrimitiveConversions::cast<int64_t>(store_value),
      reinterpret_cast<volatile int64_t*>(dest));
  }
--- 52,63 ----
      (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
!                                                  T store_value) const {
    STATIC_ASSERT(8 == sizeof(T));
    (*os::atomic_store_long_func)(
      PrimitiveConversions::cast<int64_t>(store_value),
      reinterpret_cast<volatile int64_t*>(dest));
  }
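For context, this hunk only swaps the operand order so the destination comes first; the body, and the (value, dest) signature of the underlying os::atomic_store_long_func helper, are unchanged. A minimal standalone sketch of the same shape (not HotSpot code; it substitutes a GCC/Clang __atomic builtin for the os helper and static_cast for PrimitiveConversions::cast):

    #include <cstdint>

    // Stand-in for the helper the platform file dispatches to; the real
    // os::atomic_store_long_func keeps its (value, dest) argument order.
    static void store_long_helper(int64_t store_value, volatile int64_t* dest) {
      __atomic_store_n(dest, store_value, __ATOMIC_SEQ_CST);
    }

    // Dest-first front end, mirroring PlatformStore<8>::operator() after
    // the change: callers now pass (dest, store_value).
    template<typename T>
    void platform_store_8(T volatile* dest, T store_value) {
      static_assert(8 == sizeof(T), "8-byte store only");
      store_long_helper(static_cast<int64_t>(store_value),
                        reinterpret_cast<volatile int64_t*>(dest));
    }

    int main() {
      volatile int64_t cell = 0;
      platform_store_8(&cell, int64_t(42));  // destination first, value second
      return 0;
    }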
*** 68,98 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
!   return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
!   return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
  }
  
  // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
--- 68,98 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename D, typename I>
!   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
  };
  
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
!   return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
!                                              T exchange_value,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
!   return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
  }
  
  // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
--- 68,98 ----
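The add and xchg hunks follow the same pattern: the platform-facing parameter order becomes (dest, operand), while the operand-first helper stubs keep their signatures and the *_using_helper shims adapt between the two. A standalone sketch of that adaptation, assuming GCC/Clang __atomic builtins in place of os::atomic_add_func and os::atomic_xchg_func:

    #include <cstdint>

    // Stand-ins for the operand-first helper stubs (os::atomic_add_func
    // and os::atomic_xchg_func in the real file).
    static int32_t atomic_add_func(int32_t add_value, volatile int32_t* dest) {
      return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
    }
    static int32_t atomic_xchg_func(int32_t exchange_value, volatile int32_t* dest) {
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }

    // Dest-first front ends, mirroring the new add_and_fetch and
    // PlatformXchg<4>::operator() parameter order.
    static int32_t add_and_fetch(volatile int32_t* dest, int32_t add_value) {
      return atomic_add_func(add_value, dest);  // shim flips the order
    }
    static int32_t xchg(volatile int32_t* dest, int32_t exchange_value) {
      return atomic_xchg_func(exchange_value, dest);
    }

    int main() {
      volatile int32_t counter = 0;
      int32_t after = add_and_fetch(&counter, 5);  // returns 5, counter == 5
      int32_t old   = xchg(&counter, 9);           // returns 5, counter == 9
      return (after == 5 && old == 5) ? 0 : 1;
    }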
*** 117,140 ****
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
!   return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
!   return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
  }
  
  #endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
--- 117,140 ----
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
!   return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
!   return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
  }
  
  #endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
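cmpxchg gets the most visible reordering: the new signature reads (dest, compare_value, exchange_value), so a call site reads as "at dest, if you see compare_value, install exchange_value". A standalone sketch of the new order, assuming a GCC/Clang builtin in place of the reorder_cmpxchg_func stub; like the HotSpot operation it returns the value observed at dest:

    #include <cstdint>

    // Dest-first compare-and-swap in the new (dest, compare, exchange)
    // order; returns the value found at dest (== compare_value on success).
    static int32_t cmpxchg(volatile int32_t* dest,
                           int32_t compare_value,
                           int32_t exchange_value) {
      int32_t observed = compare_value;
      __atomic_compare_exchange_n(dest, &observed, exchange_value,
                                  /*weak=*/false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return observed;
    }

    int main() {
      volatile int32_t flag = 0;
      bool claimed = (cmpxchg(&flag, 0, 1) == 0);  // succeeds: flag was 0
      bool again   = (cmpxchg(&flag, 0, 1) == 0);  // fails: flag is now 1
      return (claimed && !again) ? 0 : 1;
    }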