 * However, gcc emits LDRD/STRD instructions on v5te and LDM/STM on v5t
 * when loading/storing 64 bits.
 * For non-MP machines (which is all we support for ARM < v7)
 * under current Linux distros these instructions appear atomic.
 * See section A3.5.3 of ARM Architecture Reference Manual for ARM v7.
 * Also, for cmpxchg64, if ARM < v7 we check for cmpxchg64 support in the
 * Linux kernel using _kuser_helper_version. See entry-armv.S in the Linux
 * kernel source or kernel_user_helpers.txt in Linux Doc.
 */
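// Illustrative sketch only (the helper below is hypothetical and not used by
// the VM): one way the cmpxchg64 availability probe described above can be
// expressed. The kernel publishes __kuser_helper_version in a word at the
// fixed address 0xffff0ffc, and __kuser_cmpxchg64 (helper #5, entry point
// 0xffff0f60) is present when that version is >= 5; see
// kernel_user_helpers.txt in the Linux documentation.
//
// inline bool linux_arm_has_kuser_cmpxchg64_sketch() {
//   // Read the kernel-exported helper version from the vector page.
//   const int32_t version = *reinterpret_cast<const int32_t*>(0xffff0ffc);
//   return version >= 5;  // helper #5 is __kuser_cmpxchg64
// }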
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  return PrimitiveConversions::cast<T>(
    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  (*os::atomic_store_long_func)(
    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}

// As per atomic.hpp all read-modify-write operations have to provide two-way
// barrier semantics.
//
// For ARMv7 we add explicit barriers in the stubs.

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
}


template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
}


// The memory_order parameter is ignored - we always provide the
// strongest/most-conservative ordering.

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};


inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
                                    int32_t volatile* dest,
                                    int32_t compare_value) {
  // Warning: Arguments are swapped to avoid moving them for the kernel call.
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
                                         int64_t volatile* dest,
                                         int64_t compare_value) {
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
  // Warning: Arguments are swapped to avoid moving them for the kernel call.
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}
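// Note on the swap: cmpxchg_using_helper (see atomic.hpp) invokes its stub as
// fn(exchange_value, dest, compare_value), while the stubs reached through
// os::atomic_cmpxchg_func follow the kernel __kuser_cmpxchg convention of
// (compare_value, exchange_value, dest), i.e. r0 = oldval, r1 = newval,
// r2 = ptr. The two trampolines above only translate between these
// conventions; they add no ordering of their own.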
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
}

#endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP