@@ -53,51 +53,51 @@
 }
 
 template<>
 template<typename T>
 inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                  T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 
 // As per atomic.hpp all read-modify-write operations have to provide two-way
 // barriers semantics.
 //
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
 }
 
 
 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
 
 // No direct support for cmpxchg of bytes; emulate using int.
 template<>
 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 
 
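For context on the reordered 4-byte add path, here is a minimal stand-alone sketch, not the actual HotSpot implementation: sketch_add_and_fetch is a hypothetical stand-in for the ARMv7 stub reached through os::atomic_add_func and add_using_helper, using a sequentially consistent GCC/Clang __atomic builtin to model the "strongest/most-conservative ordering" that the real stubs provide with explicit barriers.

    #include <cstdint>

    // Hypothetical stand-in for the hand-written stub behind os::atomic_add_func.
    // A seq_cst builtin models the full two-way barrier semantics that atomic.hpp
    // requires of read-modify-write operations (assumption: GCC/Clang builtins).
    static inline int32_t sketch_add_and_fetch(volatile int32_t* dest, int32_t add_value) {
      return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
    }

    int main() {
      volatile int32_t counter = 0;
      // Harmonized parameter order as in the new add_and_fetch: destination first,
      // addend second; the new value is returned.
      return sketch_add_and_fetch(&counter, 5) == 5 ? 0 : 1;
    }

The memory_order argument is deliberately absent from the sketch, mirroring the header's policy of ignoring it and always providing the conservative ordering.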