
src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp

rev 49898 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by:

*** 29,39 ****

  // Implement ADD using a CAS loop.
  template<size_t byte_size>
  struct Atomic::PlatformAdd {
    template<typename I, typename D>
!   inline D operator()(I add_value, D volatile* dest) const {
      D old_value = *dest;
      while (true) {
        D new_value = old_value + add_value;
        D result = cmpxchg(new_value, dest, old_value);
        if (result == old_value) break;
--- 29,39 ----

  // Implement ADD using a CAS loop.
  template<size_t byte_size>
  struct Atomic::PlatformAdd {
    template<typename I, typename D>
!   inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
      D old_value = *dest;
      while (true) {
        D new_value = old_value + add_value;
        D result = cmpxchg(new_value, dest, old_value);
        if (result == old_value) break;
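The hunk above changes only the signature: PlatformAdd's operator() gains an atomic_memory_order argument while the body remains a cmpxchg retry loop. As a point of reference, the same add-via-CAS technique can be written against std::atomic. This is an illustrative sketch, not HotSpot code; it assumes the convention that the add returns the updated value.

  #include <atomic>

  // Illustrative sketch only: add implemented as a CAS retry loop, mirroring
  // PlatformAdd above. compare_exchange_weak plays the role of Atomic::cmpxchg
  // and refreshes old_value with the current contents of dest on failure.
  template<typename T>
  T add_via_cas(std::atomic<T>& dest, T add_value) {
    T old_value = dest.load();
    T new_value;
    do {
      new_value = old_value + add_value;
    } while (!dest.compare_exchange_weak(old_value, new_value));
    return new_value;  // assumption: the updated value is returned
  }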
*** 44,54 ****
  };

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "swap [%2],%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
                      : "memory");
--- 44,55 ----
  };

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "swap [%2],%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
                      : "memory");
*** 56,66 ****
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old_value = *dest;
    while (true) {
      T result = cmpxchg(exchange_value, dest, old_value);
      if (result == old_value) break;
--- 57,68 ----
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old_value = *dest;
    while (true) {
      T result = cmpxchg(exchange_value, dest, old_value);
      if (result == old_value) break;
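Both PlatformXchg specializations pick up the same atomic_memory_order parameter: the 4-byte form keeps using the hardware swap instruction, and the 8-byte form keeps emulating the exchange with a cmpxchg retry loop. For comparison, the retry-loop exchange written against std::atomic looks as follows. This is an illustrative sketch, not HotSpot code, assuming the loop should return the value that was replaced.

  #include <atomic>

  // Illustrative sketch only: exchange emulated by a compare-exchange retry
  // loop, as in the 8-byte PlatformXchg above. (std::atomic<T>::exchange
  // would do this in a single call.)
  template<typename T>
  T xchg_via_cas(std::atomic<T>& dest, T exchange_value) {
    T old_value = dest.load();
    // On failure, compare_exchange_weak reloads old_value from dest, so the
    // loop retries until the swap against an unchanged value succeeds.
    while (!dest.compare_exchange_weak(old_value, exchange_value)) {
    }
    return old_value;  // the value that was replaced
  }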
*** 76,86 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    T rv;
    __asm__ volatile(
      " cas [%2], %3, %0"
      : "=r" (rv)
--- 78,88 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    T rv;
    __asm__ volatile(
      " cas [%2], %3, %0"
      : "=r" (rv)
*** 92,102 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    T rv;
    __asm__ volatile(
      " casx [%2], %3, %0"
      : "=r" (rv)
--- 94,104 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    T rv;
    __asm__ volatile(
      " casx [%2], %3, %0"
      : "=r" (rv)
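In both cmpxchg specializations only the type of the ordering parameter changes, from the old cmpxchg_memory_order to the shared atomic_memory_order now also used by add and xchg; the cas/casx instruction sequences are untouched, so the order argument is accepted here without altering the generated code. As a rough analogue, a compare-and-swap primitive that takes an explicit ordering argument can be sketched with std::atomic. The std::memory_order values are the C++ standard library's, not HotSpot's atomic_memory_order enumerators, and the default shown is an assumption made for the sketch.

  #include <atomic>

  // Illustrative sketch only: a cmpxchg-style primitive with an explicit
  // ordering argument. It stores exchange_value only if dest still holds
  // compare_value and, either way, returns the value actually observed,
  // matching the cmpxchg contract relied on by the CAS loops above.
  template<typename T>
  T cmpxchg_like(std::atomic<T>& dest, T exchange_value, T compare_value,
                 std::memory_order order = std::memory_order_seq_cst) {
    T observed = compare_value;
    dest.compare_exchange_strong(observed, exchange_value, order);
    return observed;
  }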