
src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp


*** 230,242 ****
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(1 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
--- 230,242 ----
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(1 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
*** 300,312 ****
    return PrimitiveConversions::cast<T>((unsigned char)old_value);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
--- 300,312 ----
    return PrimitiveConversions::cast<T>((unsigned char)old_value);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
*** 350,362 ****
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
--- 350,362 ----
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
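
Note on the change: all three hunks reorder the operator() parameters from (exchange_value, dest, compare_value, order) to (dest, compare_value, exchange_value, order), so the destination pointer comes first and the new value last. The sketch below is not HotSpot code; it is a minimal standalone illustration of the same calling convention, assuming a hypothetical cmpxchg_sketch wrapper built on the GCC/Clang __atomic builtin rather than the inline-assembly PPC implementation shown above.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in mirroring the reordered signature:
// arguments are (dest, compare_value, exchange_value); the return
// value is the previous contents of *dest.
template <typename T>
inline T cmpxchg_sketch(T volatile* dest, T compare_value, T exchange_value) {
  T expected = compare_value;
  __atomic_compare_exchange_n(dest, &expected, exchange_value,
                              /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;  // old value, whether or not the exchange happened
}

int main() {
  volatile int64_t v = 41;
  int64_t old = cmpxchg_sketch<int64_t>(&v, 41, 42);  // succeeds: v becomes 42
  std::printf("old=%lld, now=%lld\n", (long long)old, (long long)v);
  return 0;
}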