< prev index next >

src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp

Print this page
rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1

*** 310,319 **** --- 310,321 ---- template<typename T> inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value, T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(1 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). // Using 32 bit internally.
*** 378,392 **** template<typename T> inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, T volatile* dest, T compare_value, cmpxchg_memory_order order) const { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). ! unsigned int old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); __asm__ __volatile__ ( --- 380,396 ---- template<typename T> inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(4 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). ! T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); __asm__ __volatile__ (
*** 417,440 **** "memory" ); cmpxchg_post_membar(order); ! return IntegerTypes::cast<T>(old_value); } template<> template<typename T> inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, T volatile* dest, T compare_value, cmpxchg_memory_order order) const { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). ! long old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); __asm__ __volatile__ ( --- 421,446 ---- "memory" ); cmpxchg_post_membar(order); ! return old_value; } template<> template<typename T> inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, T volatile* dest, T compare_value, cmpxchg_memory_order order) const { + STATIC_ASSERT(8 == sizeof(T)); + // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). ! T old_value; const uint64_t zero = 0; cmpxchg_pre_membar(order); __asm__ __volatile__ (
*** 465,475 **** "memory" ); cmpxchg_post_membar(order); ! return IntegerTypes::cast<T>(old_value); } #undef strasm_sync #undef strasm_lwsync #undef strasm_isync --- 471,481 ---- "memory" ); cmpxchg_post_membar(order); ! return old_value; } #undef strasm_sync #undef strasm_lwsync #undef strasm_isync
< prev index next >