
src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp

rev 10933 : 8154736: enhancement of cmpxchg and copy_to_survivor for ppc64
Reviewed-by:
Contributed-by: HORII@jp.ibm.com, mdoerr

*** 289,300 ****
  inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
    return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
  }
  
  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
--- 289,328 ----
  inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
    return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
  }
  
+ inline void cmpxchg_pre_membar(memory_order order) {
+   if (order == memory_order_seq_cst) {
+     __asm__ __volatile__ (
+       /* fence */
+       strasm_sync
+       );
+   } else if (order == memory_order_release || order == memory_order_acq_rel) {
+     __asm__ __volatile__ (
+       /* release */
+       strasm_lwsync
+       );
+   }
+ }
+ 
+ inline void cmpxchg_post_membar(memory_order order) {
+   if (order == memory_order_seq_cst) {
+     __asm__ __volatile__ (
+       /* fence */
+       strasm_sync
+       );
+   } else if (order == memory_order_acquire || order == memory_order_acq_rel) {
+     __asm__ __volatile__ (
+       /* acquire */
+       strasm_isync
+       );
+   }
+ }
+ 
  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, memory_order order) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
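The two helpers added above decide how much fencing surrounds the compare-and-exchange: sync on both sides for memory_order_seq_cst, lwsync before it for release/acq_rel, isync after it for acquire/acq_rel, and no barrier for weaker orders. As a rough, portable sketch of that policy (hypothetical names, C++11 fences standing in for the PPC instructions; this is illustrative only and not part of the patch):

  #include <atomic>

  // Hypothetical analogue of cmpxchg_pre_membar / cmpxchg_post_membar above.
  enum membar_order { mo_relaxed, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst };

  inline void pre_membar(membar_order order) {
    if (order == mo_seq_cst) {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // role of "sync"
    } else if (order == mo_release || order == mo_acq_rel) {
      std::atomic_thread_fence(std::memory_order_release);  // role of "lwsync"
    }
  }

  inline void post_membar(membar_order order) {
    if (order == mo_seq_cst) {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // role of "sync"
    } else if (order == mo_acquire || order == mo_acq_rel) {
      std::atomic_thread_fence(std::memory_order_acquire);  // role of "isync"
    }
  }

  // CAS with the barriers placed around it, the same shape the patched
  // jint/jlong variants use: pre_membar(order) ... asm loop ... post_membar(order).
  inline int cmpxchg_sketch(int exchange_value, std::atomic<int>* dest,
                            int compare_value, membar_order order) {
    pre_membar(order);
    int old_value = compare_value;
    // The CAS itself is relaxed; all ordering comes from the explicit fences.
    dest->compare_exchange_strong(old_value, exchange_value, std::memory_order_relaxed);
    post_membar(order);
    return old_value;  // value observed at the CAS, like the asm versions
  }

The sketch keeps the exchange itself relaxed on purpose: in the patched functions, too, the ll/sc loop is unchanged and only the surrounding membars vary with the requested order.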
*** 310,322 ****
                 masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
                 xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
  
    unsigned int old_value, value32;
  
    __asm__ __volatile__ (
-     /* fence */
-     strasm_sync
      /* simple guard */
      "   lbz     %[old_value], 0(%[dest])                  \n"
      "   cmpw    %[masked_compare_val], %[old_value]       \n"
      "   bne-    2f                                        \n"
      /* atomic loop */
--- 338,350 ----
                 masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
                 xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
  
    unsigned int old_value, value32;
  
+   cmpxchg_pre_membar(order);
+ 
    __asm__ __volatile__ (
      /* simple guard */
      "   lbz     %[old_value], 0(%[dest])                  \n"
      "   cmpw    %[masked_compare_val], %[old_value]       \n"
      "   bne-    2f                                        \n"
      /* atomic loop */
*** 329,340 ****
      "   bne-    2f                                        \n"
      /* replace byte and try to store */
      "   xor     %[value32], %[xor_value], %[value32]      \n"
      "   stwcx.  %[value32], 0, %[dest_base]               \n"
      "   bne-    1b                                        \n"
-     /* acquire */
-     strasm_sync
      /* exit */
      "2:                                                   \n"
      /* out */
      : [old_value]           "=&r"   (old_value),
        [value32]             "=&r"   (value32),
--- 357,366 ----
*** 351,375 ****
      /* clobber */
      : "cc",
        "memory"
      );
  
    return (jbyte)(unsigned char)old_value;
  }
  
! inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
  
    unsigned int old_value;
    const uint64_t zero = 0;
  
    __asm__ __volatile__ (
-     /* fence */
-     strasm_sync
      /* simple guard */
      "   lwz     %[old_value], 0(%[dest])                \n"
      "   cmpw    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      /* atomic loop */
--- 377,403 ----
      /* clobber */
      : "cc",
        "memory"
      );
  
+   cmpxchg_post_membar(order);
+ 
    return (jbyte)(unsigned char)old_value;
  }
  
! inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, memory_order order) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
  
    unsigned int old_value;
    const uint64_t zero = 0;
  
+   cmpxchg_pre_membar(order);
+ 
    __asm__ __volatile__ (
      /* simple guard */
      "   lwz     %[old_value], 0(%[dest])                \n"
      "   cmpw    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      /* atomic loop */
*** 377,388 ****
      "   lwarx   %[old_value], %[dest], %[zero]          \n"
      "   cmpw    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
      "   bne-    1b                                      \n"
-     /* acquire */
-     strasm_sync
      /* exit */
      "2:                                                 \n"
      /* out */
      : [old_value]       "=&r"   (old_value),
                          "=m"    (*dest)
--- 405,414 ----
*** 395,419 ****
      /* clobber */
      : "cc",
        "memory"
      );
  
    return (jint) old_value;
  }
  
! inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
  
    long old_value;
    const uint64_t zero = 0;
  
    __asm__ __volatile__ (
-     /* fence */
-     strasm_sync
      /* simple guard */
      "   ld      %[old_value], 0(%[dest])                \n"
      "   cmpd    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      /* atomic loop */
--- 421,447 ----
      /* clobber */
      : "cc",
        "memory"
      );
  
+   cmpxchg_post_membar(order);
+ 
    return (jint) old_value;
  }
  
! inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, memory_order order) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
    // (see atomic.hpp).
  
    long old_value;
    const uint64_t zero = 0;
  
+   cmpxchg_pre_membar(order);
+ 
    __asm__ __volatile__ (
      /* simple guard */
      "   ld      %[old_value], 0(%[dest])                \n"
      "   cmpd    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      /* atomic loop */
*** 421,432 ****
      "   ldarx   %[old_value], %[dest], %[zero]          \n"
      "   cmpd    %[compare_value], %[old_value]          \n"
      "   bne-    2f                                      \n"
      "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
      "   bne-    1b                                      \n"
-     /* acquire */
-     strasm_sync
      /* exit */
      "2:                                                 \n"
      /* out */
      : [old_value]       "=&r"   (old_value),
                          "=m"    (*dest)
--- 449,458 ----
*** 439,457 ****
      /* clobber */
      : "cc",
        "memory"
      );
  
    return (jlong) old_value;
  }
  
! inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
!   return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
  }
  
! inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
!   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
  }
  
  #undef strasm_sync
  #undef strasm_lwsync
  #undef strasm_isync
--- 465,485 ----
      /* clobber */
      : "cc",
        "memory"
      );
  
+   cmpxchg_post_membar(order);
+ 
    return (jlong) old_value;
  }
  
! inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, memory_order order) {
!   return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
  }
  
! inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, memory_order order) {
!   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
  }
  
  #undef strasm_sync
  #undef strasm_lwsync
  #undef strasm_isync
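For callers, the net effect of the new trailing parameter is that the previous unconditional sync ... CAS ... sync pair is only emitted for the strongest ordering, while weaker requests get cheaper barriers around the same ll/sc loop. A small self-contained illustration using std::atomic directly (the variable names here are made up for the example and do not come from the patch):

  #include <atomic>
  #include <cstdio>

  int main() {
    std::atomic<long> forwardee{0};

    // Conservative CAS: full ordering on both sides, corresponding to the
    // old behaviour where every Atomic::cmpxchg paid for sync before and after.
    long expected = 0;
    forwardee.compare_exchange_strong(expected, 42, std::memory_order_seq_cst);

    // Release-only CAS: the writer only needs its prior stores ordered before
    // the exchange (lwsync ... CAS on PPC), with no trailing barrier.
    expected = 42;
    forwardee.compare_exchange_strong(expected, 43, std::memory_order_release);

    std::printf("forwardee = %ld\n", forwardee.load(std::memory_order_acquire));
    return 0;
  }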