src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
rev 10933 : 8154736: enhancement of cmpxchg and copy_to_survivor for ppc64
Reviewed-by:
Contributed-by: HORII@jp.ibm.com, mdoerr
@@ -289,12 +289,40 @@
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
+inline void cmpxchg_pre_membar(memory_order order) {
+ if (order == memory_order_seq_cst) {
+ __asm__ __volatile__ (
+ /* fence */
+ strasm_sync
+ );
+ } else if (order == memory_order_release || order == memory_order_acq_rel) {
+ __asm__ __volatile__ (
+ /* release */
+ strasm_lwsync
+ );
+ }
+}
+
+inline void cmpxchg_post_membar(memory_order order) {
+ if (order == memory_order_seq_cst) {
+ __asm__ __volatile__ (
+ /* fence */
+ strasm_sync
+ );
+ } else if (order == memory_order_acquire || order == memory_order_acq_rel) {
+ __asm__ __volatile__ (
+ /* acquire */
+ strasm_isync
+ );
+ }
+}
+
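For reference, the barrier selection these helpers implement, per order (a
summary of the code above; the memory_order enum itself lives in atomic.hpp
and is assumed here to use the C++11-style names appearing in this file):

    //   order                   pre-membar   post-membar
    //   memory_order_seq_cst    sync         sync
    //   memory_order_acq_rel    lwsync       isync
    //   memory_order_release    lwsync       (none)
    //   memory_order_acquire    (none)       isync
    //   any other order         (none)       (none)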
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, memory_order order) {
// Note that with memory_order_seq_cst, cmpxchg guarantees a two-way
// memory barrier across the cmpxchg, so it's really a
// 'fence_cmpxchg_acquire' (see atomic.hpp); weaker orders emit only
// the barriers selected by cmpxchg_pre_membar/cmpxchg_post_membar.
@@ -310,13 +338,13 @@
masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
xor_value = (masked_compare_val ^ masked_exchange_val) << shift_amount;
unsigned int old_value, value32;
+ cmpxchg_pre_membar(order);
+
__asm__ __volatile__ (
- /* fence */
- strasm_sync
/* simple guard */
" lbz %[old_value], 0(%[dest]) \n"
" cmpw %[masked_compare_val], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
@@ -329,12 +357,10 @@
" bne- 2f \n"
/* replace byte and try to store */
" xor %[value32], %[xor_value], %[value32] \n"
" stwcx. %[value32], 0, %[dest_base] \n"
" bne- 1b \n"
- /* acquire */
- strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
[value32] "=&r" (value32),
@@ -351,25 +377,27 @@
/* clobber */
: "cc",
"memory"
);
+ cmpxchg_post_membar(order);
+
return (jbyte)(unsigned char)old_value;
}
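The byte variant emulates a one-byte CAS on top of the word-sized
lwarx/stwcx. reservation: it works on the aligned 32-bit word containing
dest, compares only the addressed byte, and on success rewrites just that
byte by xor-ing in a precomputed mask. A sketch of the setup arithmetic the
asm relies on (this code sits above the hunk shown here and is reconstructed
as an assumption; the real file selects the shift with a VM_LITTLE_ENDIAN
conditional, and the big-endian case is shown):

    // Aligned word containing the byte, and the byte's bit position in it.
    volatile int* dest_base    = (volatile int*)((uintptr_t)dest & ~3);
    unsigned int  shift_amount = (3 - ((uintptr_t)dest & 3)) * 8;

    // masked_compare_val and masked_exchange_val differ only in the low
    // byte, so xor_value has nonzero bits only at the addressed byte and
    //   value32 ^ xor_value
    // swaps compare_value for exchange_value there while leaving the other
    // three bytes of the word untouched.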
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, memory_order order) {
// Note that with memory_order_seq_cst, cmpxchg guarantees a two-way
// memory barrier across the cmpxchg, so it's really a
// 'fence_cmpxchg_acquire' (see atomic.hpp); weaker orders emit only
// the barriers selected by cmpxchg_pre_membar/cmpxchg_post_membar.
unsigned int old_value;
const uint64_t zero = 0;
+ cmpxchg_pre_membar(order);
+
__asm__ __volatile__ (
- /* fence */
- strasm_sync
/* simple guard */
" lwz %[old_value], 0(%[dest]) \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
@@ -377,12 +405,10 @@
" lwarx %[old_value], %[dest], %[zero] \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stwcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
- /* acquire */
- strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
@@ -395,25 +421,27 @@
/* clobber */
: "cc",
"memory"
);
+ cmpxchg_post_membar(order);
+
return (jint) old_value;
}
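Note the "simple guard" shared by all three variants: a plain load and
compare runs before the lwarx/stwcx. loop, so a CAS that is going to fail
branches straight to the exit without ever taking a reservation on the
cache line. A control-flow sketch in C++ (load_reserve and
store_conditional are invented stand-ins for lwarx/stwcx.; this shows the
shape of the loop, not the atomicity):

    jint cmpxchg_shape(jint exchange_value, volatile jint* dest, jint compare_value) {
      jint old = *dest;                      // simple guard: plain lwz
      if (old != compare_value) return old;  // fail fast, no reservation
      do {
        old = load_reserve(dest);            // lwarx
        if (old != compare_value) break;     // value changed after the guard
      } while (!store_conditional(dest, exchange_value));  // stwcx., retry on loss
      return old;
    }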
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, memory_order order) {
// Note that with memory_order_seq_cst, cmpxchg guarantees a two-way
// memory barrier across the cmpxchg, so it's really a
// 'fence_cmpxchg_acquire' (see atomic.hpp); weaker orders emit only
// the barriers selected by cmpxchg_pre_membar/cmpxchg_post_membar.
long old_value;
const uint64_t zero = 0;
+ cmpxchg_pre_membar(order);
+
__asm__ __volatile__ (
- /* fence */
- strasm_sync
/* simple guard */
" ld %[old_value], 0(%[dest]) \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
@@ -421,12 +449,10 @@
" ldarx %[old_value], %[dest], %[zero] \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stdcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
- /* acquire */
- strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
@@ -439,19 +465,21 @@
/* clobber */
: "cc",
"memory"
);
+ cmpxchg_post_membar(order);
+
return (jlong) old_value;
}
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
- return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, memory_order order) {
+ return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
- return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, memory_order order) {
+ return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
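With the added order parameter, call sites that need only one-way ordering
can avoid the full sync fences of a conservative CAS. A hypothetical caller
(names invented; the copy_to_survivor path named in the bug title is the
kind of user this enables, but its code is not part of this change):

    // Publish an initialized block with release-only semantics: the
    // leading lwsync orders the initializing stores before the pointer
    // becomes visible, and no trailing barrier is emitted.
    void* expected = NULL;
    void* witness  = Atomic::cmpxchg_ptr(new_block, &_slot, expected,
                                         memory_order_release);
    if (witness == expected) {
      // this thread's CAS installed new_block
    }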
#undef strasm_sync
#undef strasm_lwsync
#undef strasm_isync