< prev index next >
src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp
Print this page
rev 13445 : imported patch linux_ppc
rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1
*** 304,315 ****
strasm_sync
);
}
}
! #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
--- 304,320 ----
strasm_sync
);
}
}
! template<>
! template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
! T volatile* dest,
! T compare_value,
! cmpxchg_memory_order order) const {
! STATIC_ASSERT(1 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
*** 366,385 ****
"memory"
);
cmpxchg_post_membar(order);
! return (jbyte)(unsigned char)old_value;
}
! inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
! unsigned int old_value;
const uint64_t zero = 0;
cmpxchg_pre_membar(order);
__asm__ __volatile__ (
--- 371,396 ----
"memory"
);
cmpxchg_post_membar(order);
! return IntegerTypes::cast<T>((unsigned char)old_value);
}
! template<>
! template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
! T volatile* dest,
! T compare_value,
! cmpxchg_memory_order order) const {
! STATIC_ASSERT(4 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
! T old_value;
const uint64_t zero = 0;
cmpxchg_pre_membar(order);
__asm__ __volatile__ (
*** 410,429 ****
"memory"
);
cmpxchg_post_membar(order);
! return (jint) old_value;
}
! inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
! long old_value;
const uint64_t zero = 0;
cmpxchg_pre_membar(order);
__asm__ __volatile__ (
--- 421,446 ----
"memory"
);
cmpxchg_post_membar(order);
! return old_value;
}
! template<>
! template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
! T volatile* dest,
! T compare_value,
! cmpxchg_memory_order order) const {
! STATIC_ASSERT(8 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
! T old_value;
const uint64_t zero = 0;
cmpxchg_pre_membar(order);
__asm__ __volatile__ (
*** 454,472 ****
"memory"
);
cmpxchg_post_membar(order);
! return (jlong) old_value;
! }
!
! inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
! return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
! }
!
! inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
! return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
#undef strasm_sync
#undef strasm_lwsync
#undef strasm_isync
--- 471,481 ----
"memory"
);
cmpxchg_post_membar(order);
! return old_value;
}
#undef strasm_sync
#undef strasm_lwsync
#undef strasm_isync
< prev index next >