
src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp

rev 13323 : imported patch Atomic_refactoring
rev 13327 : [mq]: SpecializableAtomic

*** 30,55 ****
  #error "Atomic currently only implemented for PPC64"
  #endif
  
  // Implementation of class atomic
  
- inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
- inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
- inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
- inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
- inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
- 
- inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
- inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
- inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
- inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
- inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
- 
- inline jlong Atomic::load(const volatile jlong* src) { return *src; }
- 
  //
  // machine barrier instructions:
  //
  // - sync            two-way memory barrier, aka fence
  // - lwsync          orders  Store|Store,
--- 30,39 ----
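This hunk deletes the per-type Atomic::store overloads and the 64-bit Atomic::load. On PPC64 these are plain loads and stores, so after the refactoring a single shared, templated fallback can presumably supply them. A minimal sketch of such a fallback, with hypothetical names (the shared header introduced by the Atomic_refactoring patch is not part of this webrev):

    #include <cstdint>

    // Hypothetical sketch only: the real shared header and its dispatch
    // are not shown in this diff. The point is that a plain volatile
    // store/load is naturally atomic on PPC64 for 1-, 2-, 4- and 8-byte
    // integers, so no inline assembly is required for these operations.
    template <typename T>
    inline void generalized_store(T store_value, volatile T* dest) {
      *dest = store_value;
    }

    template <typename T>
    inline T generalized_load(const volatile T* src) {
      return *src;
    }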
*** 91,102 ****
  #define strasm_acquire                    strasm_lwsync
  #define strasm_fence                      strasm_sync
  #define strasm_nobarrier                  ""
  #define strasm_nobarrier_clobber_memory   ""
  
! inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
! 
    unsigned int result;
  
    __asm__ __volatile__ (
      strasm_lwsync
      "1: lwarx   %0,  0, %2    \n"
--- 75,86 ----
  #define strasm_acquire                    strasm_lwsync
  #define strasm_fence                      strasm_sync
  #define strasm_nobarrier                  ""
  #define strasm_nobarrier_clobber_memory   ""
  
! template <>
! inline int32_t GeneralizedAtomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
    unsigned int result;
  
    __asm__ __volatile__ (
      strasm_lwsync
      "1: lwarx   %0,  0, %2    \n"
*** 106,121 ****
      strasm_isync
      : /*%0*/"=&r" (result)
      : /*%1*/"r" (add_value), /*%2*/"r" (dest)
      : "cc", "memory" );
  
!   return (jint) result;
  }
  
  
! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
! 
    long result;
  
    __asm__ __volatile__ (
      strasm_lwsync
      "1: ldarx   %0,  0, %2    \n"
--- 90,105 ----
      strasm_isync
      : /*%0*/"=&r" (result)
      : /*%1*/"r" (add_value), /*%2*/"r" (dest)
      : "cc", "memory" );
  
!   return (int32_t) result;
  }
  
  
! template <>
! inline int64_t GeneralizedAtomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
    long result;
  
    __asm__ __volatile__ (
      strasm_lwsync
      "1: ldarx   %0,  0, %2    \n"
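Both specialized_add specializations follow the same shape: a load-reserved/store-conditional retry loop, bracketed by a release barrier (strasm_lwsync) before and an acquire barrier (strasm_isync) after. A standalone sketch of the 32-bit loop with the barrier macros expanded to the instructions they name (PPC64-only inline assembly; illustrative, not the patch's exact code):

    #include <cstdint>

    // Standalone sketch of the retry loop used by specialized_add<int32_t>
    // above, with strasm_lwsync/strasm_isync expanded.
    inline int32_t fetch_add_i32(int32_t add_value, volatile int32_t* dest) {
      uint32_t result;
      __asm__ __volatile__ (
        "   lwsync                \n"  // release barrier before the update
        "1: lwarx   %0,  0, %2    \n"  // load word and reserve dest
        "   add     %0, %0, %1    \n"  // compute the new value
        "   stwcx.  %0,  0, %2    \n"  // store iff the reservation still holds
        "   bne-    1b            \n"  // reservation lost: retry
        "   isync                 \n"  // acquire barrier after the update
        : /*%0*/"=&r" (result)
        : /*%1*/"r" (add_value), /*%2*/"r" (dest)
        : "cc", "memory");
      return (int32_t) result;  // add returns the updated value
    }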
*** 125,144 ****
      strasm_isync
      : /*%0*/"=&r" (result)
      : /*%1*/"r" (add_value), /*%2*/"r" (dest)
      : "cc", "memory" );
  
!   return (intptr_t) result;
! }
! 
! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
  }
  
  
! inline void Atomic::inc    (volatile jint*     dest) {
! 
    unsigned int temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: lwarx   %0,  0, %2    \n"
--- 109,124 ----
      strasm_isync
      : /*%0*/"=&r" (result)
      : /*%1*/"r" (add_value), /*%2*/"r" (dest)
      : "cc", "memory" );
  
!   return (int64_t) result;
  }
  
  
! template <>
! inline void GeneralizedAtomic::specialized_inc<int32_t>(volatile int32_t* dest) {
    unsigned int temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: lwarx   %0,  0, %2    \n"
*** 150,161 ****
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
! inline void Atomic::inc_ptr(volatile intptr_t* dest) {
! 
    long temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: ldarx   %0,  0, %2    \n"
--- 130,141 ----
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
! template <>
! inline void GeneralizedAtomic::specialized_inc<int64_t>(volatile int64_t* dest) {
    long temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: ldarx   %0,  0, %2    \n"
*** 167,183 ****
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
- inline void Atomic::inc_ptr(volatile void* dest) {
-   inc_ptr((volatile intptr_t*)dest);
- }
- 
- 
- inline void Atomic::dec    (volatile jint*     dest) {
  
    unsigned int temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: lwarx   %0,  0, %2    \n"
--- 147,159 ----
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
+ template <>
+ inline void GeneralizedAtomic::specialized_dec<int32_t>(volatile int32_t* dest) {
  
    unsigned int temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: lwarx   %0,  0, %2    \n"
*** 189,200 ****
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
- inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  
    long temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: ldarx   %0,  0, %2    \n"
--- 165,177 ----
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
+ template <>
+ inline void GeneralizedAtomic::specialized_dec<int64_t>(volatile int64_t* dest) {
  
    long temp;
  
    __asm__ __volatile__ (
      strasm_nobarrier
      "1: ldarx   %0,  0, %2    \n"
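The inc/dec specializations run the same kind of loop, but bracketed by strasm_nobarrier, which expands to the empty string: they guarantee atomicity only, not ordering. A sketch of a barrier-free 64-bit decrement under that reading (the loop body itself falls outside the hunks shown, so the addic step here is an assumption; PPC64-only, illustrative):

    #include <cstdint>

    // Sketch of the barrier-free loop behind specialized_dec<int64_t>:
    // strasm_nobarrier expands to "", so the ll/sc loop provides
    // atomicity but no ordering guarantees.
    inline void dec_relaxed_i64(volatile int64_t* dest) {
      int64_t temp;
      __asm__ __volatile__ (
        "1: ldarx   %0,  0, %2    \n"  // load doubleword and reserve dest
        "   addic   %0, %0, -1    \n"  // temp - 1 (assumed decrement step)
        "   stdcx.  %0,  0, %2    \n"  // store iff the reservation still holds
        "   bne-    1b            \n"  // reservation lost: retry
        : /*%0*/"=&r" (temp), "=m" (*dest)
        : /*%2*/"r" (dest), "m" (*dest)
        : "cc");
    }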
*** 206,221 ****
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
- inline void Atomic::dec_ptr(volatile void* dest) {
-   dec_ptr((volatile intptr_t*)dest);
- }
- 
- inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
    // Note that xchg_ptr doesn't necessarily do an acquire
    // (see synchronizer.cpp).
  
    unsigned int old_value;
    const uint64_t zero = 0;
--- 183,195 ----
      : /*%2*/"r" (dest), "m" (*dest)
      : "cc" strasm_nobarrier_clobber_memory);
  }
  
  
+ template <>
+ inline int32_t GeneralizedAtomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
    // Note that xchg_ptr doesn't necessarily do an acquire
    // (see synchronizer.cpp).
  
    unsigned int old_value;
    const uint64_t zero = 0;
*** 243,257 ****
      /* clobber */
      : "cc",
        "memory"
      );
  
!   return (jint) old_value;
  }
  
- inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    // Note that xchg_ptr doesn't necessarily do an acquire
    // (see synchronizer.cpp).
  
    long old_value;
    const uint64_t zero = 0;
--- 217,232 ----
      /* clobber */
      : "cc",
        "memory"
      );
  
!   return (int32_t) old_value;
  }
  
+ template <>
+ inline int64_t GeneralizedAtomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
    // Note that xchg_ptr doesn't necessarily do an acquire
    // (see synchronizer.cpp).
  
    long old_value;
    const uint64_t zero = 0;
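The xchg specializations reuse the reservation loop but store an unrelated new value rather than a computed one. A minimal sketch of the core 32-bit exchange; the barrier placement of the full implementation lies mostly outside these hunks and is omitted here (PPC64-only, illustrative):

    #include <cstdint>

    // Core of the exchange loop behind specialized_xchg<int32_t>, without
    // the surrounding barriers (their exact placement is not visible in
    // the hunks above).
    inline int32_t xchg_i32_relaxed(int32_t exchange_value, volatile int32_t* dest) {
      uint32_t old_value;
      __asm__ __volatile__ (
        "1: lwarx   %0,  0, %2    \n"  // load current value and reserve dest
        "   stwcx.  %1,  0, %2    \n"  // try to store the new value
        "   bne-    1b            \n"  // reservation lost: retry
        : /*%0*/"=&r" (old_value)
        : /*%1*/"r" (exchange_value), /*%2*/"r" (dest)
        : "cc", "memory");
      return (int32_t) old_value;
    }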
*** 279,294 ****
      /* clobber */
      : "cc",
        "memory"
      );
  
!   return (intptr_t) old_value;
  }
  
- inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
- }
  
  inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
    if (order != memory_order_relaxed) {
      __asm__ __volatile__ (
        /* fence */
--- 254,266 ----
      /* clobber */
      : "cc",
        "memory"
      );
  
!   return (int64_t) old_value;
  }
  
  
  inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
    if (order != memory_order_relaxed) {
      __asm__ __volatile__ (
        /* fence */
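cmpxchg_pre_membar and cmpxchg_post_membar factor the ordering out of the exchange itself: unless the caller passes memory_order_relaxed, the loop is preceded by a full two-way fence (sync) and followed by an acquire barrier (isync). A self-contained sketch of that bracketing; the enum mirrors cmpxchg_memory_order from atomic.hpp, which is not part of this diff, and all names here are illustrative stand-ins:

    // Stand-in for the two-value cmpxchg_memory_order enum of this era.
    enum cmpxchg_memory_order_sketch { order_relaxed, order_conservative };

    // Unless relaxed is requested, a conservative cmpxchg is fenced on
    // both sides, matching the 'fence_cmpxchg_fence' wording below.
    inline void pre_membar_sketch(cmpxchg_memory_order_sketch order) {
      if (order != order_relaxed) {
        __asm__ __volatile__ ("sync" : : : "memory");   // two-way fence
      }
    }

    inline void post_membar_sketch(cmpxchg_memory_order_sketch order) {
      if (order != order_relaxed) {
        __asm__ __volatile__ ("isync" : : : "memory");  // acquire
      }
    }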
*** 305,316 ****
        );
    }
  }
  
  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
  
! inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
! 
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    // Using 32 bit internally.
--- 277,288 ----
        );
    }
  }
  
  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
  
! template <>
! inline int8_t GeneralizedAtomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    // Using 32 bit internally.
*** 366,380 ****
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (jbyte)(unsigned char)old_value;
  }
  
! inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
! 
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    unsigned int old_value;
--- 338,352 ----
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (int8_t)(unsigned char)old_value;
  }
  
! template <>
! inline int32_t GeneralizedAtomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    unsigned int old_value;
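The int8_t specialization works 'using 32 bit internally', per its comment: the byte is compare-and-swapped by operating on its aligned containing 32-bit word with shift/mask arithmetic. A portable sketch of that technique, with a GCC builtin standing in for the lwarx/stwcx. loop (big-endian byte numbering is assumed here; the real code derives the shift from endianness macros):

    #include <stdint.h>

    // Portable illustration of the byte-in-word CAS technique; not the
    // patch's actual code, which runs an lwarx/stwcx. loop instead of
    // __sync_bool_compare_and_swap.
    inline int8_t cmpxchg_byte_sketch(int8_t exchange_value,
                                      volatile int8_t* dest,
                                      int8_t compare_value) {
      volatile uint32_t* aligned =
          (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
      const unsigned shift = 8 * (3 - ((uintptr_t)dest & 3));  // big-endian
      const uint32_t mask = (uint32_t)0xff << shift;
      for (;;) {
        const uint32_t old_word = *aligned;
        const uint8_t old_byte = (uint8_t)(old_word >> shift);
        if (old_byte != (uint8_t)compare_value) {
          return (int8_t)old_byte;                   // compare failed
        }
        const uint32_t new_word =
            (old_word & ~mask) | ((uint32_t)(uint8_t)exchange_value << shift);
        if (__sync_bool_compare_and_swap(aligned, old_word, new_word)) {
          return (int8_t)old_byte;                   // swap succeeded
        }
        // A neighboring byte changed under us; reload and retry.
      }
    }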
*** 410,424 ****
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (jint) old_value;
  }
  
- inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    long old_value;
--- 382,397 ----
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (int32_t) old_value;
  }
  
+ template <>
+ inline int64_t GeneralizedAtomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  
    // Note that cmpxchg guarantees a two-way memory barrier across
    // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
    // specified otherwise (see atomic.hpp).
  
    long old_value;
*** 454,473 ****
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (jlong) old_value;
! }
! 
! inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
!   return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
  }
  
- inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
- }
  
  #undef strasm_sync
  #undef strasm_lwsync
  #undef strasm_isync
  #undef strasm_release
--- 427,439 ----
        "memory"
      );
  
    cmpxchg_post_membar(order);
  
!   return (int64_t) old_value;
  }
  
  
  #undef strasm_sync
  #undef strasm_lwsync
  #undef strasm_isync
  #undef strasm_release
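After the refactoring the platform file defines only GeneralizedAtomic::specialized_* template specializations; callers presumably still go through the shared Atomic front end, which canonicalizes types such as jint to fixed-width integers and forwards to these specializations. A hypothetical sketch of that dispatch, with stand-in names (the real shared front end in share/vm/runtime/atomic.hpp is not shown in this webrev):

    #include <stdint.h>

    // Illustrative stand-ins only: the real class, dispatch, and type
    // canonicalization live in the shared header, and a GCC builtin
    // replaces the platform's lwarx/stwcx. loop here.
    struct GeneralizedAtomicSketch {
      template <typename T>
      static T specialized_add(T add_value, volatile T* dest);
    };

    // What a platform port supplies: one specialization per width.
    template <>
    inline int32_t GeneralizedAtomicSketch::specialized_add<int32_t>(
        int32_t add_value, volatile int32_t* dest) {
      return __sync_add_and_fetch(dest, add_value);  // stand-in for the asm loop
    }

    // What the shared front end does: canonicalize the type, then forward.
    inline int32_t atomic_add_sketch(int32_t add_value, volatile int32_t* dest) {
      return GeneralizedAtomicSketch::specialized_add<int32_t>(add_value, dest);
    }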