
src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp

rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2

*** 38,56 ****
  inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
  
  inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
  
  inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
  
! inline jint Atomic::add    (jint add_value, volatile jint* dest) {
!   jint addend = add_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
!                     : "=r" (addend)
!                     : "0" (addend), "r" (dest)
                      : "cc", "memory");
!   return addend + add_value;
  }
  
  inline void Atomic::inc    (volatile jint*     dest) {
    __asm__ volatile (  "lock addl $1,(%0)" :
                      : "r" (dest) : "cc", "memory");
  }
--- 38,71 ----
  inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
  
  inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
  
  inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
  
! template<size_t byte_size>
! struct Atomic::PlatformAdd
!   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
! {
!   template<typename I, typename D>
!   D fetch_and_add(I add_value, D volatile* dest) const;
! };
! 
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(4 == sizeof(I));
!   STATIC_ASSERT(4 == sizeof(D));
!   D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
!                     : "=r" (old_value)
!                     : "0" (add_value), "r" (dest)
                      : "cc", "memory");
!   return old_value;
  }
  
+ template<>
+ struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
+ 
  inline void Atomic::inc    (volatile jint*     dest) {
    __asm__ volatile (  "lock addl $1,(%0)" :
                      : "r" (dest) : "cc", "memory");
  }
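Note on the hunk above: the new PlatformAdd<4>::fetch_and_add returns the old value of *dest, whereas the removed Atomic::add returned the new value (addend + add_value). The diff only names the shared CRTP base Atomic::FetchAndAdd, so the following is a minimal sketch, assuming that base recovers add-and-fetch semantics by re-adding add_value; the operator() signature is an assumption, not part of this change:

    // Sketch: CRTP wrapper turning a platform fetch_and_add (which returns
    // the pre-add value) into the add-and-fetch result Atomic::add returns.
    template<typename Derived>
    struct Atomic::FetchAndAdd {
      template<typename I, typename D>
      D operator()(I add_value, D volatile* dest) const {
        // fetch_and_add yields the old value; add add_value once more
        // to produce the new value seen by callers of Atomic::add.
        return static_cast<const Derived*>(this)->fetch_and_add(add_value, dest)
               + add_value;
      }
    };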
*** 109,129 ****
  #ifdef AMD64
  inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
  inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
  
! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
!   intptr_t addend = add_value;
    __asm__ __volatile__ (  "lock xaddq %0,(%2)"
!                         : "=r" (addend)
!                         : "0" (addend), "r" (dest)
                          : "cc", "memory");
!   return addend + add_value;
! }
! 
! inline void*   Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
!   return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
  }
  
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    __asm__ __volatile__ (  "lock addq $1,(%0)"
                          :
--- 124,144 ----
  #ifdef AMD64
  inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
  inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
  
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(8 == sizeof(I));
!   STATIC_ASSERT(8 == sizeof(D));
!   D old_value;
    __asm__ __volatile__ (  "lock xaddq %0,(%2)"
!                         : "=r" (old_value)
!                         : "0" (add_value), "r" (dest)
                          : "cc", "memory");
!   return old_value;
  }
  
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    __asm__ __volatile__ (  "lock addq $1,(%0)"
                          :
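With the 8-byte specialization above, pointer-sized adds on AMD64 reach the lock xaddq path through template dispatch instead of the hand-written add_ptr overloads. A hedged usage sketch, assuming the shared atomic.hpp forwards Atomic::add_ptr to the templated add:

    volatile intptr_t counter = 0;
    // On AMD64 sizeof(intptr_t) == 8, so this presumably selects
    // Atomic::PlatformAdd<8>::fetch_and_add; the value returned to the
    // caller is the incremented (new) value, i.e. 1 here.
    intptr_t result = Atomic::add_ptr((intptr_t)1, &counter);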
*** 162,180 ****
  inline jlong Atomic::load(const volatile jlong* src) { return *src; }
  
  #else // !AMD64
  
- inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
- }
- 
- inline void*   Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-   return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
- }
- 
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    inc((volatile jint*)dest);
  }
  
  inline void Atomic::dec_ptr(volatile intptr_t* dest) {
--- 177,186 ----
  inline jlong Atomic::load(const volatile jlong* src) { return *src; }
  
  #else // !AMD64
  
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    inc((volatile jint*)dest);
  }
  
  inline void Atomic::dec_ptr(volatile intptr_t* dest) {
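The 32-bit add_ptr overloads can be deleted because on !AMD64 sizeof(intptr_t) == 4, so pointer-sized adds are assumed to fall through to the PlatformAdd<4> specialization from the first hunk; the (jint) casts the removed code performed by hand now happen via template selection. An illustrative sketch under that assumption:

    volatile intptr_t v = 0;
    // On 32-bit x86 this is assumed to dispatch to
    // Atomic::PlatformAdd<4>::fetch_and_add via the shared Atomic layer,
    // matching what the deleted jint-casting overloads did.
    intptr_t r = Atomic::add_ptr((intptr_t)2, &v);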