
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2

*** 89,101 ****
  // likelihood that the hardware would pull loads/stores into the region guarded
  // by the reservation.
  //
  // For ARMv7 we add explicit barriers in the stubs.
  
! inline jint Atomic::add(jint add_value, volatile jint* dest) {
  #ifdef AARCH64
!   jint val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %w[val], [%[dest]]\n\t"
      " add %w[val], %w[val], %w[add_val]\n\t"
--- 89,113 ----
  // likelihood that the hardware would pull loads/stores into the region guarded
  // by the reservation.
  //
  // For ARMv7 we add explicit barriers in the stubs.
  
! template<size_t byte_size>
! struct Atomic::PlatformAdd
!   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
! {
!   template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest) const;
! };
! 
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(4 == sizeof(I));
!   STATIC_ASSERT(4 == sizeof(D));
  #ifdef AARCH64
!   D val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %w[val], [%[dest]]\n\t"
      " add %w[val], %w[val], %w[add_val]\n\t"
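The hunk above shows only the first half of the AArch64 retry loop; the store-exclusive and branch lines fall outside the diff context. For reference, below is a self-contained sketch of the same add-and-fetch pattern. It is illustrative only (plain int32_t instead of the templated D, an invented function name, not HotSpot code), and the stlxr/cbnz lines are inferred from the constraint list and the 8-byte hunk further down.

    // Illustrative sketch only, not HotSpot code.  Same ldaxr/add/stlxr
    // retry loop as the 4-byte specialization above, written as a plain
    // function so the acquire/release pairing and the "returns the new
    // value" (add-and-fetch) contract are easy to see.
    #include <stdint.h>

    inline int32_t add_and_fetch_4(int32_t add_value, volatile int32_t* dest) {
      int32_t val;
      int tmp;
      __asm__ volatile(
        "1:\n\t"
        " ldaxr %w[val], [%[dest]]\n\t"            // load-acquire exclusive
        " add %w[val], %w[val], %w[add_val]\n\t"
        " stlxr %w[tmp], %w[val], [%[dest]]\n\t"   // store-release exclusive
        " cbnz %w[tmp], 1b\n\t"                    // reservation lost: retry
        : [val] "=&r" (val), [tmp] "=&r" (tmp)
        : [add_val] "r" (add_value), [dest] "r" (dest)
        : "memory");
      return val;  // the updated value, matching AddAndFetch semantics
    }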
*** 104,114 ****
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
  #else
!   return (*os::atomic_add_func)(add_value, dest);
  #endif
  }
  
  inline void Atomic::inc(volatile jint* dest) {
    Atomic::add(1, (volatile jint *)dest);
--- 116,126 ----
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
  #else
!   return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
  #endif
  }
  
  inline void Atomic::inc(volatile jint* dest) {
    Atomic::add(1, (volatile jint *)dest);
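On 32-bit ARM the add no longer calls os::atomic_add_func directly but goes through add_using_helper<jint>. The sketch below shows the general shape such an adapter boils down to; the name, signature and conversions are assumptions for illustration, not the shared definition in atomic.hpp.

    // Illustrative sketch only: convert the typed operands down to the
    // primitive type the out-of-line assembler stub works on (jint in the
    // call above), invoke the stub, and convert the result back to the
    // caller's type.
    #include <stdint.h>

    template<typename StubType, typename I, typename D>
    inline D add_via_stub(StubType (*stub)(StubType, StubType volatile*),
                          I add_value, D volatile* dest) {
      return static_cast<D>(stub(static_cast<StubType>(add_value),
                                 reinterpret_cast<StubType volatile*>(dest)));
    }

    // Hypothetical usage: add_via_stub<int32_t>(some_asm_stub, 1, &counter);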
*** 116,128 ****
  inline void Atomic::dec(volatile jint* dest) {
    Atomic::add(-1, (volatile jint *)dest);
  }
  
- inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  #ifdef AARCH64
!   intptr_t val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[val], [%[dest]]\n\t"
      " add %[val], %[val], %[add_val]\n\t"
--- 128,144 ----
  inline void Atomic::dec(volatile jint* dest) {
    Atomic::add(-1, (volatile jint *)dest);
  }
  
  #ifdef AARCH64
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(8 == sizeof(I));
!   STATIC_ASSERT(8 == sizeof(D));
!   D val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[val], [%[dest]]\n\t"
      " add %[val], %[val], %[add_val]\n\t"
*** 130,147 ****
      " cbnz %w[tmp], 1b\n\t"
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
- #else
-   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
- #endif
  }
  
! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
! }
  
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    Atomic::add_ptr(1, dest);
  }
--- 146,160 ----
      " cbnz %w[tmp], 1b\n\t"
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
  }
+ #endif // AARCH64
  
! template<>
! struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
  
  inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    Atomic::add_ptr(1, dest);
  }
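The 2-byte specialization is delegated to Atomic::AddShortUsingInt, i.e. a 16-bit add layered on the 32-bit one. Below is a hedged sketch of the usual form of that trick, written with a GCC __atomic builtin rather than the stubs above; it assumes, as such adapters typically do, that the short occupies the high half of a naturally aligned 32-bit word, so adding add_value << 16 to that word cannot carry into the neighbouring half. Names and details are illustrative, not the shared-code definition.

    // Illustrative sketch only: a 2-byte atomic add built on a 4-byte one.
    // Assumes little-endian layout and that dest is the high half of its
    // aligned 32-bit word, i.e. (uintptr_t)dest % 4 == 2.
    #include <stdint.h>
    #include <assert.h>

    inline int16_t add_short_using_int(int16_t add_value, volatile int16_t* dest) {
      assert((reinterpret_cast<uintptr_t>(dest) & 3u) == 2u);
      volatile uint32_t* base = reinterpret_cast<volatile uint32_t*>(
          reinterpret_cast<uintptr_t>(dest) - 2);
      // Shift the addend into the high half; the low half is untouched.
      uint32_t result = __atomic_add_fetch(base,
                                           uint32_t(uint16_t(add_value)) << 16,
                                           __ATOMIC_SEQ_CST);
      return int16_t(result >> 16);  // new value of the short
    }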