
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13266 : imported patch Atomic_refactoring
rev 13267 : [mq]: Atomic_polishing
rev 13268 : [mq]: Atomic_polishing_v2

*** 145,155 ****
  }
  
  template <>
  inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  #ifdef AARCH64
!   jlong rv;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[rv], [%[dest]]\n\t"
      " cmp %[rv], %[cv]\n\t"
--- 145,155 ----
  }
  
  template <>
  inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  #ifdef AARCH64
!   int64_t rv;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[rv], [%[dest]]\n\t"
      " cmp %[rv], %[cv]\n\t"
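
For context, the ldaxr/stlxr loop in this hunk implements a 64-bit compare-and-exchange; the patch only changes the local variable's type from jlong to int64_t to match the new templated signature. A minimal standalone sketch of the same contract, using the GCC/Clang __atomic built-ins rather than HotSpot's Atomic API (the name cas64_sketch and the seq_cst ordering choice are illustrative assumptions, not taken from this patch):

  #include <cstdint>
  #include <cstdio>

  // Illustrative equivalent of the specialized_cmpxchg<int64_t> contract:
  // if *dest == compare_value, store exchange_value; always return the
  // value that was observed in *dest (the old value).
  static int64_t cas64_sketch(int64_t exchange_value,
                              volatile int64_t* dest,
                              int64_t compare_value) {
    int64_t expected = compare_value;
    // Strong CAS; seq_cst is a conservative stand-in for the barriers the
    // hand-written ldaxr/stlxr + dmb sequence provides.
    __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                /*weak=*/false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;  // old value, whether or not the swap happened
  }

  int main() {
    int64_t v = 5;
    int64_t old = cas64_sketch(9, &v, 5);   // succeeds: v becomes 9
    std::printf("old=%lld new=%lld\n", (long long)old, (long long)v);
  }
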
*** 171,181 ****
  }
  
  #ifdef AARCH64
  template <>
  inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
!   intptr_t val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[val], [%[dest]]\n\t"
      " add %[val], %[val], %[add_val]\n\t"
--- 171,181 ----
  }
  
  #ifdef AARCH64
  template <>
  inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
!   int64_t val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[val], [%[dest]]\n\t"
      " add %[val], %[val], %[add_val]\n\t"
--- 171,181 ----
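
The add specialization returns the new value: the loop loads with ldaxr, adds, publishes with stlxr, and then returns val after the addition. A hedged standalone sketch of that add-and-fetch behaviour with a compiler built-in (add64_sketch is an illustrative name, not part of the patch):

  #include <cstdint>
  #include <cassert>

  // Illustrative equivalent of specialized_add<int64_t>: atomically add
  // add_value to *dest and return the resulting (new) value, matching the
  // "add then stlxr, return val" shape of the assembly above.
  static int64_t add64_sketch(int64_t add_value, volatile int64_t* dest) {
    return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
  }

  int main() {
    int64_t counter = 40;
    assert(add64_sketch(2, &counter) == 42);  // returns the updated value
    assert(counter == 42);
  }
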
*** 187,197 ****
    return val;
  }
  
  template <>
  inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
!   intptr_t old_val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[old_val], [%[dest]]\n\t"
      " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
--- 187,197 ----
    return val;
  }
  
  template <>
  inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
!   int64_t old_val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
      " ldaxr %[old_val], [%[dest]]\n\t"
      " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
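
Likewise, the xchg specialization returns the previous contents of *dest: old_val is what ldaxr observed before stlxr publishes the new value. A minimal sketch under the same assumptions as above (xchg64_sketch is an illustrative name):

  #include <cstdint>
  #include <cassert>

  // Illustrative equivalent of specialized_xchg<int64_t>: atomically store
  // exchange_value into *dest and return the value it replaced.
  static int64_t xchg64_sketch(int64_t exchange_value, volatile int64_t* dest) {
    return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
  }

  int main() {
    int64_t slot = 7;
    assert(xchg64_sketch(11, &slot) == 7);  // the old value comes back
    assert(slot == 11);
  }
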