
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13452 : [mq]: coleen_review1
rev 13458 : imported patch cmpxchg_using_helper


 179 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
 180 #ifdef AARCH64
 181   intptr_t old_val;
 182   int tmp;
 183   __asm__ volatile(
 184     "1:\n\t"
 185     " ldaxr %[old_val], [%[dest]]\n\t"
 186     " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
 187     " cbnz %w[tmp], 1b\n\t"
 188     : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
 189     : [new_val] "r" (exchange_value), [dest] "r" (dest)
 190     : "memory");
 191   return old_val;
 192 #else
 193   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 194 #endif
 195 }
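
For readers less at home in ARMv8 assembly: the LDAXR/STLXR sequence above is an exclusive load-acquire / store-release retry loop, i.e. an atomic swap with conservative ordering. A rough functional equivalent, sketched with the GCC __atomic builtins (illustrative only; HotSpot keeps the barriers explicit in the asm):

  // Sketch: approximates the ldaxr/stlxr exchange loop above. SEQ_CST here
  // stands in for the acquire/release pair plus the retry on contention.
  inline intptr_t xchg_sketch(intptr_t exchange_value, volatile intptr_t* dest) {
    return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
  }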
 196 
 197 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 198   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 199 }
 200 
 201 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
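
In other words, callers cannot weaken the ordering on this platform, only request less and receive more. A hypothetical call site (names invented for illustration; argument order follows this file's era, exchange value first):

  // Even a relaxed request executes the fully fenced sequence on linux_arm.
  jint prev = Atomic::cmpxchg(new_val, &_flag, old_val, memory_order_relaxed);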
 202 
 203 // No direct support for cmpxchg of bytes; emulate using int.
 204 template<>
 205 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
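
CmpxchgByteUsingInt lives in shared code. The idea, as a hedged standalone sketch (little-endian byte placement and all names here are illustrative, not HotSpot's implementation): widen to the aligned 4-byte word containing the byte, splice in the new byte, and cmpxchg the whole word, retrying when a neighboring byte changed underneath us.

  #include <stdint.h>

  inline uint8_t byte_cmpxchg_sketch(uint8_t exchange_value,
                                     volatile uint8_t* dest,
                                     uint8_t compare_value) {
    // Aligned word containing *dest, and the byte's position within it
    // (little-endian assumed for this sketch).
    volatile uint32_t* word = (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
    const uint32_t shift = ((uintptr_t)dest & 3) * 8;
    const uint32_t mask  = (uint32_t)0xff << shift;
    for (;;) {
      uint32_t cur = *word;
      uint8_t cur_byte = (uint8_t)(cur >> shift);
      if (cur_byte != compare_value) return cur_byte;     // CAS fails: report what we saw
      uint32_t next = (cur & ~mask) | ((uint32_t)exchange_value << shift);
      // 4-byte CAS on the containing word; a neighbor changing forces a retry.
      if (__sync_val_compare_and_swap(word, cur, next) == cur) {
        return compare_value;                             // success
      }
    }
  }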
 206 
 207 #ifndef AARCH64
 208 
 209 inline jint reorder_cmpxchg_func(jint exchange_value,
 210                                  jint volatile* dest,
 211                                  jint compare_value) {
 212   // Warning:  Arguments are swapped to avoid moving them for kernel call
 213   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 214 }
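
The "arguments are swapped" warning refers to the 32-bit ARM Linux kernel's user-space cmpxchg helper, which expects (oldval, newval, ptr). Taking compare_value first means a stub behind os::atomic_cmpxchg_func can fall through to the helper without reshuffling argument registers. The convention, quoted here as an assumption from the kernel's user-helper ABI rather than from this file:

  // ARM Linux kuser helper: __kernel_cmpxchg(oldval, newval, ptr) at the
  // fixed vector-page address 0xffff0fc0; returns zero on success.
  typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
  #define __kernel_cmpxchg (*(__kernel_cmpxchg_t*)0xffff0fc0)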
 215 
 216 inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
 217                                        jlong volatile* dest,
 218                                        jlong compare_value) {
 219   assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
 220   // Warning:  Arguments are swapped to avoid moving them for kernel call
 221   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 222 }
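
These adapters exist so that the shared cmpxchg_using_helper template can call through a plain function pointer with a fixed primitive signature. Its role, paraphrased as a sketch (the real template lives in shared Atomic code and uses HotSpot's conversion utilities rather than raw casts):

  // Sketch of the helper-dispatch idea: cast T to the stub's primitive
  // type, call the stub, cast the observed value back to T.
  template<typename Type, typename Fn, typename T>
  inline T cmpxchg_using_helper_sketch(Fn fn,
                                       T exchange_value,
                                       T volatile* dest,
                                       T compare_value) {
    STATIC_ASSERT(sizeof(Type) == sizeof(T));
    return (T)fn((Type)exchange_value,
                 reinterpret_cast<Type volatile*>(dest),
                 (Type)compare_value);
  }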
 223 
 224 #endif // !AARCH64
 225 
 226 template<>
 227 template<typename T>
 228 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 229                                                 T volatile* dest,
 230                                                 T compare_value,
 231                                                 cmpxchg_memory_order order) const {
 232   STATIC_ASSERT(4 == sizeof(T));
 233 #ifdef AARCH64
 234   T rv;
 235   int tmp;
 236   __asm__ volatile(
 237     "1:\n\t"
 238     " ldaxr %w[rv], [%[dest]]\n\t"
 239     " cmp %w[rv], %w[cv]\n\t"
 240     " b.ne 2f\n\t"
 241     " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
 242     " cbnz %w[tmp], 1b\n\t"
 243     " b 3f\n\t"
 244     "2:\n\t"
 245     " dmb sy\n\t"
 246     "3:\n\t"
 247     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 248     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 249     : "memory");
 250   return rv;
 251 #else
 252   return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 253 #endif
 254 }
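
The AArch64 branch above is the canonical LDAXR/STLXR compare-and-swap loop: load-acquire the current value, branch out on mismatch (taking a full dmb sy so the failure path is as strongly ordered as the success path), otherwise attempt the store-release and retry if the exclusive monitor was lost. A hedged functional equivalent with GCC builtins (sketch only; the shipped code is the asm):

  // Strong CAS, conservatively ordered on both outcomes, mirroring the
  // ldaxr/stlxr loop plus the dmb on the failure path.
  inline int cas_sketch(int exchange_value, volatile int* dest, int compare_value) {
    int expected = compare_value;
    __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                /* weak */ false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;  // the value observed at *dest, like 'rv' above
  }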
 255 
 256 template<>
 257 template<typename T>
 258 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 259                                                 T volatile* dest,
 260                                                 T compare_value,
 261                                                 cmpxchg_memory_order order) const {
 262   STATIC_ASSERT(8 == sizeof(T));
 263 #ifdef AARCH64
 264   T rv;
 265   int tmp;
 266   __asm__ volatile(
 267     "1:\n\t"
 268     " ldaxr %[rv], [%[dest]]\n\t"
 269     " cmp %[rv], %[cv]\n\t"
 270     " b.ne 2f\n\t"
 271     " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
 272     " cbnz %w[tmp], 1b\n\t"
 273     " b 3f\n\t"
 274     "2:\n\t"
 275     " dmb sy\n\t"
 276     "3:\n\t"
 277     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 278     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 279     : "memory");
 280   return rv;
 281 #else
 282   return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 283 #endif
 284 }
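
A hypothetical call site, to show the size dispatch: an 8-byte operand reaches the PlatformCmpxchg<8> specialization above through the shared Atomic::cmpxchg front end (field name invented; argument order is this era's exchange-dest-compare):

  volatile jlong _seq = 0;
  jlong witnessed = Atomic::cmpxchg((jlong)1, &_seq, (jlong)0);
  // witnessed == 0 means we installed 1; otherwise it holds the value that won.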
 285 
 286 #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP