
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13443 : imported patch linux_arm


 183   __asm__ volatile(
 184     "1:\n\t"
 185     " ldaxr %[old_val], [%[dest]]\n\t"
 186     " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
 187     " cbnz %w[tmp], 1b\n\t"
 188     : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
 189     : [new_val] "r" (exchange_value), [dest] "r" (dest)
 190     : "memory");
 191   return old_val;
 192 #else
 193   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 194 #endif
 195 }
 196 
 197 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 198   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 199 }
 200 
 201   // The memory_order parameter is ignored; we always provide the strongest (most conservative) ordering
 202 
 203 inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
 204 #ifdef AARCH64
 205   jint rv;
 206   int tmp;
 207   __asm__ volatile(
 208     "1:\n\t"
 209     " ldaxr %w[rv], [%[dest]]\n\t"
 210     " cmp %w[rv], %w[cv]\n\t"
 211     " b.ne 2f\n\t"
 212     " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
 213     " cbnz %w[tmp], 1b\n\t"
 214     " b 3f\n\t"
 215     "2:\n\t"
 216     " dmb sy\n\t"
 217     "3:\n\t"
 218     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 219     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 220     : "memory");
 221   return rv;
 222 #else
 223   // Warning:  Arguments are swapped to avoid moving them for kernel call
 224   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 225 #endif
 226 }
 227 
 228 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
 229 #ifdef AARCH64
 230   jlong rv;
 231   int tmp;
 232   __asm__ volatile(
 233     "1:\n\t"
 234     " ldaxr %[rv], [%[dest]]\n\t"
 235     " cmp %[rv], %[cv]\n\t"
 236     " b.ne 2f\n\t"
 237     " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
 238     " cbnz %w[tmp], 1b\n\t"
 239     " b 3f\n\t"
 240     "2:\n\t"
 241     " dmb sy\n\t"
 242     "3:\n\t"
 243     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 244     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 245     : "memory");
 246   return rv;
 247 #else
 248   assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
 249   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 250 #endif
 251 }
 252 
 253 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
 254 #ifdef AARCH64
 255   return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
 256 #else
 257   return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
 258 #endif
 259 }
 260 
 261 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
 262   return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
 263 }
 264 
 265 #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP


 183   __asm__ volatile(
 184     "1:\n\t"
 185     " ldaxr %[old_val], [%[dest]]\n\t"
 186     " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
 187     " cbnz %w[tmp], 1b\n\t"
 188     : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
 189     : [new_val] "r" (exchange_value), [dest] "r" (dest)
 190     : "memory");
 191   return old_val;
 192 #else
 193   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 194 #endif
 195 }
 196 
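The ldaxr/stlxr pair above is a load-acquire/store-release exclusive loop; cbnz retries until the exclusive store succeeds, which is how AArch64 expresses a full-strength atomic exchange. As a rough builtin-level equivalent (a sketch for orientation only, not what the port compiles):

inline intptr_t xchg_sketch(intptr_t exchange_value, volatile intptr_t* dest) {
  // One seq_cst exchange: acquire on the load, release on the store,
  // retry loop implied, the same shape as the ldaxr/stlxr/cbnz above.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}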
 197 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 198   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 199 }
 200 
 201   // The memory_order parameter is ignored; we always provide the strongest (most conservative) ordering
 202 
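Concretely, both cmpxchg_memory_order enumerators land on the same code path in this file; assuming the shared-code enumerator names of this HotSpot vintage, a caller observes identical fencing either way:

// Illustrative only; 'flag' is a hypothetical jint field, not from this header.
jint a = Atomic::cmpxchg(1, &flag, 0, memory_order_relaxed);      // still full-barrier here
jint b = Atomic::cmpxchg(1, &flag, 0, memory_order_conservative); // same path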
 203 template<>
 204 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 205 
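CmpxchgByteUsingInt is the shared-code fallback for targets without a native one-byte CAS: it CASes the aligned 32-bit word containing the byte, failing fast if the byte itself differs and retrying only when a neighboring byte moved. A self-contained sketch of that idea using GCC builtins, assuming little-endian byte order (this shows the technique, not the HotSpot source):

#include <stdint.h>

inline uint8_t cmpxchg_byte_via_word(uint8_t exchange_value,
                                     volatile uint8_t* dest,
                                     uint8_t compare_value) {
  uintptr_t addr = (uintptr_t)dest;
  volatile uint32_t* word = (volatile uint32_t*)(addr & ~(uintptr_t)3);
  const int shift = (int)(addr & 3) * 8;     // byte position, little-endian
  const uint32_t mask = (uint32_t)0xff << shift;
  for (;;) {
    uint32_t cur = *word;
    uint8_t seen = (uint8_t)(cur >> shift);
    if (seen != compare_value) return seen;  // byte mismatch: CAS fails
    uint32_t repl = (cur & ~mask) | ((uint32_t)exchange_value << shift);
    // Word-wide CAS; only a concurrent change to some byte forces a retry.
    if (__atomic_compare_exchange_n(word, &cur, repl, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
      return compare_value;                  // success: old byte matched
  }
}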
 206 #ifndef AARCH64
 207 
 208 inline jint reorder_cmpxchg_func(jint exchange_value,
 209                                  jint volatile* dest,
 210                                  jint compare_value) {
 211   // Warning:  Arguments are swapped to avoid moving them for kernel call
 212   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 213 }
 214 
 215 inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
 216                                        jlong volatile* dest,
 217                                        jlong compare_value) {
 218   assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
 219   // Warning:  Arguments are swapped to avoid moving them for kernel call
 220   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 221 }
 222 
 223 #endif // !AARCH64
 224 
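The argument order in both stubs matches the 32-bit ARM kernel user helpers, which take the old (compare) value first; since the AAPCS passes the first three arguments in r0-r2, the stub can call through without register shuffling. For background, the helper described in the kernel's Documentation/arm/kernel_user_helpers.txt looks like this (whether os::atomic_cmpxchg_func actually routes there depends on the CPU and stub selection, so treat this as an assumption about one possible backend):

// Kernel-provided cmpxchg at a fixed address in 32-bit ARM user space:
//   r0 = oldval, r1 = newval, r2 = ptr; returns 0 iff *ptr was updated.
typedef int (*kernel_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
#define KERNEL_CMPXCHG ((kernel_cmpxchg_t)0xffff0fc0)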
 225 template<>
 226 template<typename T>
 227 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 228                                                 T volatile* dest,
 229                                                 T compare_value,
 230                                                 cmpxchg_memory_order order) const {
 231   STATIC_ASSERT(4 == sizeof(T));
 232 #ifdef AARCH64
 233   T rv;
 234   int tmp;
 235   __asm__ volatile(
 236     "1:\n\t"
 237     " ldaxr %w[rv], [%[dest]]\n\t"
 238     " cmp %w[rv], %w[cv]\n\t"
 239     " b.ne 2f\n\t"
 240     " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
 241     " cbnz %w[tmp], 1b\n\t"
 242     " b 3f\n\t"
 243     "2:\n\t"
 244     " dmb sy\n\t"
 245     "3:\n\t"
 246     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 247     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 248     : "memory");
 249   return rv;
 250 #else
 251   return cmpxchg_using_stub<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 252 #endif
 253 }
 254 
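Note the failure path: b.ne skips the store-release entirely, so the dmb sy at label 2 is what preserves the conservative full-barrier promise when no store happens. At the builtin level, the whole 4-byte operation behaves roughly like this sketch (orientation only, not the port's code):

inline int cmpxchg_conservative_sketch(int exchange_value, volatile int* dest,
                                       int compare_value) {
  int expected = compare_value;
  // Strong CAS, seq_cst on both success and failure, mirroring
  // stlxr (release) on success and dmb sy on the failure path.
  __atomic_compare_exchange_n(dest, &expected, exchange_value, false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;  // old value; equals compare_value iff the CAS succeeded
}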
 255 template<>
 256 template<typename T>
 257 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 258                                                 T volatile* dest,
 259                                                 T compare_value,
 260                                                 cmpxchg_memory_order order) const {
 261   STATIC_ASSERT(8 == sizeof(T));
 262 #ifdef AARCH64
 263   T rv;
 264   int tmp;
 265   __asm__ volatile(
 266     "1:\n\t"
 267     " ldaxr %[rv], [%[dest]]\n\t"
 268     " cmp %[rv], %[cv]\n\t"
 269     " b.ne 2f\n\t"
 270     " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
 271     " cbnz %w[tmp], 1b\n\t"
 272     " b 3f\n\t"
 273     "2:\n\t"
 274     " dmb sy\n\t"
 275     "3:\n\t"
 276     : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
 277     : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
 278     : "memory");
 279   return rv;
 280 #else
 281   return cmpxchg_using_stub<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 282 #endif
 283 }
 284 
 285 #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
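With the bodies moved into PlatformCmpxchg<1/4/8>, callers go through the shared Atomic front end, which selects the specialization by operand size at compile time. An illustrative caller (field names hypothetical):

// sizeof(jint) == 4 resolves to PlatformCmpxchg<4>; jlong to PlatformCmpxchg<8>.
jint  old32 = Atomic::cmpxchg((jint)1,  &_state,   (jint)0);
jlong old64 = Atomic::cmpxchg((jlong)1, &_counter, (jlong)0);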