
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13463 : imported patch linux_arm
rev 13472 : imported patch coleen_review1
rev 13478 : imported patch cmpxchg_using_helper


Old version (before the cmpxchg_using_helper patch):

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  jint rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  // Warning:  Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
#endif
}

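Callers typically wrap this primitive in a retry loop: read the current value, attempt the CAS, and start over if another thread won the race. A minimal usage sketch (illustrative only; saturating_increment is not part of this file, and the three-argument call assumes the shared declaration's default conservative memory order):

inline jint saturating_increment(volatile jint* counter, jint max) {
  for (;;) {
    jint cur = *counter;
    if (cur >= max) return cur;                      // already saturated
    jint prev = Atomic::cmpxchg(cur + 1, counter, cur);
    if (prev == cur) return cur + 1;                 // CAS succeeded
    // Another thread updated *counter first; retry with the fresh value.
  }
}
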
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  jlong rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
#endif
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
#else
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
#endif
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
}

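The pointer flavor is what lock-free linked structures build on. A hedged sketch of a Treiber-stack push (illustrative only; Node is a hypothetical type, and the three-argument call assumes the default conservative memory order):

struct Node { Node* volatile next; };

inline void push(Node* volatile* top, Node* n) {
  for (;;) {
    Node* head = *top;
    n->next = head;
    // Publish n only if *top is still head; otherwise re-read and re-link.
    if (Atomic::cmpxchg_ptr((void*)n, (volatile void*)top, (void*)head) == (void*)head) {
      return;
    }
  }
}
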
#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP

New version (after the cmpxchg_using_helper patch). The xchg code and the memory_order comment are unchanged; the cmpxchg member functions above are replaced by PlatformCmpxchg specializations:

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

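CmpxchgByteUsingInt comes from the shared atomic.hpp: it implements the one-byte CAS by performing a four-byte CAS on the aligned word that contains the byte. A hedged sketch of the technique (not the HotSpot source; cmpxchg_word is a hypothetical stand-in for a four-byte CAS like the one below):

#include <stdint.h>

uint32_t cmpxchg_word(uint32_t exchange_value, volatile uint32_t* dest, uint32_t compare_value);

inline uint8_t cmpxchg_byte_sketch(uint8_t exchange_value,
                                   volatile uint8_t* dest,
                                   uint8_t compare_value) {
  // Locate the aligned 32-bit word holding *dest and the byte's position in
  // it (the shift assumes little-endian byte order, as on Linux/ARM).
  volatile uint32_t* aligned = (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
  const int shift = 8 * (int)((uintptr_t)dest & 3);
  const uint32_t mask = (uint32_t)0xff << shift;
  uint32_t cur = *aligned;
  for (;;) {
    uint8_t cur_byte = (uint8_t)(cur >> shift);
    if (cur_byte != compare_value) return cur_byte;  // observed mismatch: fail
    // Splice the new byte into the word and CAS the whole word.
    uint32_t next = (cur & ~mask) | ((uint32_t)exchange_value << shift);
    uint32_t prev = cmpxchg_word(next, aligned, cur);
    if (prev == cur) return compare_value;           // success
    cur = prev;                                      // a neighboring byte changed: retry
  }
}
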
#ifndef AARCH64

inline jint reorder_cmpxchg_func(jint exchange_value,
                                 jint volatile* dest,
                                 jint compare_value) {
  // Warning:  Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
                                       jlong volatile* dest,
                                       jlong compare_value) {
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
  // Warning:  Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}

#endif // !AARCH64

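The adapters above exist because cmpxchg_using_helper passes arguments in (exchange, dest, compare) order, while the kernel-helper-backed os:: function pointers expect (compare, exchange, dest). The shared glue looks roughly like the following (a hedged sketch recalled from the shared Atomic code, not part of this patch):

template<typename Type, typename Fn, typename T>
inline T cmpxchg_using_helper_sketch(Fn fn,
                                     T exchange_value,
                                     T volatile* dest,
                                     T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  // Reinterpret T as the helper's parameter type, call the helper, and
  // convert the observed old value back to T.
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}
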
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif
}

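In the AArch64 path, ldaxr/stlxr give acquire and release ordering, and the dmb sy on the failure branch keeps a failed CAS ordered as strongly as a successful one (the releasing stlxr never executes there). As a hedged point of comparison (not the HotSpot code path), the loop's semantics roughly match a strong, sequentially consistent compare-exchange built on GCC's __atomic builtins:

inline int cas_conservative_sketch(int exchange_value, volatile int* dest, int compare_value) {
  int expected = compare_value;
  // On failure, 'expected' is updated to the observed value, matching the
  // rv returned by the inline assembly above.
  __atomic_compare_exchange_n(dest, &expected, exchange_value,
                              false /* strong */,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;
}
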
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif
}

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP