
src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp

rev 13449 : imported patch linux_zero
rev 13452 : [mq]: coleen_review1
rev 13458 : imported patch cmpxchg_using_helper


  40  * This implementation is processor specific and works on
  41  * the 68020, 68030, 68040 and 68060.
  42  *
  43  * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
  44  * instruction.
  45  * Using a kernel helper would be better for a complete implementation of the architecture.
  46  *
  47  */
  48 
  49 static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  50   int ret;
  51   __asm __volatile ("cas%.l %0,%2,%1"
  52                    : "=d" (ret), "+m" (*(ptr))
  53                    : "d" (newval), "0" (oldval));
  54   return ret;
  55 }
  56 
  57 /* Perform an atomic compare and swap: if the current value of `*PTR'
  58    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  59    `*PTR' before the operation.*/
  60 static inline int m68k_compare_and_swap(volatile int *ptr,
  61                                         int oldval,
  62                                         int newval) {
  63   for (;;) {
  64       int prev = *ptr;
  65       if (prev != oldval)
  66         return prev;
  67 
  68       if (__m68k_cmpxchg (prev, newval, ptr) == oldval)
  69         // Success.
  70         return prev;
  71 
  72       // We failed even though prev == oldval.  Try again.
  73     }
  74 }
  75 
  76 /* Atomically add an int to memory.  */
  77 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  78   for (;;) {
  79       // Loop until success.
  80 
  81       int prev = *ptr;
  82 
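The hunk is truncated here. Based on the retry pattern of m68k_compare_and_swap above, and on the fact that a successful cas leaves the compare value unchanged in the result register, the add loop plausibly continues along these lines (a sketch, not the elided source):

      // Sketch: plausible remainder of m68k_add_and_fetch.  On success
      // __m68k_cmpxchg returns its compare value (prev); on failure it
      // returns the current contents of *ptr.
      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev)
        // The cas installed our sum; report the new value.
        return prev + add_value;

      // Another thread changed *ptr between the load and the cas; retry.
    }
}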


 101 #ifdef ARM
 102 
 103 /*
 104  * __kernel_cmpxchg
 105  *
 106  * Atomically store newval in *ptr if *ptr is equal to oldval, on behalf of user space.
 107  * Return zero if *ptr was changed or non-zero if no exchange happened.
 108  * The C flag is also set if *ptr was changed to allow for assembly
 109  * optimization in the calling code.
 110  *
 111  */
 112 
 113 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 114 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
 115 
 116 
 117 
 118 /* Perform an atomic compare and swap: if the current value of `*PTR'
 119    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
 120    `*PTR' before the operation.*/
 121 static inline int arm_compare_and_swap(volatile int *ptr,
 122                                        int oldval,
 123                                        int newval) {
 124   for (;;) {
 125       int prev = *ptr;
 126       if (prev != oldval)
 127         return prev;
 128 
 129       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 130         // Success.
 131         return prev;
 132 
 133       // We failed even though prev == oldval.  Try again.
 134     }
 135 }
 136 
 137 /* Atomically add an int to memory.  */
 138 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
 139   for (;;) {
 140       // Loop until a __kernel_cmpxchg succeeds.
 141 
 142       int prev = *ptr;
 143 
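As with the m68k hunk, the tail of this function is elided. Given that __kernel_cmpxchg returns zero exactly when the store happened, the loop presumably finishes in the same shape (a sketch only):

      // Sketch: plausible remainder of arm_add_and_fetch.
      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        // The kernel helper performed the store; report the new value.
        return prev + add_value;

      // Contention: *ptr changed under us.  Retry.
    }
}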


 244 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 245                                  volatile intptr_t* dest) {
 246 #ifdef ARM
 247   return arm_lock_test_and_set(dest, exchange_value);
 248 #else
 249 #ifdef M68K
 250   return m68k_lock_test_and_set(dest, exchange_value);
 251 #else
 252   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
 253   __sync_synchronize();
 254   return result;
 255 #endif // M68K
 256 #endif // ARM
 257 }
 258 
 259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 260   return (void *) xchg_ptr((intptr_t) exchange_value,
 261                            (volatile intptr_t*) dest);
 262 }
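A note on the generic path above: GCC documents __sync_lock_test_and_set as an acquire barrier only, so the trailing __sync_synchronize is what gives Atomic::xchg_ptr its full-fence semantics. With the newer __atomic builtins the pair could be collapsed into one sequentially consistent exchange; a sketch (these builtins postdate this code and are not part of the patch):

  // Full-fence exchange in a single call with the C++11-style builtin.
  intptr_t result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
  return result;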
 263 
 264 inline jint Atomic::cmpxchg(jint exchange_value,
 265                             volatile jint* dest,
 266                             jint compare_value,
 267                             cmpxchg_memory_order order) {
 268 #ifdef ARM
 269   return arm_compare_and_swap(dest, compare_value, exchange_value);
 270 #else
 271 #ifdef M68K
 272   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 273 #else
 274   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 275 #endif // M68K
 276 #endif // ARM
 277 }
 278 
 279 inline jlong Atomic::cmpxchg(jlong exchange_value,
 280                              volatile jlong* dest,
 281                              jlong compare_value,
 282                              cmpxchg_memory_order order) {
 283 
 284   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 285 }
 286 
 287 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
 288                                     volatile intptr_t* dest,
 289                                     intptr_t compare_value,
 290                                     cmpxchg_memory_order order) {
 291 #ifdef ARM
 292   return arm_compare_and_swap(dest, compare_value, exchange_value);
 293 #else
 294 #ifdef M68K
 295   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 296 #else
 297   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 298 #endif // M68K
 299 #endif // ARM
 300 }
 301 
 302 inline void* Atomic::cmpxchg_ptr(void* exchange_value,
 303                                  volatile void* dest,
 304                                  void* compare_value,
 305                                  cmpxchg_memory_order order) {
 306 
 307   return (void *) cmpxchg_ptr((intptr_t) exchange_value,
 308                               (volatile intptr_t*) dest,
 309                               (intptr_t) compare_value,
 310                               order);
 311 }
 312 
 313 inline jlong Atomic::load(const volatile jlong* src) {
 314   volatile jlong dest;
 315   os::atomic_copy64(src, &dest);
 316   return dest;
 317 }
 318 
 319 inline void Atomic::store(jlong store_value, jlong* dest) {
 320   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 321 }
 322 
 323 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 324   os::atomic_copy64((volatile jlong*)&store_value, dest);
 325 }
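The jlong load and store go through os::atomic_copy64 because Zero also targets 32-bit CPUs, where a plain 64-bit access may tear. That helper lives in the platform os code and is not shown in this webrev; a minimal sketch of its shape, assuming a target whose plain 64-bit accesses are already atomic (32-bit targets substitute a CPU-specific sequence):

// Sketch only: the trivial form an os::atomic_copy64 can take.
static void atomic_copy64(const volatile jlong* src, volatile jlong* dst) {
  *dst = *src;  // atomic on 64-bit CPUs; 32-bit CPUs need special handling
}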
 326 
 327 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP


  40  * This implementation is processor specific and works on
  41  * the 68020, 68030, 68040 and 68060.
  42  *
  43  * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
  44  * instruction.
  45  * Using a kernel helper would be better for a complete implementation of the architecture.
  46  *
  47  */
  48 
  49 static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  50   int ret;
  51   __asm __volatile ("cas%.l %0,%2,%1"
  52                    : "=d" (ret), "+m" (*(ptr))
  53                    : "d" (newval), "0" (oldval));
  54   return ret;
  55 }
  56 
  57 /* Perform an atomic compare and swap: if the current value of `*PTR'
  58    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  59    `*PTR' before the operation.*/
  60 static inline int m68k_compare_and_swap(int newval,
  61                                         volatile int *ptr,
  62                                         int oldval) {
  63   for (;;) {
  64       int prev = *ptr;
  65       if (prev != oldval)
  66         return prev;
  67 
  68       if (__m68k_cmpxchg (prev, newval, ptr) == oldval)
  69         // Success.
  70         return prev;
  71 
  72       // We failed even though prev == oldval.  Try again.
  73     }
  74 }
  75 
  76 /* Atomically add an int to memory.  */
  77 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  78   for (;;) {
  79       // Loop until success.
  80 
  81       int prev = *ptr;
  82 


 101 #ifdef ARM
 102 
 103 /*
 104  * __kernel_cmpxchg
 105  *
 106  * Atomically store newval in *ptr if *ptr is equal to oldval, on behalf of user space.
 107  * Return zero if *ptr was changed or non-zero if no exchange happened.
 108  * The C flag is also set if *ptr was changed to allow for assembly
 109  * optimization in the calling code.
 110  *
 111  */
 112 
 113 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 114 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
 115 
 116 
 117 
 118 /* Perform an atomic compare and swap: if the current value of `*PTR'
 119    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
 120    `*PTR' before the operation.*/
 121 static inline int arm_compare_and_swap(int newval,
 122                                        volatile int *ptr,
 123                                        int oldval) {
 124   for (;;) {
 125       int prev = *ptr;
 126       if (prev != oldval)
 127         return prev;
 128 
 129       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 130         // Success.
 131         return prev;
 132 
 133       // We failed even though prev == oldval.  Try again.
 134     }
 135 }
 136 
 137 /* Atomically add an int to memory.  */
 138 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
 139   for (;;) {
 140       // Loop until a __kernel_cmpxchg succeeds.
 141 
 142       int prev = *ptr;
 143 


 244 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 245                                  volatile intptr_t* dest) {
 246 #ifdef ARM
 247   return arm_lock_test_and_set(dest, exchange_value);
 248 #else
 249 #ifdef M68K
 250   return m68k_lock_test_and_set(dest, exchange_value);
 251 #else
 252   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
 253   __sync_synchronize();
 254   return result;
 255 #endif // M68K
 256 #endif // ARM
 257 }
 258 
 259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 260   return (void *) xchg_ptr((intptr_t) exchange_value,
 261                            (volatile intptr_t*) dest);
 262 }
 263 
 264 // No direct support for cmpxchg of bytes; emulate using int.
 265 template<>
 266 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
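CmpxchgByteUsingInt is defined in shared code; the underlying technique is to CAS the aligned 4-byte word containing the byte, splicing the new byte into a copy of that word and retrying if any neighbouring byte changes. Roughly, using HotSpot's juint/jubyte typedefs (a sketch of the technique under a little-endian assumption, not the shared implementation; byte_cmpxchg_via_int is a hypothetical name):

// Sketch: emulate a one-byte CAS with a four-byte CAS on the
// enclosing aligned word (little-endian byte numbering assumed).
static inline jbyte byte_cmpxchg_via_int(jbyte exchange_value,
                                         volatile jbyte* dest,
                                         jbyte compare_value) {
  volatile juint* aligned = (volatile juint*)((uintptr_t)dest & ~(uintptr_t)3);
  const int shift = 8 * (int)((uintptr_t)dest & 3);  // bit offset of our byte
  const juint mask = (juint)0xff << shift;
  for (;;) {
    juint old_word = *aligned;
    jbyte cur = (jbyte)(old_word >> shift);
    if (cur != compare_value)
      return cur;                     // compare failed; report current byte
    juint new_word = (old_word & ~mask)
                   | (((juint)(jubyte)exchange_value) << shift);
    if (__sync_val_compare_and_swap(aligned, old_word, new_word) == old_word)
      return compare_value;           // the word CAS installed our byte
    // A neighbouring byte changed concurrently; retry.
  }
}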
 267 
 268 template<>
 269 template<typename T>
 270 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 271                                                 T volatile* dest,
 272                                                 T compare_value,
 273                                                 cmpxchg_memory_order order) {
 274   STATIC_ASSERT(4 == sizeof(T));
 275 #ifdef ARM
 276   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 277 #else
 278 #ifdef M68K
 279   return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 280 #else
 281   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 282 #endif // M68K
 283 #endif // ARM
 284 }
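This is why the patch reorders the helper parameters to (newval, ptr, oldval): cmpxchg_using_helper expects its stub in exactly that shape, matching the (exchange_value, dest, compare_value) argument order at the call site. The shared adapter itself is not shown in this webrev; its job is presumably just to bridge between T and the stub's int type, along these lines (a hypothetical sketch named cmpxchg_via_int_stub, not the shared-code definition):

// Sketch: adapt a T-typed cmpxchg request to an int-typed stub such as
// arm_compare_and_swap or m68k_compare_and_swap.
template<typename T>
static inline T cmpxchg_via_int_stub(int (*stub)(int, volatile int*, int),
                                     T exchange_value,
                                     T volatile* dest,
                                     T compare_value) {
  STATIC_ASSERT(sizeof(T) == sizeof(int));
  return (T)stub((int)exchange_value,
                 (volatile int*)dest,
                 (int)compare_value);
}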
 285 
 286 template<>
 287 template<typename T>
 288 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 289                                                 T volatile* dest,
 290                                                 T compare_value,
 291                                                 cmpxchg_memory_order order) {
 292   STATIC_ASSERT(8 == sizeof(T));
 293   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 294 }
 295 
 296 inline jlong Atomic::load(const volatile jlong* src) {
 297   volatile jlong dest;
 298   os::atomic_copy64(src, &dest);
 299   return dest;
 300 }
 301 
 302 inline void Atomic::store(jlong store_value, jlong* dest) {
 303   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 304 }
 305 
 306 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 307   os::atomic_copy64((volatile jlong*)&store_value, dest);
 308 }
 309 
 310 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP