
src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp

rev 13448 : imported patch bsd_zero
rev 13452 : [mq]: coleen_review1
rev 13458 : imported patch cmpxchg_using_helper


  40  * This implementation is processor specific and works on
  41  * 68020, 68030, 68040 and 68060.
  42  *
  43  * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
  44  * instruction.
  45  * Using a kernel helper would be better for a complete implementation.
  46  *
  47  */
  48 
  49 static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  50   int ret;
  51   __asm __volatile ("cas%.l %0,%2,%1"
  52                    : "=d" (ret), "+m" (*(ptr))
  53                    : "d" (newval), "0" (oldval));
  54   return ret;
  55 }
  56 
  57 /* Perform an atomic compare and swap: if the current value of `*PTR'
  58    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  59    `*PTR' before the operation.*/
  60 static inline int m68k_compare_and_swap(volatile int *ptr,
  61                                         int oldval,
  62                                         int newval) {
  63   for (;;) {
  64       int prev = *ptr;
  65       if (prev != oldval)
  66         return prev;
  67 
  68       if (__m68k_cmpxchg (prev, newval, ptr) == newval)
  69         // Success.
  70         return prev;
  71 
  72       // We failed even though prev == oldval.  Try again.
  73     }
  74 }
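
The helper follows the contract documented above (and shared with GCC's
__sync_val_compare_and_swap): it returns the contents of *ptr before the
operation, so a caller detects success by comparing the return value against
oldval. A minimal illustration of that contract (hypothetical, not part of
the patch):

// Illustrative only: one-shot try-acquire built on the CAS contract above.
// 0 = free, 1 = held; the swap happened iff the old contents were 0.
static inline bool try_lock(volatile int* lock_word) {
  return m68k_compare_and_swap(lock_word, /* oldval */ 0, /* newval */ 1) == 0;
}
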
  75 
  76 /* Atomically add an int to memory.  */
  77 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  78   for (;;) {
  79       // Loop until success.
  80 
  81       int prev = *ptr;
  82 


 101 #ifdef ARM
 102 
 103 /*
 104  * __kernel_cmpxchg
 105  *
 106  * Atomically store newval in *ptr if *ptr is equal to oldval, for use from user space.
 107  * Return zero if *ptr was changed or non-zero if no exchange happened.
 108  * The C flag is also set if *ptr was changed to allow for assembly
 109  * optimization in the calling code.
 110  *
 111  */
 112 
 113 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 114 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
 115 
 116 
 117 
 118 /* Perform an atomic compare and swap: if the current value of `*PTR'
 119    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
 120    `*PTR' before the operation.*/
 121 static inline int arm_compare_and_swap(volatile int *ptr,
 122                                        int oldval,
 123                                        int newval) {
 124   for (;;) {
 125       int prev = *ptr;
 126       if (prev != oldval)
 127         return prev;
 128 
 129       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 130         // Success.
 131         return prev;
 132 
 133       // We failed even though prev == oldval.  Try again.
 134     }
 135 }
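
Note that __kernel_cmpxchg reports success differently from the m68k helper:
it returns zero when the store happened rather than the previous contents,
which is why the loop above tests == 0. A minimal illustration of that
zero-means-success convention (hypothetical, not part of the patch):

// Illustrative only: returns true iff *flag atomically changed from 0 to 1.
static inline bool try_set_flag(volatile int* flag) {
  return __kernel_cmpxchg(/* oldval */ 0, /* newval */ 1, flag) == 0;
}
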
 136 
 137 /* Atomically add an int to memory.  */
 138 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
 139   for (;;) {
 140       // Loop until a __kernel_cmpxchg succeeds.
 141 
 142       int prev = *ptr;
 143 


 250 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 251                                  volatile intptr_t* dest) {
 252 #ifdef ARM
 253   return arm_lock_test_and_set(dest, exchange_value);
 254 #else
 255 #ifdef M68K
 256   return m68k_lock_test_and_set(dest, exchange_value);
 257 #else
 258   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
 259   __sync_synchronize();
 260   return result;
 261 #endif // M68K
 262 #endif // ARM
 263 }
 264 
 265 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 266   return (void *) xchg_ptr((intptr_t) exchange_value,
 267                            (volatile intptr_t*) dest);
 268 }
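
GCC documents __sync_lock_test_and_set as an acquire barrier only, so the
explicit __sync_synchronize() after it is what upgrades the exchange to the
full two-way fence that Atomic::xchg_ptr is expected to provide. A condensed
sketch of the same pattern (illustrative name, not part of the patch):

// Illustrative only: full-fence exchange composed from GCC __sync primitives.
static inline intptr_t full_fence_xchg(volatile intptr_t* dest, intptr_t value) {
  intptr_t old = __sync_lock_test_and_set(dest, value);  // acquire barrier only
  __sync_synchronize();                                  // add the release half
  return old;
}
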
 269 
 270 inline jint Atomic::cmpxchg(jint exchange_value,
 271                             volatile jint* dest,
 272                             jint compare_value,
 273                             cmpxchg_memory_order order) {
 274 #ifdef ARM
 275   return arm_compare_and_swap(dest, compare_value, exchange_value);
 276 #else
 277 #ifdef M68K
 278   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 279 #else
 280   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 281 #endif // M68K
 282 #endif // ARM
 283 }
 284 
 285 inline jlong Atomic::cmpxchg(jlong exchange_value,
 286                              volatile jlong* dest,
 287                              jlong compare_value,
 288                              cmpxchg_memory_order order) {
 289 
 290   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 291 }
 292 
 293 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
 294                                     volatile intptr_t* dest,
 295                                     intptr_t compare_value,
 296                                     cmpxchg_memory_order order) {
 297 #ifdef ARM
 298   return arm_compare_and_swap(dest, compare_value, exchange_value);
 299 #else
 300 #ifdef M68K
 301   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 302 #else
 303   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 304 #endif // M68K
 305 #endif // ARM
 306 }
 307 
 308 inline void* Atomic::cmpxchg_ptr(void* exchange_value,
 309                                  volatile void* dest,
 310                                  void* compare_value,
 311                                  cmpxchg_memory_order order) {
 312 
 313   return (void *) cmpxchg_ptr((intptr_t) exchange_value,
 314                               (volatile intptr_t*) dest,
 315                               (intptr_t) compare_value,
 316                               order);
 317 }
 318 
 319 inline jlong Atomic::load(const volatile jlong* src) {
 320   volatile jlong dest;
 321   os::atomic_copy64(src, &dest);
 322   return dest;
 323 }
 324 
 325 inline void Atomic::store(jlong store_value, jlong* dest) {
 326   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 327 }
 328 
 329 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 330   os::atomic_copy64((volatile jlong*)&store_value, dest);
 331 }
 332 
 333 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP


  40  * This implementation is processor specific and works on
  41  * 68020, 68030, 68040 and 68060.
  42  *
  43  * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
  44  * instruction.
  45  * Using a kernel helper would be better for a complete implementation.
  46  *
  47  */
  48 
  49 static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  50   int ret;
  51   __asm __volatile ("cas%.l %0,%2,%1"
  52                    : "=d" (ret), "+m" (*(ptr))
  53                    : "d" (newval), "0" (oldval));
  54   return ret;
  55 }
  56 
  57 /* Perform an atomic compare and swap: if the current value of `*PTR'
  58    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  59    `*PTR' before the operation.*/
  60 static inline int m68k_compare_and_swap(int newval,
  61                                         volatile int *ptr,
  62                                         int oldval) {
  63   for (;;) {
  64       int prev = *ptr;
  65       if (prev != oldval)
  66         return prev;
  67 
  68       if (__m68k_cmpxchg (prev, newval, ptr) == newval)
  69         // Success.
  70         return prev;
  71 
  72       // We failed even though prev == oldval.  Try again.
  73     }
  74 }
  75 
  76 /* Atomically add an int to memory.  */
  77 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  78   for (;;) {
  79       // Loop until success.
  80 
  81       int prev = *ptr;
  82 


 101 #ifdef ARM
 102 
 103 /*
 104  * __kernel_cmpxchg
 105  *
 106  * Atomically store newval in *ptr if *ptr is equal to oldval, for use from user space.
 107  * Return zero if *ptr was changed or non-zero if no exchange happened.
 108  * The C flag is also set if *ptr was changed to allow for assembly
 109  * optimization in the calling code.
 110  *
 111  */
 112 
 113 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 114 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
 115 
 116 
 117 
 118 /* Perform an atomic compare and swap: if the current value of `*PTR'
 119    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
 120    `*PTR' before the operation.*/
 121 static inline int arm_compare_and_swap(int newval,
 122                                        volatile int *ptr,
 123                                        int oldval) {
 124   for (;;) {
 125       int prev = *ptr;
 126       if (prev != oldval)
 127         return prev;
 128 
 129       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 130         // Success.
 131         return prev;
 132 
 133       // We failed even though prev == oldval.  Try again.
 134     }
 135 }
 136 
 137 /* Atomically add an int to memory.  */
 138 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
 139   for (;;) {
 140       // Loop until a __kernel_cmpxchg succeeds.
 141 
 142       int prev = *ptr;
 143 


 250 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 251                                  volatile intptr_t* dest) {
 252 #ifdef ARM
 253   return arm_lock_test_and_set(dest, exchange_value);
 254 #else
 255 #ifdef M68K
 256   return m68k_lock_test_and_set(dest, exchange_value);
 257 #else
 258   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
 259   __sync_synchronize();
 260   return result;
 261 #endif // M68K
 262 #endif // ARM
 263 }
 264 
 265 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 266   return (void *) xchg_ptr((intptr_t) exchange_value,
 267                            (volatile intptr_t*) dest);
 268 }
 269 
 270 // No direct support for cmpxchg of bytes; emulate using int.
 271 template<>
 272 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 273 
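
CmpxchgByteUsingInt synthesizes the 1-byte CAS from the 4-byte one: read the
aligned word containing the byte, splice in the new byte, and retry the
word-wide cmpxchg until it succeeds or the addressed byte no longer matches.
A freestanding sketch of the idea, assuming a little-endian layout
(illustrative only, not the shared HotSpot implementation):

#include <stdint.h>

// Illustrative only: 1-byte CAS emulated with a 4-byte CAS.
static inline uint8_t byte_cas_via_word(uint8_t exchange_value,
                                        volatile uint8_t* dest,
                                        uint8_t compare_value) {
  uintptr_t addr = (uintptr_t) dest;
  volatile uint32_t* word = (volatile uint32_t*) (addr & ~(uintptr_t) 3);
  unsigned shift = 8 * (addr & 3);            // byte position, little-endian
  uint32_t mask = (uint32_t) 0xff << shift;

  uint32_t cur = *word;
  for (;;) {
    if ((uint8_t) (cur >> shift) != compare_value)
      return (uint8_t) (cur >> shift);        // byte differs: report it
    uint32_t next = (cur & ~mask) | ((uint32_t) exchange_value << shift);
    uint32_t prev = __sync_val_compare_and_swap(word, cur, next);
    if (prev == cur)
      return compare_value;                   // word CAS succeeded
    cur = prev;                               // a neighboring byte changed: retry
  }
}
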
 274 template<>
 275 template<typename T>
 276 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 277                                                 T volatile* dest,
 278                                                 T compare_value,
 279                                                 cmpxchg_memory_order order) {
 280   STATIC_ASSERT(4 == sizeof(T));
 281 #ifdef ARM
 282   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 283 #else
 284 #ifdef M68K
 285   return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 286 #else
 287   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 288 #endif // M68K
 289 #endif // ARM
 290 }
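
cmpxchg_using_helper is what lets the int-typed assembly helpers keep their C
signatures while the template operates on an arbitrary 4-byte T: it casts the
arguments to the helper's parameter type, calls it, and casts the result back.
A sketch of the adapter's shape (an illustrative reimplementation under that
assumption; the authoritative version is in atomic.hpp):

// Illustrative only: shape of an int-helper adapter like cmpxchg_using_helper.
template<typename Stub, typename T>
static inline T cmpxchg_via_helper(Stub (*helper)(Stub, Stub volatile*, Stub),
                                   T exchange_value,
                                   T volatile* dest,
                                   T compare_value) {
  STATIC_ASSERT(sizeof(Stub) == sizeof(T));
  return (T) helper((Stub) exchange_value,
                    (Stub volatile*) dest,
                    (Stub) compare_value);
}
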
 291 
 292 template<>
 293 template<typename T>
 294 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 295                                                 T volatile* dest,
 296                                                 T compare_value,
 297                                                 cmpxchg_memory_order order) {
 298   STATIC_ASSERT(8 == sizeof(T));
 299   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 300 }
 301 
 302 inline jlong Atomic::load(const volatile jlong* src) {
 303   volatile jlong dest;
 304   os::atomic_copy64(src, &dest);
 305   return dest;
 306 }
 307 
 308 inline void Atomic::store(jlong store_value, jlong* dest) {
 309   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 310 }
 311 
 312 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 313   os::atomic_copy64((volatile jlong*)&store_value, dest);
 314 }
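
On a 32-bit Zero target a plain 64-bit load or store may tear into two 32-bit
accesses, which is why jlong load and store funnel through os::atomic_copy64
(defined in the platform os support code). A hypothetical fallback for a CPU
with no native 64-bit atomics could serialize the copies with a lock
(illustrative only, not the actual helper):

#include <pthread.h>
#include <stdint.h>

// Illustrative only: lock-based 64-bit copy for CPUs lacking 64-bit atomics.
static pthread_mutex_t copy64_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void atomic_copy64_fallback(const volatile int64_t* src,
                                          volatile int64_t* dst) {
  pthread_mutex_lock(&copy64_lock);
  *dst = *src;
  pthread_mutex_unlock(&copy64_lock);
}
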
 315 
 316 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP