src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp

rev 13281 : imported patch Atomic_refactoring
rev 13282 : imported patch Atomic_polishing
rev 13284 : [mq]: Atomic_aliasing_2


  39 inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  40  return __sync_add_and_fetch(dest, add_value);
  41 }
  42 
  43 
  44 template <>
  45 inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  46  return __sync_add_and_fetch(dest, add_value);
  47 }
  48 
  49 
  50 template <>
  51 inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  52   int32_t res = __sync_lock_test_and_set (dest, exchange_value);
  53   FULL_MEM_BARRIER;
  54   return res;
  55 }
  56 
  57 template <>
  58 inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  59   int64_t res = __sync_lock_test_and_set (dest, exchange_value);
  60   FULL_MEM_BARRIER;
  61   return res;
  62 }
  63 
  64 template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
  65                                         T compare_value, cmpxchg_memory_order order)
  66 {
  67   if (order == memory_order_relaxed) {
  68     T value = compare_value;
  69     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  70                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  71     return value;
  72   } else {
  73     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  74   }
  75 }
  76 
  77 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
  78 template <>
  79 inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
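
A quick standalone sketch (editorial, not part of this webrev) of the memory_order_relaxed branch of generic_cmpxchg() above, compilable with GCC or Clang: __atomic_compare_exchange writes the value it actually observed at dest back into its "expected" argument when the compare fails, and leaves it equal to compare_value when it succeeds, so returning value preserves the usual cmpxchg contract of returning whatever was seen at dest. The helper name below is illustrative only.

#include <cstdint>
#include <cassert>

// Illustrative helper mirroring the relaxed branch of generic_cmpxchg(); not HotSpot code.
template <typename T>
T cmpxchg_relaxed_sketch(T exchange_value, volatile T* dest, T compare_value) {
  T value = compare_value;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/ false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return value;  // compare_value on success, the current *dest on failure
}

int main() {
  volatile int32_t v = 5;
  assert(cmpxchg_relaxed_sketch<int32_t>(7, &v, 5) == 5);  // match: stores 7, returns old 5
  assert(v == 7);
  assert(cmpxchg_relaxed_sketch<int32_t>(9, &v, 5) == 7);  // mismatch: no store, returns current 7
  assert(v == 7);
  return 0;
}
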


  39 inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  40  return __sync_add_and_fetch(dest, add_value);
  41 }
  42 
  43 
  44 template <>
  45 inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  46  return __sync_add_and_fetch(dest, add_value);
  47 }
  48 
  49 
  50 template <>
  51 inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  52   int32_t res = __sync_lock_test_and_set (dest, exchange_value);
  53   FULL_MEM_BARRIER;
  54   return res;
  55 }
  56 
  57 template <>
  58 inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  59   intptr_t res = reinterpret_cast<intptr_t>(__sync_lock_test_and_set(reinterpret_cast<char* volatile*>(dest), reinterpret_cast<char*>(exchange_value)));
  60   FULL_MEM_BARRIER;
  61   return res;
  62 }
  63 
  64 template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
  65                                         T compare_value, cmpxchg_memory_order order)
  66 {
  67   if (order == memory_order_relaxed) {
  68     T value = compare_value;
  69     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  70                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  71     return value;
  72   } else {
  73     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  74   }
  75 }
  76 
  77 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
  78 template <>
  79 inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
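
For context, a standalone sketch (editorial, not part of this webrev) of the pattern behind the xchg specializations: GCC documents __sync_lock_test_and_set as an acquire barrier only, so the port follows the swap with FULL_MEM_BARRIER to give Atomic::xchg full two-way fence semantics. FULL_MEM_BARRIER is defined elsewhere in the AArch64 port; the sketch assumes it is equivalent to __sync_synchronize().

#include <cstdint>

// Illustrative only; not HotSpot code. Assumes FULL_MEM_BARRIER behaves like __sync_synchronize().
inline int32_t xchg_sketch(int32_t exchange_value, volatile int32_t* dest) {
  // Atomic swap; GCC specifies this builtin as an acquire barrier only.
  int32_t res = __sync_lock_test_and_set(dest, exchange_value);
  // Trailing full fence restores the two-way ordering callers of Atomic::xchg rely on.
  __sync_synchronize();
  return res;
}

int main() {
  volatile int32_t v = 1;
  int32_t old = xchg_sketch(2, &v);
  return (old == 1 && v == 2) ? 0 : 1;  // old value returned, new value stored
}
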