src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp

rev 13281 : imported patch Atomic_refactoring
rev 13282 : imported patch Atomic_polishing
rev 13284 : [mq]: Atomic_aliasing_1


template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

// __sync_lock_test_and_set is an atomic exchange operation; it is only an
// acquire barrier, so a trailing full barrier is required for HotSpot's
// conservatively ordered xchg.
template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  int32_t res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  int64_t res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

template <>
inline Atomic::CanonicalPointer Atomic::specialized_xchg<Atomic::CanonicalPointer>(Atomic::CanonicalPointer exchange_value, volatile Atomic::CanonicalPointer* dest) {
  Atomic::CanonicalPointer res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

// memory_order_relaxed maps onto __ATOMIC_RELAXED; all other orders fall
// back to the full-barrier __sync builtin.
template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
                                        T compare_value, cmpxchg_memory_order order)
{
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

template <>
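
For reference, a minimal standalone sketch of the cmpxchg pattern used above.
It is not part of the webrev: demo_cmpxchg and demo_memory_order are
hypothetical stand-ins for HotSpot's Atomic::specialized_cmpxchg and
cmpxchg_memory_order, and it assumes a GCC/Clang toolchain that provides the
__atomic and __sync builtins. It shows how the relaxed path uses
__atomic_compare_exchange while the default path uses the full-barrier
__sync_val_compare_and_swap.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for HotSpot's cmpxchg_memory_order.
enum demo_memory_order { demo_relaxed, demo_conservative };

template <typename T>
T demo_cmpxchg(T exchange_value, volatile T* dest,
               T compare_value, demo_memory_order order) {
  if (order == demo_relaxed) {
    // Relaxed path: atomicity only, no ordering constraints, as in the
    // __ATOMIC_RELAXED branch above.
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;  // previous contents of *dest
  } else {
    // Conservative path: full-barrier semantics via the legacy __sync builtin.
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
}

int main() {
  volatile int32_t x = 41;
  int32_t old = demo_cmpxchg<int32_t>(42, &x, 41, demo_conservative);
  std::printf("old=%d, new=%d\n", old, (int)x);  // prints old=41, new=42
  return 0;
}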