src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

Old version:

  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
                                                        T volatile* dest,
                                                        T compare_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{

New version (Atomic::PlatformCmpxchg parameters reordered to dest, compare_value, exchange_value):

  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};
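
A note on the idiom above: the read-modify-write is issued with release ordering and then followed by FULL_MEM_BARRIER, which is how this port builds conservatively ordered (full-fence) atomics out of the GCC builtins. A minimal standalone sketch of the same shape, under the assumption that FULL_MEM_BARRIER expands to a full two-way fence (a dmb ish on AArch64):

// Sketch only; stand-in for this port's FULL_MEM_BARRIER (assumed to be
// a full fence, e.g. __sync_synchronize()).
#define FULL_MEM_BARRIER __atomic_thread_fence(__ATOMIC_SEQ_CST)

// Same shape as add_and_fetch above: release ordering on the RMW itself,
// then a trailing full fence so later accesses cannot move above it.
inline int add_and_fetch_sketch(int volatile* dest, int add_value) {
  int res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}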

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}
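
For context, a hypothetical call site (the field name is made up; the include is assumed to be HotSpot's runtime/atomic.hpp); xchg keeps the dest-first signature in both versions and, like add_and_fetch, behaves as a full-fence swap on this port:

#include "runtime/atomic.hpp"

static volatile int _state = 0;       // hypothetical shared field
int prev = Atomic::xchg(&_state, 1);  // returns the previous value of _state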

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                        T compare_value,
                                                        T exchange_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}
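
Illustrative only (the field and values are made up; new parameter order): the order argument selects between the two branches above. The default, memory_order_conservative, takes the fenced path; an explicit memory_order_relaxed performs the bare CAS:

static volatile intptr_t _top = 0;  // hypothetical shared word

// Default (conservative) order: FULL_MEM_BARRIER on both sides of the
// relaxed __atomic_compare_exchange, i.e. fully fenced.
intptr_t witness = Atomic::cmpxchg(&_top, (intptr_t)0, (intptr_t)1);

// Explicitly relaxed: atomicity only, no fences emitted.
intptr_t witness2 = Atomic::cmpxchg(&_top, (intptr_t)1, (intptr_t)2,
                                    memory_order_relaxed);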

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
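
The functional change in this hunk is the cmpxchg parameter order. Side by side at a hypothetical call site (the two calls belong to the old and new builds respectively and would not compile together); in both, the return value is the witness, i.e. the value found at dest:

static volatile int _lock = 0;  // hypothetical

// Old order: (exchange_value, dest, compare_value)
int witness_old = Atomic::cmpxchg(1, &_lock, 0);

// New order: (dest, compare_value, exchange_value), reading left to right
// as "at dest, expect compare_value, install exchange_value".
int witness_new = Atomic::cmpxchg(&_lock, 0, 1);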