
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

  29 #include "runtime/vm_version.hpp"
  30 
  31 // Implementation of class atomic
  32 // Note that memory_order_conservative requires a full barrier after atomic stores.
  33 // See https://patchwork.kernel.org/patch/3575821/
  34 
  35 template<size_t byte_size>
  36 struct Atomic::PlatformAdd
  37   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  38 {
  39   template<typename D, typename I>
  40   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
  41     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  42     FULL_MEM_BARRIER;
  43     return res;
  44   }
  45 };
  46 
  47 template<size_t byte_size>
  48 template<typename T>
  49 inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
  50                                                      T volatile* dest,
  51                                                      atomic_memory_order order) const {
  52   STATIC_ASSERT(byte_size == sizeof(T));
  53   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  54   FULL_MEM_BARRIER;
  55   return res;
  56 }
  57 
  58 template<size_t byte_size>
  59 template<typename T>
  60 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  61                                                         T volatile* dest,
  62                                                         T compare_value,
  63                                                         atomic_memory_order order) const {
  64   STATIC_ASSERT(byte_size == sizeof(T));
  65   if (order == memory_order_relaxed) {
  66     T value = compare_value;
  67     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  68                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  69     return value;
  70   } else {

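The frame below repeats the same region of the file; the only difference from the frame above is the signature of PlatformXchg::operator(): the destination pointer now comes first and the exchange value second, matching the dest-first order already used by PlatformAdd::add_and_fetch (PlatformCmpxchg keeps the value-first order on this page). As a rough illustration, here is a small stand-alone sketch of how a call site reads under each ordering. It is not HotSpot code: the helper name is made up, the public Atomic::xchg wrapper is assumed to forward its arguments to PlatformXchg in the same order, and __sync_synchronize() stands in for FULL_MEM_BARRIER, whose definition is not part of this page.

    #include <cstdint>

    // Illustrative stand-in for the dest-first exchange (not HotSpot code).
    template<typename T>
    inline T xchg_dest_first(T volatile* dest, T exchange_value) {
      T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
      __sync_synchronize();   // assumed stand-in for FULL_MEM_BARRIER
      return res;
    }

    int main() {
      volatile int32_t flag = 0;
      // dest-first order (second frame): destination, then the value to store.
      int32_t prev = xchg_dest_first(&flag, int32_t(1));
      // Under the value-first order (first frame) the same call would read:
      //   xchg(int32_t(1), &flag);
      return prev;   // 0: the value observed before the exchange
    }
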
  29 #include "runtime/vm_version.hpp"
  30 
  31 // Implementation of class atomic
  32 // Note that memory_order_conservative requires a full barrier after atomic stores.
  33 // See https://patchwork.kernel.org/patch/3575821/
  34 
  35 template<size_t byte_size>
  36 struct Atomic::PlatformAdd
  37   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  38 {
  39   template<typename D, typename I>
  40   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
  41     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  42     FULL_MEM_BARRIER;
  43     return res;
  44   }
  45 };
  46 
  47 template<size_t byte_size>
  48 template<typename T>
  49 inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
  50                                                      T exchange_value,
  51                                                      atomic_memory_order order) const {
  52   STATIC_ASSERT(byte_size == sizeof(T));
  53   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  54   FULL_MEM_BARRIER;
  55   return res;
  56 }
  57 
  58 template<size_t byte_size>
  59 template<typename T>
  60 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  61                                                         T volatile* dest,
  62                                                         T compare_value,
  63                                                         atomic_memory_order order) const {
  64   STATIC_ASSERT(byte_size == sizeof(T));
  65   if (order == memory_order_relaxed) {
  66     T value = compare_value;
  67     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  68                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  69     return value;
  70   } else {
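
As the comment at the top of both frames says, memory_order_conservative requires a full barrier after the atomic store; in this file the read-modify-write is performed with release semantics and FULL_MEM_BARRIER is issued afterwards (the linked patchwork discussion covers why AArch64 needs the trailing barrier). Below is a minimal stand-alone sketch of that pattern for add_and_fetch, again not HotSpot code: __sync_synchronize() is assumed as the full fence, since FULL_MEM_BARRIER is defined in another header that this page does not show. __atomic_add_fetch returns the updated value, which is what the add_and_fetch name implies.

    #include <cstdint>
    #include <cassert>

    // Illustrative stand-in for the conservative add (not HotSpot code).
    template<typename T>
    inline T add_and_fetch_conservative(T volatile* dest, T add_value) {
      T res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);  // RMW with release ordering
      __sync_synchronize();   // assumed stand-in for FULL_MEM_BARRIER after the store
      return res;             // the updated value ("add and fetch")
    }

    int main() {
      volatile int32_t counter = 41;
      assert(add_and_fetch_conservative(&counter, int32_t(1)) == 42);
      return 0;
    }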


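The relaxed compare-exchange path, identical in both frames, leans on a detail of __atomic_compare_exchange: on failure it writes the value actually found at dest back into the local variable value, so returning value always yields the prior contents of dest, whether or not the exchange happened. A small stand-alone sketch of that behaviour, with an illustrative helper that mirrors the functor's argument order:

    #include <cstdint>
    #include <cassert>

    // Illustrative stand-in for the relaxed compare-and-exchange path (not HotSpot code).
    template<typename T>
    inline T cmpxchg_relaxed(T exchange_value, T volatile* dest, T compare_value) {
      T value = compare_value;
      __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
      return value;   // the value observed at *dest before the operation
    }

    int main() {
      volatile int32_t v = 5;
      // Match: the exchange happens and the old value (5) is returned.
      assert(cmpxchg_relaxed(int32_t(7), &v, int32_t(5)) == 5 && v == 7);
      // Mismatch: *dest is left alone and its current value (7) is returned.
      assert(cmpxchg_relaxed(int32_t(9), &v, int32_t(5)) == 7 && v == 7);
      return 0;
    }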