< prev index next >

src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

Print this page




  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  28 
  29 #include "runtime/vm_version.hpp"
  30 
  31 // Implementation of class atomic
  32 // Note that memory_order_conservative requires a full barrier after atomic stores.
  33 // See https://patchwork.kernel.org/patch/3575821/
  34 
// FULL_MEM_BARRIER: full two-way fence via the legacy GCC __sync builtin.
  35 #define FULL_MEM_BARRIER  __sync_synchronize()
// NOTE(review): READ/WRITE_MEM_BARRIER carry a trailing ';' inside the macro
// body, so call sites are expected to use them as bare statements (no
// semicolon of their own) — confirm call-site convention before "fixing" this.
  36 #define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
  37 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
  38 
// Conservatively-ordered atomic add-and-fetch for AArch64.
// The RMW is issued with release semantics and then followed by a full
// barrier: per the file-header note (patchwork link), a release operation
// alone is not a full barrier on AArch64, so the explicit fence supplies
// memory_order_conservative.
  39 template<size_t byte_size>
  40 struct Atomic::PlatformAdd
  41   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  42 {
  43   template<typename I, typename D>
  44   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    // NOTE(review): 'order' is ignored — even a relaxed request pays for the
    // release RMW + full fence. Confirm this is intentional (conservative
    // fallback) before relying on relaxed adds being cheap here.
  45     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  46     FULL_MEM_BARRIER;
  47     return res;
  48   }
  49 };
  50 
  51 template<size_t byte_size>
  52 template<typename T>
  53 inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
  54                                                      T volatile* dest,
  55                                                      atomic_memory_order order) const {
  56   STATIC_ASSERT(byte_size == sizeof(T));
  57   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  58   FULL_MEM_BARRIER;


// Compare-and-exchange: if *dest == compare_value, store exchange_value.
// Returns the value observed at *dest (equal to compare_value on success).
  63 template<typename T>
  64 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  65                                                         T volatile* dest,
  66                                                         T compare_value,
  67                                                         atomic_memory_order order) const {
  68   STATIC_ASSERT(byte_size == sizeof(T));
  69   if (order == memory_order_relaxed) {
    // Relaxed request: plain relaxed CAS, no fences.
  70     T value = compare_value;
  71     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  72                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    // On failure __atomic_compare_exchange writes the observed value back
    // into 'value'; on success it still holds compare_value.
  73     return value;
  74   } else {
    // Any stronger order is promoted to conservative: a full barrier on both
    // sides of a relaxed CAS (see the file-header note on why release-class
    // orderings are insufficient as full barriers on AArch64).
  75     T value = compare_value;
  76     FULL_MEM_BARRIER;
  77     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  78                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  79     FULL_MEM_BARRIER;
  80     return value;
  81   }
  82 }





















  83 
  84 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP


  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  28 
  29 #include "runtime/vm_version.hpp"
  30 
  31 // Implementation of class atomic
  32 // Note that memory_order_conservative requires a full barrier after atomic stores.
  33 // See https://patchwork.kernel.org/patch/3575821/
  34 




// Conservatively-ordered atomic add-and-fetch for AArch64.
// Release RMW followed by an explicit full barrier — per the file-header
// note, a release operation alone is not a full barrier on AArch64.
  35 template<size_t byte_size>
  36 struct Atomic::PlatformAdd
  37   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  38 {
  39   template<typename I, typename D>
  40   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    // NOTE(review): 'order' is ignored — all requests get the conservative
    // release-RMW + full-fence treatment. Confirm intentional.
  41     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  42     FULL_MEM_BARRIER;
  43     return res;
  44   }
  45 };
  46 
  47 template<size_t byte_size>
  48 template<typename T>
  49 inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
  50                                                      T volatile* dest,
  51                                                      atomic_memory_order order) const {
  52   STATIC_ASSERT(byte_size == sizeof(T));
  53   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  54   FULL_MEM_BARRIER;


// Compare-and-exchange: if *dest == compare_value, store exchange_value.
// Returns the value observed at *dest (equal to compare_value on success).
  59 template<typename T>
  60 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  61                                                         T volatile* dest,
  62                                                         T compare_value,
  63                                                         atomic_memory_order order) const {
  64   STATIC_ASSERT(byte_size == sizeof(T));
  65   if (order == memory_order_relaxed) {
    // Relaxed request: plain relaxed CAS, no fences.
  66     T value = compare_value;
  67     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  68                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    // On failure the builtin writes the observed value back into 'value';
    // on success it still holds compare_value.
  69     return value;
  70   } else {
    // Any stronger order is promoted to conservative: full barrier on both
    // sides of a relaxed CAS (see file-header note on AArch64 barriers).
  71     T value = compare_value;
  72     FULL_MEM_BARRIER;
  73     __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
  74                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  75     FULL_MEM_BARRIER;
  76     return value;
  77   }
  78 }
  79 
  80 template<size_t byte_size>
  81 struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
  82 {
  83   template <typename T>
  84   T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
  85 };
  86 
  87 template<size_t byte_size>
  88 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
  89 {
  90   template <typename T>
  91   void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
  92 };
  93 
  94 template<size_t byte_size>
  95 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
  96 {
  97   template <typename T>
  98   void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
  99 };
 100 
 101 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
< prev index next >