/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP

#include "runtime/vm_version.hpp"

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

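// Atomic add-and-fetch. The read-modify-write is issued with release
// semantics and is followed by FULL_MEM_BARRIER, which upgrades it to the
// conservative (fully fenced) ordering described in the note above.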
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};

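// Atomic exchange, using the same pattern as add-and-fetch: a
// release-ordered __atomic_exchange_n followed by a full barrier.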
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
                                                     T volatile* dest,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

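// Compare-and-exchange. A memory_order_relaxed request maps directly onto a
// relaxed __atomic_compare_exchange; any stronger request is handled
// conservatively by bracketing the relaxed compare-exchange with full
// barriers on both sides.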
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
                                                        T volatile* dest,
                                                        T compare_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}

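// Ordered load: a load-acquire via the __atomic_load builtin.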
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

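// Ordered store with release semantics only.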
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

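// Ordered store followed by a full fence, so the store is ordered ahead of
// any subsequent loads and stores (see the note at the top of this file).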
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
};
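
// Illustrative usage sketch only: these platform templates are not called
// directly, but are selected by the generic Atomic:: front end declared in
// runtime/atomic.hpp. The argument order shown here is an assumption based
// on the value-before-destination signatures of the hooks in this file:
//
//   volatile int counter = 0;
//   Atomic::add(1, &counter);        // release RMW + full barrier
//   Atomic::cmpxchg(2, &counter, 1); // conservative by default: full
//                                    // barriers around the compare-exchange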

#endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP