/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP

#include "vm_version_aarch64.hpp"

// Implementation of class Atomic

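// Memory barrier macros.  FULL_MEM_BARRIER is a full two-way fence
// (GCC's __sync_synchronize(), a dmb ish on AArch64).  READ_MEM_BARRIER
// and WRITE_MEM_BARRIER are one-way acquire and release fences built on
// the C++11-style __atomic_thread_fence builtin.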
#define FULL_MEM_BARRIER  __sync_synchronize()
#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE)
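// Atomic add via __sync_add_and_fetch, which returns the updated value
// and is documented by GCC to act as a full barrier.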
template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

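// Atomic exchange.  Despite its name, __sync_lock_test_and_set is an
// exchange operation, but GCC only guarantees acquire semantics for it,
// so the trailing FULL_MEM_BARRIER restores the full two-way fence
// behaviour expected of Atomic::xchg.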
template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  int32_t res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  int64_t res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}
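// Shared helper for the cmpxchg specializations below.  A relaxed
// request maps to a strong __atomic_compare_exchange with relaxed
// ordering on both the success and failure paths; any stronger request
// falls back to __sync_val_compare_and_swap, which is a full barrier.
// Both paths return the value observed at *dest: on failure
// __atomic_compare_exchange writes the current contents back into
// 'value', and on success 'value' still holds compare_value, which is
// exactly the old value.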
template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
                                        T compare_value, cmpxchg_memory_order order)
{
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
}

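// AArch64 has byte-sized exclusive accesses (ldxrb/stxrb), so a real
// 1-byte cmpxchg is provided here; defining
// VM_HAS_SPECIALIZED_CMPXCHG_BYTE tells the shared Atomic code not to
// emulate it on top of the 4-byte form.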
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP