/*
 * Copyright (c) 2020, Microsoft Corporation. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP

#include <intrin.h>
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"

// As per atomic.hpp, all read-modify-write operations must provide two-way
// barrier semantics. The memory_order parameter is ignored - we always provide
// the strongest/most-conservative ordering.
//
// For AARCH64 we add explicit barriers in the stubs.

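// Illustrative only - a rough sketch of how shared code reaches these
// specializations via the Atomic:: wrappers declared in atomic.hpp (the
// variable below is hypothetical and the wrapper signatures are assumed):
//
//   volatile int _counter = 0;
//   Atomic::add(&_counter, 1);         // dispatches to PlatformAdd<4>
//   Atomic::xchg(&_counter, 2);        // dispatches to PlatformXchg<4>
//   Atomic::cmpxchg(&_counter, 2, 3);  // dispatches to PlatformCmpxchg<4>
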
template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

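// Expands to the add_and_fetch() specialization for the given operand size,
// forwarding to the named Windows Interlocked stub and converting the operand
// and result with PrimitiveConversions::cast. InterlockedAdd/InterlockedAdd64
// return the value after the addition, which matches add_and_fetch(); the
// derived fetch_and_add() above subtracts add_value again.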
#define DEFINE_STUB_ADD(ByteSize, StubType, StubName)                     \
  template<>                                                              \
  template<typename D, typename I>                                        \
  inline D Atomic::PlatformAdd<ByteSize>::add_and_fetch(D volatile* dest, \
                                                        I add_value,      \
                                                        atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(D));                                 \
    return PrimitiveConversions::cast<D>(                                 \
      StubName(reinterpret_cast<StubType volatile *>(dest),               \
               PrimitiveConversions::cast<StubType>(add_value)));         \
  }

DEFINE_STUB_ADD(4, long,    InterlockedAdd)
DEFINE_STUB_ADD(8, __int64, InterlockedAdd64)

#undef DEFINE_STUB_ADD

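// Expands to the PlatformXchg specialization for the given operand size.
// InterlockedExchange/InterlockedExchange64 return the value previously held
// at dest, which is what Atomic::xchg is specified to return.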
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
                                                      T exchange_value, \
                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return PrimitiveConversions::cast<T>(                               \
      StubName(reinterpret_cast<StubType volatile *>(dest),             \
               PrimitiveConversions::cast<StubType>(exchange_value)));  \
  }

DEFINE_STUB_XCHG(4, long,    InterlockedExchange)
DEFINE_STUB_XCHG(8, __int64, InterlockedExchange64)

#undef DEFINE_STUB_XCHG

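// Expands to the PlatformCmpxchg specialization for the given operand size.
// Note the Interlocked compare-exchange stubs take (dest, exchange, comparand),
// so exchange_value is passed before compare_value; they return the value
// previously held at dest, matching Atomic::cmpxchg's contract.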
#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
                                                         T compare_value,  \
                                                         T exchange_value, \
                                                         atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return PrimitiveConversions::cast<T>(                                  \
      StubName(reinterpret_cast<StubType volatile *>(dest),                \
               PrimitiveConversions::cast<StubType>(exchange_value),       \
               PrimitiveConversions::cast<StubType>(compare_value)));      \
  }

DEFINE_STUB_CMPXCHG(1, char,    _InterlockedCompareExchange8) // Use the intrinsic as InterlockedCompareExchange8 does not exist
DEFINE_STUB_CMPXCHG(4, long,    InterlockedCompareExchange)
DEFINE_STUB_CMPXCHG(8, __int64, InterlockedCompareExchange64)

#undef DEFINE_STUB_CMPXCHG

#endif // OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP