/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP

// Implementation of class atomic

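// Dispatch is by operand size: the Atomic frontend selects a Platform*
// functor specialization (1, 2, 4, or 8 bytes), and each specialization below
// maps the operation onto a single x86 instruction. On x86, lock-prefixed
// instructions (and xchg with a memory operand, which is implicitly locked)
// are full two-way barriers, so the specializations can ignore the requested
// atomic_memory_order and provide the strongest ordering unconditionally.
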
// Returns the value of *dest before the addition.
template<size_t byte_size>
struct Atomic::PlatformFetchAndAdd {
  template<typename D, typename I>
  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
};

// Returns the value of *dest after the addition, derived from the
// fetch-and-add primitive.
template<size_t byte_size>
struct Atomic::PlatformAddAndFetch {
  template<typename D, typename I>
  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
    return Atomic::PlatformFetchAndAdd<byte_size>()(dest, add_value, order) + add_value;
  }
};

// 4-byte fetch-and-add: lock xadd leaves the previous value of *dest in the
// source register, so this is the fetch-and-add primitive (not add-and-fetch,
// which is synthesized from it above).
template<>
template<typename D, typename I>
inline D Atomic::PlatformFetchAndAdd<4>::operator()(D volatile* dest, I add_value,
                                                    atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

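// Example (hypothetical call site): handing out a monotonically increasing
// 4-byte id. This assumes an Atomic::fetch_and_add frontend in
// share/runtime/atomic.hpp that routes to the functor above:
//
//   volatile uint32_t _next_id = 0;
//   uint32_t id = Atomic::fetch_and_add(&_next_id, 1u);  // returns the old value
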
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

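// The typical caller-side pattern built on cmpxchg is a CAS loop. A minimal
// sketch (hypothetical call site; assumes the Atomic::cmpxchg(dest, compare,
// exchange) frontend in share/runtime/atomic.hpp, which routes here):
//
//   volatile int32_t flags = 0;
//   int32_t old;
//   do {
//     old = Atomic::load(&flags);
//   } while (Atomic::cmpxchg(&flags, old, old | 1) != old);
//
// cmpxchg returns the previous value of *dest, so the loop retries until no
// other thread modified *dest between the load and the CAS.
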
#ifdef AMD64

template<>
template<typename D, typename I>
inline D Atomic::PlatformFetchAndAdd<8>::operator()(D volatile* dest, I add_value,
                                                    atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in linux_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

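// On 32-bit x86 no plain mov is 64-bit atomic, so the assembly helpers above
// carry the 8-byte operations. A plausible reading (an assumption; see
// linux_x86.s for the authoritative code) is that _Atomic_cmpxchg_long is
// built on lock cmpxchg8b and _Atomic_move_long on an instruction that moves
// all 64 bits in a single access, such as an x87 or SSE load/store pair.
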
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

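// Ordered stores with a trailing fence. xchg with a memory operand is
// implicitly lock-prefixed on x86, so a single xchg both publishes the store
// and acts as a full barrier, avoiding a separate mov-then-mfence sequence.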
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP