/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
#define OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP

#include "runtime/os.hpp"

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, the 68000 or the 68010, since they lack
 * the CAS instruction.
 * Using a kernel helper would be better for an architecture-complete
 * implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                   : "=d" (ret), "+m" (*(ptr))
                   : "d" (newval), "0" (oldval));
  return ret;
}
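
// Illustrative sketch (not part of the original header): given the
// return-value convention documented above, a caller detects success by
// comparing the result against newval.  The variable names here are
// hypothetical:
//
//   volatile int word = 0;
//   int expected = 0, desired = 1;
//   if (__m68k_cmpxchg(expected, desired, &word) == desired) {
//     // The exchange happened: word now holds desired.
//   }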

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int m68k_compare_and_swap(int newval,
                                        volatile int *ptr,
                                        int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory.  */
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns zero if *ptr was changed, or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed, to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
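
// Illustrative note (an assumption about the surrounding port, not part
// of the original header): 0xffff0fc0 is the fixed address at which the
// kernel maps its user-space cmpxchg helper (the "kuser helper" scheme
// used by Linux/ARM); the cast above turns that magic address into an
// ordinary function pointer, so a call site reads like a normal call.
// The names below are hypothetical:
//
//   volatile int word = 0;
//   if (__kernel_cmpxchg(0, 1, &word) == 0) {
//     // The exchange happened: word now holds 1.
//   }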

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int arm_compare_and_swap(int newval,
                                       volatile int *ptr,
                                       int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}
 136 
 137 /* Atomically add an int to memory.  */
 138 static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
 139   for (;;) {
 140       // Loop until a __kernel_cmpxchg succeeds.
 141 
 142       int prev = *ptr;
 143 
 144       if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
 145         return prev + add_value;
 146     }
 147 }
 148 
 149 /* Atomically write VALUE into `*PTR' and returns the previous
 150    contents of `*PTR'.  */
 151 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 152   for (;;) {
 153       // Loop until a __kernel_cmpxchg succeeds.
 154       int prev = *ptr;
 155 
 156       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 157         return prev;
 158     }
 159 }
 160 #endif // ARM

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
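
// Usage sketch (illustrative; it assumes the dest-first Atomic::add
// signature that matches the specializations in this file): callers go
// through the generic Atomic API, which dispatches by operand size:
//
//   volatile int32_t counter = 0;
//   int32_t now = Atomic::add(&counter, 1);   // routes to PlatformAdd<4>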

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}
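
// For reference (a fact about the compiler builtin, not original
// commentary): __sync_add_and_fetch is the legacy GCC/Clang atomic
// builtin; it atomically performs *dest += add_value, acts as a full
// memory barrier, and returns the updated value.  Illustrative:
//
//   volatile int64_t n = 0;
//   int64_t after = __sync_add_and_fetch(&n, 5);   // after == 5, n == 5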

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  T result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}
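
// Usage sketch (illustrative; it assumes the dest-first Atomic::xchg
// signature that matches the specializations in this file):
//
//   volatile int32_t flag = 0;
//   int32_t old = Atomic::xchg(&flag, 1);   // routes to PlatformXchg<4>,
//                                           // returns the prior value (0)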

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
}

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
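
// Illustrative sketch of the emulation (an assumption about what the
// shared CmpxchgByteUsingInt helper does; the helper names here are
// hypothetical, see atomic.hpp for the real implementation): cmpxchg
// the aligned word containing the byte, with only that byte replaced,
// and retry if a neighboring byte changes concurrently:
//
//   for (;;) {
//     uint32_t cur = *aligned_word;                        // containing word
//     if (byte_of(cur, idx) != compare_value)
//       return byte_of(cur, idx);                          // value differs
//     uint32_t next = with_byte(cur, idx, exchange_value); // splice new byte
//     if (__sync_val_compare_and_swap(aligned_word, cur, next) == cur)
//       return compare_value;                              // swap succeeded
//     // else a neighboring byte changed; reload and retry
//   }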

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
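
// For reference (a fact about the compiler builtin, not original
// commentary): __sync_val_compare_and_swap(ptr, expected, desired)
// returns the value *ptr held before the call, so success is detected
// by comparing the result against expected.  Illustrative:
//
//   volatile int64_t v = 0;
//   int64_t observed = __sync_val_compare_and_swap(&v, (int64_t)0, (int64_t)42);
//   bool swapped = (observed == 0);   // true; v now holds 42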

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
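
// Illustrative note (an assumption about the motivation, based on how
// other Zero ports handle 64-bit accesses): on 32-bit targets a plain
// 64-bit load or store may be split into two 32-bit accesses, so both
// specializations above funnel through os::atomic_copy64(), which each
// os_cpu port implements as a single indivisible 64-bit transfer.
// Usage sketch with hypothetical variable names:
//
//   volatile int64_t shared;
//   Atomic::store(&shared, (int64_t)1);   // routes to PlatformStore<8>
//   int64_t seen = Atomic::load(&shared); // routes to PlatformLoad<8>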

#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP