src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp
rev 2698 : new bsd files

Old file (linux_zero version):
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
  27 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
  28 
  29 #include "orderAccess_linux_zero.inline.hpp"
  30 #include "runtime/atomic.hpp"
  31 #include "runtime/os.hpp"
  32 #include "vm_version_zero.hpp"
  33 
  34 // Implementation of class atomic
  35 
  36 #ifdef M68K
  37 
  38 /*
  39  * __m68k_cmpxchg
  40  *
  41  * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
  42  * Returns newval on success and oldval if no exchange happened.
  43  * This implementation is processor-specific and works on the
  44  * 68020, 68030, 68040 and 68060.
  45  *
  46  * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
  47  * instruction.
  48  * Using a kernel helper would be better for a complete implementation across the architecture.
  49  *
  50  */
  51 
  52 static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  53   int ret;
  54   __asm __volatile ("cas%.l %0,%2,%1"
  55                    : "=d" (ret), "+m" (*(ptr))
  56                    : "d" (newval), "0" (oldval));
  57   return ret;
  58 }
  59 
  60 /* Perform an atomic compare and swap: if the current value of `*PTR'
  61    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  62    `*PTR' before the operation.*/
  63 static inline int m68k_compare_and_swap(volatile int *ptr,
  64                                         int oldval,
  65                                         int newval) {
  66   for (;;) {
  67       int prev = *ptr;
  68       if (prev != oldval)
  69         return prev;
  70 
  71       if (__m68k_cmpxchg (prev, newval, ptr) == newval)
  72         // Success.
  73         return prev;
  74 
  75       // We failed even though prev == oldval.  Try again.
  76     }
  77 }
  78 
  79 /* Atomically add an int to memory.  */
  80 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  81   for (;;) {
  82       // Loop until success.
  83 
  84       int prev = *ptr;
  85 
  86       if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
  87         return prev + add_value;
  88     }
  89 }
  90 
  91 /* Atomically write VALUE into `*PTR' and returns the previous
  92    contents of `*PTR'.  */
  93 static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  94   for (;;) {
  95       // Loop until success.
  96       int prev = *ptr;
  97 
  98       if (__m68k_cmpxchg (prev, newval, ptr) == prev)
  99         return prev;
 100     }
 101 }
 102 #endif // M68K
 103 
 104 #ifdef ARM
 105 
 106 /*
 107  * __kernel_cmpxchg
 108  *
 109  * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 110  * Return zero if *ptr was changed or non-zero if no exchange happened.
 111  * The C flag is also set if *ptr was changed to allow for assembly
 112  * optimization in the calling code.
 113  *
 114  */
 115 
 116 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 117 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
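
For reference, the #define above treats the fixed address 0xffff0fc0 as a function pointer: on ARM Linux the kernel maps a user-helper page at the top of the 32-bit address space, and 0xffff0fc0 is the entry point of its cmpxchg helper, so a call branches into kernel-provided code suited to the running CPU. A minimal usage sketch (the wrapper name is hypothetical, not part of this file):

    // Sketch only: one cmpxchg attempt through the ARM Linux kernel helper.
    // A zero return means *ptr held oldval and was atomically set to newval.
    static inline bool try_cmpxchg_once(volatile int *ptr, int oldval, int newval) {
      return __kernel_cmpxchg(oldval, newval, ptr) == 0;
    }
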
 118 
 119 
 120 
 121 /* Perform an atomic compare and swap: if the current value of `*PTR'
 122    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
 123    `*PTR' before the operation.*/
 124 static inline int arm_compare_and_swap(volatile int *ptr,
 125                                        int oldval,
 126                                        int newval) {
 127   for (;;) {
 128       int prev = *ptr;
 129       if (prev != oldval)
 130         return prev;
 131 
 132       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 133         // Success.
 134         return prev;
 135 
 136       // We failed even though prev == oldval.  Try again.
 137     }
 138 }
 139 
 140 /* Atomically add an int to memory.  */
 141 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
 142   for (;;) {
 143       // Loop until a __kernel_cmpxchg succeeds.
 144 
 145       int prev = *ptr;
 146 
 147       if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
 148         return prev + add_value;
 149     }
 150 }
 151 
 152 /* Atomically write VALUE into `*PTR' and returns the previous
 153    contents of `*PTR'.  */
 154 static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
 155   for (;;) {
 156       // Loop until a __kernel_cmpxchg succeeds.
 157       int prev = *ptr;
 158 
 159       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 160         return prev;
 161     }
 162 }
 163 #endif // ARM
 164 
 165 inline void Atomic::store(jint store_value, volatile jint* dest) {
 166   *dest = store_value;
 167 }
 168 
 169 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
 170   *dest = store_value;
 171 }
 172 
 173 inline jint Atomic::add(jint add_value, volatile jint* dest) {
 174 #ifdef ARM
 175   return arm_add_and_fetch(dest, add_value);
 176 #else
 177 #ifdef M68K
 178   return m68k_add_and_fetch(dest, add_value);
 179 #else
 180   return __sync_add_and_fetch(dest, add_value);
 181 #endif // M68K
 182 #endif // ARM
 183 }
 184 
 185 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 186 #ifdef ARM
 187   return arm_add_and_fetch(dest, add_value);
 188 #else
 189 #ifdef M68K
 190   return m68k_add_and_fetch(dest, add_value);
 191 #else
 192   return __sync_add_and_fetch(dest, add_value);
 193 #endif // M68K
 194 #endif // ARM
 195 }
 196 
 197 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
 198   return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 199 }
 200 
 201 inline void Atomic::inc(volatile jint* dest) {
 202   add(1, dest);
 203 }
 204 
 205 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 206   add_ptr(1, dest);
 207 }
 208 
 209 inline void Atomic::inc_ptr(volatile void* dest) {
 210   add_ptr(1, dest);
 211 }
 212 
 213 inline void Atomic::dec(volatile jint* dest) {
 214   add(-1, dest);
 215 }
 216 
 217 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
 218   add_ptr(-1, dest);
 219 }
 220 
 221 inline void Atomic::dec_ptr(volatile void* dest) {
 222   add_ptr(-1, dest);
 223 }
 224 
 225 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
 226 #ifdef ARM
 227   return arm_lock_test_and_set(dest, exchange_value);
 228 #else
 229 #ifdef M68K
 230   return m68k_lock_test_and_set(dest, exchange_value);
 231 #else
 232   // __sync_lock_test_and_set is a bizarrely named atomic exchange
 233   // operation.  Note that some platforms only support this with the
 234   // limitation that the only valid value to store is the immediate
 235   // constant 1.  There is a test for this in JNI_CreateJavaVM().
 236   return __sync_lock_test_and_set (dest, exchange_value);
 237 #endif // M68K
 238 #endif // ARM
 239 }
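
The comment above notes that some platforms implement __sync_lock_test_and_set only for the constant 1 and that JNI_CreateJavaVM() tests for this; a minimal sketch of that kind of start-up sanity check (hypothetical helper, not the actual code in JNI_CreateJavaVM()):

    // Sketch only: exchange a value other than 1 and verify both the old value
    // returned and the value left in memory.
    static bool xchg_supports_arbitrary_values() {
      volatile jint cell = 0;
      jint old = Atomic::xchg(42, &cell);
      return old == 0 && cell == 42;
    }
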
 240 
 241 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 242                                  volatile intptr_t* dest) {
 243 #ifdef ARM
 244   return arm_lock_test_and_set(dest, exchange_value);
 245 #else
 246 #ifdef M68K
 247   return m68k_lock_test_and_set(dest, exchange_value);
 248 #else
 249   return __sync_lock_test_and_set (dest, exchange_value);
 250 #endif // M68K
 251 #endif // ARM
 252 }
 253 
 254 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 255   return (void *) xchg_ptr((intptr_t) exchange_value,
 256                            (volatile intptr_t*) dest);
 257 }
 258 
 259 inline jint Atomic::cmpxchg(jint exchange_value,
 260                             volatile jint* dest,
 261                             jint compare_value) {
 262 #ifdef ARM
 263   return arm_compare_and_swap(dest, compare_value, exchange_value);
 264 #else
 265 #ifdef M68K
 266   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 267 #else
 268   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 269 #endif // M68K
 270 #endif // ARM
 271 }
 272 
 273 inline jlong Atomic::cmpxchg(jlong exchange_value,
 274                              volatile jlong* dest,
 275                              jlong compare_value) {
 276 
 277   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 278 }
 279 
 280 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
 281                                     volatile intptr_t* dest,
 282                                     intptr_t compare_value) {
 283 #ifdef ARM
 284   return arm_compare_and_swap(dest, compare_value, exchange_value);
 285 #else
 286 #ifdef M68K
 287   return m68k_compare_and_swap(dest, compare_value, exchange_value);
 288 #else
 289   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 290 #endif // M68K
 291 #endif // ARM
 292 }
 293 
 294 inline void* Atomic::cmpxchg_ptr(void* exchange_value,
 295                                  volatile void* dest,
 296                                  void* compare_value) {
 297 
 298   return (void *) cmpxchg_ptr((intptr_t) exchange_value,
 299                               (volatile intptr_t*) dest,
 300                               (intptr_t) compare_value);
 301 }
 302 
 303 inline jlong Atomic::load(volatile jlong* src) {
 304   volatile jlong dest;
 305   os::atomic_copy64(src, &dest);
 306   return dest;
 307 }
 308 
 309 inline void Atomic::store(jlong store_value, jlong* dest) {
 310   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 311 }
 312 
 313 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 314   os::atomic_copy64((volatile jlong*)&store_value, dest);
 315 }
 316 
 317 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP

New file (bsd_zero version):

   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
  27 #define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
  28 
  29 #include "orderAccess_bsd_zero.inline.hpp"
  30 #include "runtime/atomic.hpp"
  31 #include "runtime/os.hpp"
  32 #include "vm_version_zero.hpp"
  33 
  34 #include <sys/types.h>
  35 #ifdef __NetBSD__
  36 #include <sys/atomic.h>
  37 #elif __FreeBSD__
  38 
  39 #include <sys/types.h>
  40 #ifndef SPARC
  41 #include <machine/atomic.h>
  42 #else
  43 
  44 /*
  45  * On FreeBSD/sparc64, <machine/atomic.h> pulls in <machine/cpufunc.h>
  46  * which includes definitions which cause conflicts with various
  47  * definitions within HotSpot source.  To avoid that, pull in those
  48  * definitions verbatim instead of including the header.  Yuck.
  49  */
  50 
  51 /*-
  52  * Copyright (c) 1998 Doug Rabson.
  53  * Copyright (c) 2001 Jake Burkholder.
  54  * All rights reserved.
  55  *
  56  * Redistribution and use in source and binary forms, with or without
  57  * modification, are permitted provided that the following conditions
  58  * are met:
  59  * 1. Redistributions of source code must retain the above copyright
  60  *    notice, this list of conditions and the following disclaimer.
  61  * 2. Redistributions in binary form must reproduce the above copyright
  62  *    notice, this list of conditions and the following disclaimer in the
  63  *    documentation and/or other materials provided with the distribution.
  64  *
  65  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  75  * SUCH DAMAGE.
  76  *
  77  */
  78 
  79 #include <machine/asi.h>
  80 
  81 /*
  82  * Membar operand macros for use in other macros when # is a special
  83  * character.  Keep these in sync with what the hardware expects.
  84  */
  85 #define M_LoadLoad      (0)
  86 #define M_StoreLoad     (1)
  87 #define M_LoadStore     (2)
  88 #define M_StoreStore    (3)
  89 
  90 #define CMASK_SHIFT     (4)
  91 #define MMASK_SHIFT     (0)
  92 
  93 #define CMASK_GEN(bit)  ((1 << (bit)) << CMASK_SHIFT)
  94 #define MMASK_GEN(bit)  ((1 << (bit)) << MMASK_SHIFT)
  95 
  96 #define LoadLoad        MMASK_GEN(M_LoadLoad)
  97 #define StoreLoad       MMASK_GEN(M_StoreLoad)
  98 #define LoadStore       MMASK_GEN(M_LoadStore)
  99 #define StoreStore      MMASK_GEN(M_StoreStore)
 100 
 101 #define casa(rs1, rs2, rd, asi) ({                                      \
 102         u_int __rd = (uint32_t)(rd);                                    \
 103         __asm __volatile("casa [%2] %3, %4, %0"                         \
 104             : "+r" (__rd), "=m" (*rs1)                                  \
 105             : "r" (rs1), "n" (asi), "r" (rs2), "m" (*rs1));             \
 106         __rd;                                                           \
 107 })
 108 
 109 #define casxa(rs1, rs2, rd, asi) ({                                     \
 110         u_long __rd = (uint64_t)(rd);                                   \
 111         __asm __volatile("casxa [%2] %3, %4, %0"                        \
 112             : "+r" (__rd), "=m" (*rs1)                                  \
 113             : "r" (rs1), "n" (asi), "r" (rs2), "m" (*rs1));             \
 114         __rd;                                                           \
 115 })
 116 
 117 #define membar(mask) do {                                               \
 118         __asm __volatile("membar %0" : : "n" (mask) : "memory");        \
 119 } while (0)
 120 
 121 #ifdef _KERNEL
 122 #define __ASI_ATOMIC    ASI_N
 123 #else
 124 #define __ASI_ATOMIC    ASI_P
 125 #endif
 126 
 127 #define mb()    __asm__ __volatile__ ("membar #MemIssue": : :"memory")
 128 #define wmb()   mb()
 129 #define rmb()   mb()
 130 
 131 /*
 132  * Various simple arithmetic on memory which is atomic in the presence
 133  * of interrupts and multiple processors.  See atomic(9) for details.
 134  * Note that efficient hardware support exists only for the 32 and 64
 135  * bit variants; the 8 and 16 bit versions are not provided and should
 136  * not be used in MI code.
 137  *
 138  * This implementation takes advantage of the fact that the sparc64
 139  * cas instruction is both a load and a store.  The loop is often coded
 140  * as follows:
 141  *
 142  *      do {
 143  *              expect = *p;
 144  *              new = expect + 1;
 145  *      } while (cas(p, expect, new) != expect);
 146  *
 147  * which performs an unnecessary load on each iteration that the cas
 148  * operation fails.  Modified as follows:
 149  *
 150  *      expect = *p;
 151  *      for (;;) {
 152  *              new = expect + 1;
 153  *              result = cas(p, expect, new);
 154  *              if (result == expect)
 155  *                      break;
 156  *              expect = result;
 157  *      }
 158  *
 159  * the return value of cas is used to avoid the extra reload.
 160  *
 161  * The memory barriers provided by the acq and rel variants are intended
 162  * to be sufficient for use of relaxed memory ordering.  Due to the
 163  * suggested assembly syntax of the membar operands containing a #
 164  * character, they cannot be used in macros.  The cmask and mmask bits
 165  * are hard coded in machine/cpufunc.h and used here through macros.
 166  * Hopefully sun will choose not to change the bit numbers.
 167  */
 168 
 169 #define itype(sz)       uint ## sz ## _t
 170 
 171 #define atomic_cas_32(p, e, s)  casa(p, e, s, __ASI_ATOMIC)
 172 #define atomic_cas_64(p, e, s)  casxa(p, e, s, __ASI_ATOMIC)
 173 
 174 #define atomic_cas(p, e, s, sz)                                         \
 175         atomic_cas_ ## sz(p, e, s)
 176 
 177 #define atomic_cas_acq(p, e, s, sz) ({                                  \
 178         itype(sz) v;                                                    \
 179         v = atomic_cas(p, e, s, sz);                                    \
 180         membar(LoadLoad | LoadStore);                                   \
 181         v;                                                              \
 182 })
 183 
 184 #define atomic_cas_rel(p, e, s, sz) ({                                  \
 185         itype(sz) v;                                                    \
 186         membar(LoadStore | StoreStore);                                 \
 187         v = atomic_cas(p, e, s, sz);                                    \
 188         v;                                                              \
 189 })
 190 
 191 #define atomic_op(p, op, v, sz) ({                                      \
 192         itype(sz) e, r, s;                                              \
 193         for (e = *(volatile itype(sz) *)p;; e = r) {                    \
 194                 s = e op v;                                             \
 195                 r = atomic_cas_ ## sz(p, e, s);                         \
 196                 if (r == e)                                             \
 197                         break;                                          \
 198         }                                                               \
 199         e;                                                              \
 200 })
 201 
 202 #define atomic_op_acq(p, op, v, sz) ({                                  \
 203         itype(sz) t;                                                    \
 204         t = atomic_op(p, op, v, sz);                                    \
 205         membar(LoadLoad | LoadStore);                                   \
 206         t;                                                              \
 207 })
 208 
 209 #define atomic_op_rel(p, op, v, sz) ({                                  \
 210         itype(sz) t;                                                    \
 211         membar(LoadStore | StoreStore);                                 \
 212         t = atomic_op(p, op, v, sz);                                    \
 213         t;                                                              \
 214 })
 215 
 216 #define atomic_load(p, sz)                                              \
 217         atomic_cas(p, 0, 0, sz)
 218 
 219 #define atomic_load_acq(p, sz) ({                                       \
 220         itype(sz) v;                                                    \
 221         v = atomic_load(p, sz);                                         \
 222         membar(LoadLoad | LoadStore);                                   \
 223         v;                                                              \
 224 })
 225 
 226 #define atomic_load_clear(p, sz) ({                                     \
 227         itype(sz) e, r;                                                 \
 228         for (e = *(volatile itype(sz) *)p;; e = r) {                    \
 229                 r = atomic_cas(p, e, 0, sz);                            \
 230                 if (r == e)                                             \
 231                         break;                                          \
 232         }                                                               \
 233         e;                                                              \
 234 })
 235 
 236 #define atomic_store(p, v, sz) do {                                     \
 237         itype(sz) e, r;                                                 \
 238         for (e = *(volatile itype(sz) *)p;; e = r) {                    \
 239                 r = atomic_cas(p, e, v, sz);                            \
 240                 if (r == e)                                             \
 241                         break;                                          \
 242         }                                                               \
 243 } while (0)
 244 
 245 #define atomic_store_rel(p, v, sz) do {                                 \
 246         membar(LoadStore | StoreStore);                                 \
 247         atomic_store(p, v, sz);                                         \
 248 } while (0)
 249 
 250 #define ATOMIC_GEN(name, ptype, vtype, atype, sz)                       \
 251                                                                         \
 252 static __inline vtype                                                   \
 253 atomic_add_ ## name(volatile ptype p, atype v)                          \
 254 {                                                                       \
 255         return ((vtype)atomic_op(p, +, v, sz));                         \
 256 }                                                                       \
 257 static __inline vtype                                                   \
 258 atomic_add_acq_ ## name(volatile ptype p, atype v)                      \
 259 {                                                                       \
 260         return ((vtype)atomic_op_acq(p, +, v, sz));                     \
 261 }                                                                       \
 262 static __inline vtype                                                   \
 263 atomic_add_rel_ ## name(volatile ptype p, atype v)                      \
 264 {                                                                       \
 265         return ((vtype)atomic_op_rel(p, +, v, sz));                     \
 266 }                                                                       \
 267                                                                         \
 268 static __inline int                                                     \
 269 atomic_cmpset_ ## name(volatile ptype p, vtype e, vtype s)              \
 270 {                                                                       \
 271         return (((vtype)atomic_cas(p, e, s, sz)) == e);                 \
 272 }                                                                       \
 273 static __inline int                                                     \
 274 atomic_cmpset_acq_ ## name(volatile ptype p, vtype e, vtype s)          \
 275 {                                                                       \
 276         return (((vtype)atomic_cas_acq(p, e, s, sz)) == e);             \
 277 }                                                                       \
 278 static __inline int                                                     \
 279 atomic_cmpset_rel_ ## name(volatile ptype p, vtype e, vtype s)          \
 280 {                                                                       \
 281         return (((vtype)atomic_cas_rel(p, e, s, sz)) == e);             \
 282 }                                                                       \
 283                                                                         \
 284 static __inline vtype                                                   \
 285 atomic_load_ ## name(volatile ptype p)                                  \
 286 {                                                                       \
 287         return ((vtype)atomic_cas(p, 0, 0, sz));                        \
 288 }                                                                       \
 289 static __inline vtype                                                   \
 290 atomic_load_acq_ ## name(volatile ptype p)                              \
 291 {                                                                       \
 292         return ((vtype)atomic_cas_acq(p, 0, 0, sz));                    \
 293 }                                                                       \
 294                                                                         \
 295 static __inline void                                                    \
 296 atomic_store_ ## name(volatile ptype p, vtype v)                        \
 297 {                                                                       \
 298         atomic_store(p, v, sz);                                         \
 299 }                                                                       \
 300 static __inline void                                                    \
 301 atomic_store_rel_ ## name(volatile ptype p, vtype v)                    \
 302 {                                                                       \
 303         atomic_store_rel(p, v, sz);                                     \
 304 }
 305 
 306 inline jlong Atomic::load(volatile jlong* src) {
 307   volatile jlong dest;
 308   os::atomic_copy64(src, &dest);
 309   return dest;
 310 }
 311 
 312 inline void Atomic::store(jlong store_value, jlong* dest) {
 313   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 314 }
 315 
 316 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 317   os::atomic_copy64((volatile jlong*)&store_value, dest);
 318 }
 319 
 320 ATOMIC_GEN(int, u_int *, u_int, u_int, 32);
 321 ATOMIC_GEN(32, uint32_t *, uint32_t, uint32_t, 32);
 322 
 323 ATOMIC_GEN(long, u_long *, u_long, u_long, 64);
 324 ATOMIC_GEN(64, uint64_t *, uint64_t, uint64_t, 64);
 325 
 326 ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64);
 327 
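
As an illustration of what the generator above produces, this is roughly what ATOMIC_GEN(int, u_int *, u_int, u_int, 32) yields for the plain add variant once atomic_op and atomic_cas_32 are expanded (a sketch for exposition, not text from the patch):

    static __inline u_int
    atomic_add_int(volatile u_int *p, u_int v)
    {
            uint32_t e, r, s;
            for (e = *(volatile uint32_t *)p;; e = r) {  /* reload only via the cas result */
                    s = e + v;                           /* value we want to install       */
                    r = casa(p, e, s, __ASI_ATOMIC);     /* 32-bit compare-and-swap        */
                    if (r == e)                          /* cas saw the expected value,    */
                            break;                       /* so the store took effect       */
            }
            return ((u_int)e);                           /* value *p held before the add   */
    }
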
 328 #define atomic_fetchadd_int     atomic_add_int
 329 #define atomic_fetchadd_32      atomic_add_32
 330 #define atomic_fetchadd_long    atomic_add_long
 331 
 332 #undef ATOMIC_GEN
 333 #undef atomic_cas
 334 #undef atomic_cas_acq
 335 #undef atomic_cas_rel
 336 #undef atomic_op
 337 #undef atomic_op_acq
 338 #undef atomic_op_rel
 339 #undef atomic_load_acq
 340 #undef atomic_store_rel
 341 #undef atomic_load_clear
 342 #endif
 343 
 344 static __inline __attribute__((__always_inline__))
 345 unsigned int atomic_add_int_nv(volatile unsigned int* dest, unsigned int add_value)
 346 {
 347   atomic_add_acq_int(dest, add_value);
 348   return *dest;
 349 }
 350 
 351 static __inline __attribute__((__always_inline__))
 352 uintptr_t atomic_add_ptr_nv(volatile intptr_t* dest, intptr_t add_value)
 353 {
 354   atomic_add_acq_ptr((volatile uintptr_t*) dest, (uintptr_t) add_value);
 355   return *((volatile uintptr_t*) dest);
 356 }
 357 
 358 static __inline __attribute__((__always_inline__))
 359 unsigned int
 360 atomic_swap_uint(volatile unsigned int *dest, unsigned int exchange_value)
 361 {
 362   jint prev = *dest;
 363   atomic_store_rel_int(dest, exchange_value);
 364   return prev;
 365 }
 366 
 367 static __inline __attribute__((__always_inline__))
 368 void *
 369 atomic_swap_ptr(volatile void *dest, void *exchange_value)
 370 {
 371   void *prev = *(void **)dest;
 372   atomic_store_rel_ptr((volatile uintptr_t*) dest, (uintptr_t) exchange_value);
 373   return prev;
 374 }
 375 
 376 static __inline __attribute__((__always_inline__))
 377 unsigned int
 378 atomic_cas_uint(volatile unsigned int *dest, unsigned int compare_value,
 379   unsigned int exchange_value)
 380 {
 381   unsigned int prev = *dest;
 382   atomic_cmpset_acq_int(dest, compare_value, exchange_value);
 383   return prev;
 384 }
 385 
 386 static __inline __attribute__((__always_inline__))
 387 unsigned long
 388 atomic_cas_ulong(volatile unsigned long *dest, unsigned long compare_value,
 389   unsigned long exchange_value)
 390 {
 391   unsigned long prev = *dest;
 392   atomic_cmpset_acq_long(dest, compare_value, exchange_value);
 393   return prev;
 394 }
 395 
 396 static __inline __attribute__((__always_inline__))
 397 void *
 398 atomic_cas_ptr(volatile void *dest, void *compare_value, void *exchange_value)
 399 {
 400   void *prev = *(void **)dest;
 401   atomic_cmpset_acq_ptr((volatile uintptr_t*) dest, (uintptr_t) compare_value, (uintptr_t) exchange_value);
 402   return prev;
 403 }
 404 
 405 #elif defined(__APPLE__)
 406 
 407 #include <libkern/OSAtomic.h>
 408 
 409 static __inline __attribute__((__always_inline__))
 410 unsigned int
 411 atomic_add_int_nv(volatile unsigned int *target, int delta) {
 412   return (unsigned int) OSAtomicAdd32Barrier(delta, (volatile int32_t *) target);
 413 }
 414 
 415 static __inline __attribute__((__always_inline__))
 416 void *
 417 atomic_add_ptr_nv(volatile void *target, ssize_t delta) {
 418 #ifdef __LP64__
 419   return (void *) OSAtomicAdd64Barrier(delta, (volatile int64_t *) target);
 420 #else
 421   return (void *) OSAtomicAdd32Barrier(delta, (volatile int32_t *) target);
 422 #endif
 423 }
 424 
 425 
 426 static __inline __attribute__((__always_inline__))
 427 unsigned int
 428 atomic_swap_uint(volatile unsigned int *dest, unsigned int exchange_value)
 429 {
 430   /* No xchg support in OSAtomic */
 431   unsigned int prev;
 432   do {
 433     prev = *dest;
 434   } while (!OSAtomicCompareAndSwapIntBarrier((int) prev, (int) exchange_value, (volatile int *) dest));
 435 
 436   return prev;
 437 }
 438 
 439 static __inline __attribute__((__always_inline__))
 440 void *
 441 atomic_swap_ptr(volatile void *dest, void *exchange_value)
 442 {
 443   /* No xchg support in OSAtomic */
 444   void *prev;
 445   do {
 446     prev = *((void * volatile *) dest);
 447   } while (!OSAtomicCompareAndSwapPtrBarrier(prev, exchange_value, (void * volatile *) dest));
 448 
 449   return prev;
 450 }
 451 
 452 static __inline __attribute__((__always_inline__))
 453 unsigned int
 454 atomic_cas_uint(volatile unsigned int *dest, unsigned int compare_value,
 455   unsigned int exchange_value)
 456 {
 457   unsigned int prev = *dest;
 458   OSAtomicCompareAndSwapIntBarrier(compare_value, exchange_value, (volatile int *) dest);
 459   return prev;
 460 }
 461 
 462 static __inline __attribute__((__always_inline__))
 463 unsigned long
 464 atomic_cas_ulong(volatile unsigned long *dest, unsigned long compare_value,
 465   unsigned long exchange_value)
 466 {
 467   unsigned long prev = *dest;
 468   OSAtomicCompareAndSwapLongBarrier(compare_value, exchange_value, (volatile long *) dest);
 469   return prev;
 470 }
 471 
 472 static __inline __attribute__((__always_inline__))
 473 void *
 474 atomic_cas_ptr(volatile void *dest, void *compare_value, void *exchange_value)
 475 {
 476   void *prev = *(void **)dest;
 477   OSAtomicCompareAndSwapPtrBarrier(compare_value, exchange_value, (void * volatile *) dest);
 478   return prev;
 479 }
 480 
 481 
 482 #endif
 483 
 484 inline void Atomic::store(jint store_value, volatile jint* dest) {
 485   *dest = store_value;
 486 }
 487 
 488 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
 489   *dest = store_value;
 490 }
 491 
 492 inline jint Atomic::add(jint add_value, volatile jint* dest) {
 493   return (jint)atomic_add_int_nv((volatile unsigned int*) dest, add_value);
 494 }
 495 
 496 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 497   return (intptr_t)atomic_add_ptr_nv(dest, add_value);
 498 }
 499 
 500 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
 501   return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 502 }
 503 
 504 inline void Atomic::inc(volatile jint* dest) {
 505   add(1, dest);
 506 }
 507 
 508 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 509   add_ptr(1, dest);
 510 }
 511 
 512 inline void Atomic::inc_ptr(volatile void* dest) {
 513   add_ptr(1, dest);
 514 }
 515 
 516 inline void Atomic::dec(volatile jint* dest) {
 517   add(-1, dest);
 518 }
 519 
 520 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
 521   add_ptr(-1, dest);
 522 }
 523 
 524 inline void Atomic::dec_ptr(volatile void* dest) {
 525   add_ptr(-1, dest);
 526 }
 527 
 528 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
 529   return (jint)atomic_swap_uint((volatile u_int *)dest, (u_int)exchange_value);
 530 }
 531 
 532 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
 533                                  volatile intptr_t* dest) {
 534   return (intptr_t)atomic_swap_ptr((volatile void *)dest,
 535     (void *)exchange_value);
 536 }
 537 
 538 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 539   return atomic_swap_ptr(dest, exchange_value);
 540 }
 541 
 542 inline jint Atomic::cmpxchg(jint exchange_value,
 543                             volatile jint* dest,
 544                             jint compare_value) {
 545   return atomic_cas_uint((volatile u_int *)dest, compare_value, exchange_value);
 546 }
 547 
 548 inline jlong Atomic::cmpxchg(jlong exchange_value,
 549                              volatile jlong* dest,
 550                              jlong compare_value) {
 551   return atomic_cas_ulong((volatile u_long *)dest, compare_value,
 552     exchange_value);
 553 }
 554 
 555 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
 556                                     volatile intptr_t* dest,
 557                                     intptr_t compare_value) {
 558   return (intptr_t)atomic_cas_ptr((volatile void *)dest, (void *)compare_value,
 559       (void *)exchange_value);
 560 }
 561 
 562 inline void* Atomic::cmpxchg_ptr(void* exchange_value,
 563                                  volatile void* dest,
 564                                  void* compare_value) {
 565   return atomic_cas_ptr((volatile void *)dest, compare_value, exchange_value);
 566 }
 567 
 568 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP