/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value which doesn't interfere with the C++11 std::memory_order
  // enumerators. We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
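  //
  // For illustration only (not part of this interface): a guarded 64-bit
  // update, assuming a hypothetical "volatile jlong _counter" field and a
  // hypothetical "CounterLock" mutex for the fallback path:
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::add((jlong)1, &_counter);   // lock-free 64-bit update
  //   } else {
  //     MutexLocker ml(CounterLock);        // fall back to a lock
  //     _counter++;
  //   }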

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  inline static jint     add    (jint     add_value, volatile jint*     dest);
  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong    add    (jlong    add_value, volatile jlong*    dest);
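  //
  // Illustrative use (a sketch, not part of this interface), assuming
  // hypothetical fields "volatile jint _pending" and "volatile size_t _used":
  //
  //   jint   pending = Atomic::add(2, &_pending);         // returns the updated value
  //   size_t used    = Atomic::add((size_t)4096, &_used);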

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  inline static jint inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static jint dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);
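  //
  // Illustrative use (a sketch, not part of this interface): claiming a
  // one-shot task, assuming a hypothetical field "volatile jint _claimed":
  //
  //   if (Atomic::xchg(1, &_claimed) == 0) {
  //     // this thread saw the prior value 0, so it owns the work
  //   }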

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  inline static jbyte        cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static jint         cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong        cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static unsigned int cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static intptr_t     cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
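  //
  // Illustrative compare-and-swap loop (a sketch, not part of this
  // interface), assuming a hypothetical field "volatile jint _state":
  //
  //   jint old;
  //   do {
  //     old = _state;
  //   } while (Atomic::cmpxchg(old + 1, &_state, old) != old);
  //   // "old" now holds the value this thread replaced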
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
 * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 * implementation to be used instead.
 */
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand, cmpxchg_memory_order order)
{
  assert(sizeof(jbyte) == 1, "assumption.");
  // Locate the aligned jint that contains the target byte, and the byte's
  // offset within that word.
  uintptr_t dest_addr = (uintptr_t)dest;
  uintptr_t offset = dest_addr % sizeof(jint);
  volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
  // Build a copy of the containing word with the new byte spliced in.
  jint cur = *dest_int;
  jbyte* cur_as_bytes = (jbyte*)(&cur);
  jint new_val = cur;
  jbyte* new_val_as_bytes = (jbyte*)(&new_val);
  new_val_as_bytes[offset] = exchange_value;
  // Retry the word-sized cmpxchg until it succeeds or the target byte no
  // longer matches comparand (i.e. another thread changed it).
  while (cur_as_bytes[offset] == comparand) {
    jint res = cmpxchg(new_val, dest_int, cur, order);
    if (res == cur) break;
    cur = res;
    new_val = cur;
    new_val_as_bytes[offset] = exchange_value;
  }
  // As with the other cmpxchg flavors, return the prior value of the byte.
  return cur_as_bytes[offset];
}
#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
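
// A platform with a native byte-wide compare-and-swap opts out of the
// emulation above from its OS_CPU_HEADER(atomic) file, roughly along these
// lines (a sketch only; the actual instruction sequence is platform-specific):
//
//   #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
//   inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
//                                jbyte compare_value, cmpxchg_memory_order order) {
//     // ... issue the platform's one-byte CAS here ...
//   }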

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
                         volatile unsigned int* dest, unsigned int compare_value,
                         cmpxchg_memory_order order) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                       (jint)compare_value, order);
}

inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
  jlong old = load(dest);
  jlong new_value = old + add_value;
  while (old != cmpxchg(new_value, dest, old)) {
    old = load(dest);
    new_value = old + add_value;
  }
  // Per the add() contract above, return the updated value, i.e. the value
  // this thread successfully installed.
  return new_value;
}

inline jint Atomic::inc(volatile jshort* dest) {
  // Most platforms do not support atomic increment on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(0x10000, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(0x10000, (volatile jint*)(dest));
#endif
  return new_value >> 16; // preserves sign
}
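
// For illustration only: a field pair declared with ATOMIC_SHORT_PAIR so that
// the atomically updated short lands in the most significant half of an
// aligned 32-bit word on either endianness (see macros.hpp for the
// authoritative macro definition; the type below is hypothetical):
//
//   struct RefCountedExample {
//     ATOMIC_SHORT_PAIR(
//       volatile jshort _refcount,  // updated via Atomic::inc/Atomic::dec
//       jshort _length              // plain field sharing the 32-bit word
//     );
//   };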

inline jint Atomic::dec(volatile jshort* dest) {
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(-0x10000, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(-0x10000, (volatile jint*)(dest));
#endif
  return new_value >> 16; // preserves sign
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP