1 /*
   2  * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
  26 #define SHARE_VM_RUNTIME_ATOMIC_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 
  30 typedef enum cmpxchg_cmpxchg_memory_order {
  31   memory_order_relaxed,
  32   // Use value which doesn't interfere with C++2011. We need to be more conservative.
  33   memory_order_conservative = 8
  34 } cmpxchg_memory_order;
  35 
// All-static facade over the platform-specific atomic primitives. The
// inline members are implemented per-platform (in the atomic.inline.hpp
// family); members declared here without "inline" are presumably defined
// once in shared code in terms of the inline primitives -- confirm in the
// corresponding .cpp/.inline.hpp files.
class Atomic : AllStatic {
 private:
  // Shared fallback used to build a jbyte cmpxchg on platforms whose
  // hardware lacks a native byte-wide compare-and-swap (operates on the
  // containing word) -- see the out-of-line definition for details.
  static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);

 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  inline static jint     add    (jint     add_value, volatile jint*     dest);
  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  // (non-inline: shared out-of-line definition rather than a per-platform
  // primitive)
  static jlong           add    (jlong    add_value, volatile jlong*    dest);

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  // jshort variant is out-of-line; it requires the ATOMIC_SHORT_PAIR
  // placement described at the bottom of this file.
  static void        inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  // jshort variant is out-of-line; see ATOMIC_SHORT_PAIR below.
  static void        dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns old
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint     xchg    (jint         exchange_value, volatile jint*         dest);
  static unsigned int    xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*    xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  // All overloads default to memory_order_conservative (full barrier);
  // pass memory_order_relaxed only where weaker ordering is proven safe.
  inline static jbyte    cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static jint     cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong    cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static intptr_t cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static void*    cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
};
 124 
// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
// achieve this is to place your short value next to another short value, which doesn't need atomic ops.
 128 //
 129 // Example
 130 //  ATOMIC_SHORT_PAIR(
 131 //    volatile short _refcount,  // needs atomic operation
 132 //    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
 133 //  );
 134 
#ifdef VM_LITTLE_ENDIAN
  // Little-endian: the upper 16 bits of the aligned 32-bit word live at the
  // higher address, so the atomic short is declared second (after the
  // non-atomic one) to land in the required position.
  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
    non_atomic_decl;                                       \
    atomic_decl
#else
  // Big-endian: the upper 16 bits live at the lower address, so the atomic
  // short is declared first.
  #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
    atomic_decl;                                           \
    non_atomic_decl
#endif
 144 
 145 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP