< prev index next >

src/share/vm/runtime/atomic.hpp

Print this page
rev 10933 : 8154736: enhancement of cmpxchg and copy_to_survivor for ppc64
Reviewed-by:
Contributed-by: HORII@jp.ibm.com, mdoerr


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
  26 #define SHARE_VM_RUNTIME_ATOMIC_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 









  30 class Atomic : AllStatic {
  31  private:
  32   static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
  33 
  34  public:
  35   // Atomic operations on jlong types are not available on all 32-bit
  36   // platforms. If atomic ops on jlongs are defined here they must only
  37   // be used from code that verifies they are available at runtime and
  38   // can provide an alternative action if not - see supports_cx8() for
  39   // a means to test availability.
  40 
  41   // The memory operations that are mentioned with each of the atomic
  42   // function families come from src/share/vm/runtime/orderAccess.hpp,
  43   // e.g., <fence> is described in that file and is implemented by the
  44   // OrderAccess::fence() function. See that file for the gory details
  45   // on the Memory Access Ordering Model.
  46 
  47   // All of the atomic operations that imply a read-modify-write action
  48   // guarantee a two-way memory barrier across that operation. Historically
  49   // these semantics reflect the strength of atomic operations that are


  90   // Atomically decrement a location. dec*() provide:
  91   // <fence> decrement-dest <membar StoreLoad|StoreStore>
  92   inline static void dec    (volatile jint*     dest);
  93   static void        dec    (volatile jshort*   dest);
  94   inline static void dec    (volatile size_t*   dest);
  95   inline static void dec_ptr(volatile intptr_t* dest);
  96   inline static void dec_ptr(volatile void*     dest);
  97 
  98   // Performs atomic exchange of *dest with exchange_value. Returns the
  99   // prior value of *dest. xchg*() provide:
 100   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 101   inline static jint     xchg    (jint         exchange_value, volatile jint*         dest);
 102   static unsigned int    xchg    (unsigned int exchange_value, volatile unsigned int* dest);
 103   inline static intptr_t xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
 104   inline static void*    xchg_ptr(void*        exchange_value, volatile void*         dest);
 105 
 106   // Performs atomic compare of *dest and compare_value, and exchanges
 107   // *dest with exchange_value if the comparison succeeded. Returns prior
 108   // value of *dest. cmpxchg*() provide:
 109   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 110   inline static jbyte    cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value);
 111   inline static jint     cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value);
 112   // See comment above about using jlong atomics on 32-bit platforms
 113   inline static jlong    cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value);
 114   static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value);
 115   inline static intptr_t cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value);
 116   inline static void*    cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value);
 117 };
 118 
 119 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
 120 // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
  121 // achieve this is to place your short value next to another short value, which doesn't need atomic ops.
 122 //
 123 // Example
 124 //  ATOMIC_SHORT_PAIR(
 125 //    volatile short _refcount,  // needs atomic operation
 126 //    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
 127 //  );
 128 
 129 #ifdef VM_LITTLE_ENDIAN
 130   #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
 131     non_atomic_decl;                                       \
 132     atomic_decl
 133 #else
 134   #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
 135     atomic_decl;                                           \
 136     non_atomic_decl


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
  26 #define SHARE_VM_RUNTIME_ATOMIC_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 
  30 typedef enum memory_order {     // Ordering constraint accepted by cmpxchg*() below (default is memory_order_seq_cst). Names mirror C++11 std::memory_order — NOTE(review): confirm each platform's cmpxchg honors orders weaker than seq_cst rather than silently strengthening them.
  31   memory_order_relaxed,         // atomicity only, no ordering guarantee
  32   memory_order_consume,         // cf. C++11 memory_order_consume
  33   memory_order_acquire,         // cf. C++11 memory_order_acquire
  34   memory_order_release,         // cf. C++11 memory_order_release
  35   memory_order_acq_rel,         // acquire + release combined
  36   memory_order_seq_cst          // strongest ordering; the historical default for all cmpxchg*() overloads
  37 } memory_order;
  38 
  39 class Atomic : AllStatic {
  40  private:
  41   static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
  42 
  43  public:
  44   // Atomic operations on jlong types are not available on all 32-bit
  45   // platforms. If atomic ops on jlongs are defined here they must only
  46   // be used from code that verifies they are available at runtime and
  47   // can provide an alternative action if not - see supports_cx8() for
  48   // a means to test availability.
  49 
  50   // The memory operations that are mentioned with each of the atomic
  51   // function families come from src/share/vm/runtime/orderAccess.hpp,
  52   // e.g., <fence> is described in that file and is implemented by the
  53   // OrderAccess::fence() function. See that file for the gory details
  54   // on the Memory Access Ordering Model.
  55 
  56   // All of the atomic operations that imply a read-modify-write action
  57   // guarantee a two-way memory barrier across that operation. Historically
  58   // these semantics reflect the strength of atomic operations that are


  99   // Atomically decrement a location. dec*() provide:
 100   // <fence> decrement-dest <membar StoreLoad|StoreStore>
 101   inline static void dec    (volatile jint*     dest);
 102   static void        dec    (volatile jshort*   dest);
 103   inline static void dec    (volatile size_t*   dest);
 104   inline static void dec_ptr(volatile intptr_t* dest);
 105   inline static void dec_ptr(volatile void*     dest);
 106 
  107   // Performs atomic exchange of *dest with exchange_value. Returns the
  108   // prior value of *dest. xchg*() provide:
 109   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
 110   inline static jint     xchg    (jint         exchange_value, volatile jint*         dest);
 111   static unsigned int    xchg    (unsigned int exchange_value, volatile unsigned int* dest);
 112   inline static intptr_t xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
 113   inline static void*    xchg_ptr(void*        exchange_value, volatile void*         dest);
 114 
 115   // Performs atomic compare of *dest and compare_value, and exchanges
 116   // *dest with exchange_value if the comparison succeeded. Returns prior
 117   // value of *dest. cmpxchg*() provide:
 118   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
 119   inline static jbyte    cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, memory_order order = memory_order_seq_cst);
 120   inline static jint     cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, memory_order order = memory_order_seq_cst);
 121   // See comment above about using jlong atomics on 32-bit platforms
 122   inline static jlong    cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, memory_order order = memory_order_seq_cst);
 123   static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, memory_order order = memory_order_seq_cst);
 124   inline static intptr_t cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, memory_order order = memory_order_seq_cst);
 125   inline static void*    cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, memory_order order = memory_order_seq_cst);
 126 };
 127 
 128 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
 129 // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
  130 // achieve this is to place your short value next to another short value, which doesn't need atomic ops.
 131 //
 132 // Example
 133 //  ATOMIC_SHORT_PAIR(
 134 //    volatile short _refcount,  // needs atomic operation
 135 //    unsigned short _length     // number of UTF8 characters in the symbol (does not need atomic op)
 136 //  );
 137 
 138 #ifdef VM_LITTLE_ENDIAN
 139   #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
 140     non_atomic_decl;                                       \
 141     atomic_decl
 142 #else
 143   #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl)  \
 144     atomic_decl;                                           \
 145     non_atomic_decl
< prev index next >