
src/share/vm/runtime/atomic.hpp

rev 10933 : 8154736: enhancement of cmpxchg and copy_to_survivor for ppc64
Reviewed-by:
Contributed-by: HORII@jp.ibm.com, mdoerr

*** 25,34 ****
--- 25,43 ----
  #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
  #define SHARE_VM_RUNTIME_ATOMIC_HPP
  
  #include "memory/allocation.hpp"
  
+ typedef enum memory_order {
+   memory_order_relaxed,
+   memory_order_consume,
+   memory_order_acquire,
+   memory_order_release,
+   memory_order_acq_rel,
+   memory_order_seq_cst
+ } memory_order;
+ 
  class Atomic : AllStatic {
   private:
    static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
  
   public:
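
The enum added above mirrors the C++11 std::memory_order names. As a rough illustration of what the new order argument is meant to select, here is a standalone sketch, not HotSpot code, that maps these enumerators onto the GCC/Clang __atomic builtins, which is one plausible way a platform layer could honor the argument. The helper names (to_success_order, to_failure_order, demo_cmpxchg) are made up for this sketch.

#include <stdio.h>

typedef enum memory_order {
  memory_order_relaxed,
  memory_order_consume,
  memory_order_acquire,
  memory_order_release,
  memory_order_acq_rel,
  memory_order_seq_cst
} memory_order;

// Map the enum onto the builtins' success ordering.
static int to_success_order(memory_order order) {
  switch (order) {
    case memory_order_relaxed: return __ATOMIC_RELAXED;
    case memory_order_consume: return __ATOMIC_CONSUME;
    case memory_order_acquire: return __ATOMIC_ACQUIRE;
    case memory_order_release: return __ATOMIC_RELEASE;
    case memory_order_acq_rel: return __ATOMIC_ACQ_REL;
    default:                   return __ATOMIC_SEQ_CST;
  }
}

// The builtins forbid release/acq_rel as a failure ordering, so weaken those.
static int to_failure_order(memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_release: return __ATOMIC_RELAXED;
    case memory_order_consume: return __ATOMIC_CONSUME;
    case memory_order_acquire:
    case memory_order_acq_rel: return __ATOMIC_ACQUIRE;
    default:                   return __ATOMIC_SEQ_CST;
  }
}

// Same shape as the jint overload declared below: returns the prior value of *dest.
static int demo_cmpxchg(int exchange_value, volatile int* dest, int compare_value,
                        memory_order order) {
  int expected = compare_value;
  // Note: when the order is not a compile-time constant, GCC may generate
  // seq_cst code anyway, which is safe, just possibly stronger than requested.
  __atomic_compare_exchange_n((int*)dest, &expected, exchange_value,
                              0 /* strong CAS */,
                              to_success_order(order), to_failure_order(order));
  return expected;  // prior value, whether or not the exchange happened
}

int main(void) {
  volatile int v = 1;
  printf("%d\n", demo_cmpxchg(2, &v, 1, memory_order_seq_cst));  // prints 1; v becomes 2
  printf("%d\n", demo_cmpxchg(3, &v, 1, memory_order_relaxed));  // prints 2; CAS fails, v stays 2
  return 0;
}

Because the new parameter defaults to memory_order_seq_cst (see the declarations below), existing call sites keep their current semantics; only callers that explicitly pass a weaker order are affected.
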
*** 105,121 ****
    // Performs atomic compare of *dest and compare_value, and exchanges
    // *dest with exchange_value if the comparison succeeded. Returns prior
    // value of *dest. cmpxchg*() provide:
    // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  
!   inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
!   inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
    // See comment above about using jlong atomics on 32-bit platforms
!   inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
!   static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value);
!   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
!   inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value);
  };
  
  // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
  // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
  // achieve is to place your short value next to another short value, which doesn't need atomic ops.
--- 114,130 ----
    // Performs atomic compare of *dest and compare_value, and exchanges
    // *dest with exchange_value if the comparison succeeded. Returns prior
    // value of *dest. cmpxchg*() provide:
    // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  
!   inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, memory_order order = memory_order_seq_cst);
!   inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, memory_order order = memory_order_seq_cst);
    // See comment above about using jlong atomics on 32-bit platforms
!   inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, memory_order order = memory_order_seq_cst);
!   static unsigned int    cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, memory_order order = memory_order_seq_cst);
!   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, memory_order order = memory_order_seq_cst);
!   inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, memory_order order = memory_order_seq_cst);
  };
  
  // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
  // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
  // achieve is to place your short value next to another short value, which doesn't need atomic ops.
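
The RFE title mentions copy_to_survivor, where a CAS that claims an object is the kind of call site that can pass a weaker order explicitly. Below is a minimal, self-contained C++11 sketch of that claiming pattern using std::atomic, not the HotSpot Atomic class; the forwardee variable and claim() helper are made up for illustration, and whether a relaxed ordering is actually sufficient depends on what other data readers of the slot rely on. The sketch only shows the shape of passing a non-default order to a compare-and-exchange.

#include <atomic>
#include <cstdio>

// A single shared slot that many threads race to claim (e.g. a forwarding
// slot): whoever wins the CAS owns the object. Here the CAS result alone
// decides the winner, so the call uses an explicitly relaxed ordering.
static std::atomic<void*> forwardee(nullptr);

// Hypothetical helper: returns true only for the thread that installs its copy.
static bool claim(void* my_copy) {
  void* expected = nullptr;
  return forwardee.compare_exchange_strong(expected, my_copy,
                                           std::memory_order_relaxed,
                                           std::memory_order_relaxed);
}

int main() {
  int copy_a = 0, copy_b = 0;
  std::printf("first claim:  %s\n", claim(&copy_a) ? "won" : "lost");  // won
  std::printf("second claim: %s\n", claim(&copy_b) ? "won" : "lost");  // lost
  return 0;
}
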