
src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp

rev 13266 : imported patch Atomic_refactoring

*** 157,181 ****
      return prev;
    }
  }
  #endif // ARM

! inline void Atomic::store(jint store_value, volatile jint* dest) {
! #if !defined(ARM) && !defined(M68K)
!   __sync_synchronize();
! #endif
!   *dest = store_value;
! }
!
! inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  #if !defined(ARM) && !defined(M68K)
    __sync_synchronize();
  #endif
    *dest = store_value;
  }

! inline jint Atomic::add(jint add_value, volatile jint* dest) {
  #ifdef ARM
    return arm_add_and_fetch(dest, add_value);
  #else
  #ifdef M68K
    return m68k_add_and_fetch(dest, add_value);
--- 157,178 ----
      return prev;
    }
  }
  #endif // ARM

! #ifdef _LP64
! template<>
! inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
  #if !defined(ARM) && !defined(M68K)
    __sync_synchronize();
  #endif
    *dest = store_value;
  }

! template <>
! inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  #ifdef ARM
    return arm_add_and_fetch(dest, add_value);
  #else
  #ifdef M68K
    return m68k_add_and_fetch(dest, add_value);
*** 183,278 ****
    return __sync_add_and_fetch(dest, add_value);
  #endif // M68K
  #endif // ARM
  }

! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  #ifdef ARM
!   return arm_add_and_fetch(dest, add_value);
  #else
  #ifdef M68K
!   return m68k_add_and_fetch(dest, add_value);
  #else
!   return __sync_add_and_fetch(dest, add_value);
  #endif // M68K
  #endif // ARM
  }

! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
! }
!
! inline void Atomic::inc(volatile jint* dest) {
!   add(1, dest);
  }

! inline void Atomic::inc_ptr(volatile intptr_t* dest) {
!   add_ptr(1, dest);
  }

! inline void Atomic::inc_ptr(volatile void* dest) {
!   add_ptr(1, dest);
! }
!
! inline void Atomic::dec(volatile jint* dest) {
!   add(-1, dest);
  }

! inline void Atomic::dec_ptr(volatile intptr_t* dest) {
!   add_ptr(-1, dest);
  }

- inline void Atomic::dec_ptr(volatile void* dest) {
-   add_ptr(-1, dest);
- }
-
! inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
  #ifdef ARM
    return arm_lock_test_and_set(dest, exchange_value);
  #else
  #ifdef M68K
    return m68k_lock_test_and_set(dest, exchange_value);
  #else
    // __sync_lock_test_and_set is a bizarrely named atomic exchange
    // operation.  Note that some platforms only support this with the
    // limitation that the only valid value to store is the immediate
    // constant 1.  There is a test for this in JNI_CreateJavaVM().
!   jint result = __sync_lock_test_and_set (dest, exchange_value);
    // All atomic operations are expected to be full memory barriers
    // (see atomic.hpp). However, __sync_lock_test_and_set is not
    // a full memory barrier, but an acquire barrier. Hence, this added
    // barrier.
    __sync_synchronize();
    return result;
  #endif // M68K
  #endif // ARM
  }

- inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                  volatile intptr_t* dest) {
- #ifdef ARM
-   return arm_lock_test_and_set(dest, exchange_value);
- #else
- #ifdef M68K
-   return m68k_lock_test_and_set(dest, exchange_value);
- #else
-   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-   __sync_synchronize();
-   return result;
- #endif // M68K
- #endif // ARM
- }
-
! inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
!   return (void *) xchg_ptr((intptr_t) exchange_value,
!                            (volatile intptr_t*) dest);
! }
!
! inline jint Atomic::cmpxchg(jint exchange_value,
!                             volatile jint* dest,
!                             jint compare_value,
!                             cmpxchg_memory_order order) {
  #ifdef ARM
    return arm_compare_and_swap(dest, compare_value, exchange_value);
  #else
  #ifdef M68K
    return m68k_compare_and_swap(dest, compare_value, exchange_value);
--- 180,268 ----
    return __sync_add_and_fetch(dest, add_value);
  #endif // M68K
  #endif // ARM
  }

! template <>
! inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  #ifdef ARM
!   return arm_lock_test_and_set(dest, exchange_value);
  #else
  #ifdef M68K
!   return m68k_lock_test_and_set(dest, exchange_value);
  #else
!   intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
!   __sync_synchronize();
!   return result;
  #endif // M68K
  #endif // ARM
  }

! #else // _LP64
!
! template<>
! inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
!   volatile int64_t dest;
!   os::atomic_copy64(src, &dest);
!   return dest;
  }

! template<>
! inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
!   os::atomic_copy64((volatile int64_t*)&store_value, dest);
  }

! #endif // _LP64
!
! template<>
! inline void Atomic::specialized_store<int32_t>(int32_t store_value, volatile int32_t* dest) {
! #if !defined(ARM) && !defined(M68K)
!   __sync_synchronize();
! #endif
!   *dest = store_value;
  }

! template <>
! inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
! #ifdef ARM
!   return arm_add_and_fetch(dest, add_value);
! #else
! #ifdef M68K
!   return m68k_add_and_fetch(dest, add_value);
! #else
!   return __sync_add_and_fetch(dest, add_value);
! #endif // M68K
! #endif // ARM
  }

! template <>
! inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  #ifdef ARM
    return arm_lock_test_and_set(dest, exchange_value);
  #else
  #ifdef M68K
    return m68k_lock_test_and_set(dest, exchange_value);
  #else
    // __sync_lock_test_and_set is a bizarrely named atomic exchange
    // operation.  Note that some platforms only support this with the
    // limitation that the only valid value to store is the immediate
    // constant 1.  There is a test for this in JNI_CreateJavaVM().
!   int32_t result = __sync_lock_test_and_set (dest, exchange_value);
    // All atomic operations are expected to be full memory barriers
    // (see atomic.hpp). However, __sync_lock_test_and_set is not
    // a full memory barrier, but an acquire barrier. Hence, this added
    // barrier.
    __sync_synchronize();
    return result;
  #endif // M68K
  #endif // ARM
  }

! template <>
! inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  #ifdef ARM
    return arm_compare_and_swap(dest, compare_value, exchange_value);
  #else
  #ifdef M68K
    return m68k_compare_and_swap(dest, compare_value, exchange_value);
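
A side note on the generic (non-ARM/M68K) xchg path retained above: __sync_lock_test_and_set is only an acquire barrier, so the code adds an explicit __sync_synchronize() to meet Atomic's full-barrier contract. The following standalone sketch is not part of the patch and uses invented names; it shows the same pattern in isolation, assuming a GCC-compatible compiler that provides the legacy __sync builtins.

// Illustration only: mirrors the generic __sync-based xchg path above.
#include <cassert>
#include <cstdint>

// Hypothetical helper shaped like specialized_xchg<int32_t> on the generic path.
static inline int32_t sketch_xchg(int32_t exchange_value, volatile int32_t* dest) {
  // Atomically stores exchange_value and returns the previous contents,
  // but only with acquire semantics...
  int32_t result = __sync_lock_test_and_set(dest, exchange_value);
  // ...so a full fence follows to provide the expected full memory barrier.
  __sync_synchronize();
  return result;
}

int main() {
  volatile int32_t v = 1;
  assert(sketch_xchg(2, &v) == 1);  // returns the old value
  assert(v == 2);                   // new value is visible afterwards
  return 0;
}
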
*** 280,333 ****
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  #endif // M68K
  #endif // ARM
  }

! inline jlong Atomic::cmpxchg(jlong exchange_value,
!                              volatile jlong* dest,
!                              jlong compare_value,
!                              cmpxchg_memory_order order) {
!   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }

- inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                     volatile intptr_t* dest,
-                                     intptr_t compare_value,
-                                     cmpxchg_memory_order order) {
- #ifdef ARM
-   return arm_compare_and_swap(dest, compare_value, exchange_value);
- #else
- #ifdef M68K
-   return m68k_compare_and_swap(dest, compare_value, exchange_value);
- #else
-   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
- #endif // M68K
- #endif // ARM
- }
-
- inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                  volatile void* dest,
-                                  void* compare_value,
-                                  cmpxchg_memory_order order) {
-
-   return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                               (volatile intptr_t*) dest,
-                               (intptr_t) compare_value,
-                               order);
- }
-
- inline jlong Atomic::load(const volatile jlong* src) {
-   volatile jlong dest;
-   os::atomic_copy64(src, &dest);
-   return dest;
- }
-
- inline void Atomic::store(jlong store_value, jlong* dest) {
-   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
- }
-
- inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-   os::atomic_copy64((volatile jlong*)&store_value, dest);
- }
-
  #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
--- 270,280 ----
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  #endif // M68K
  #endif // ARM
  }

! template <>
! inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }

  #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
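
For context on the specialized_* names this patch introduces: the per-type Atomic member functions are replaced by width-canonical template specializations that a shared front-end in atomic.hpp selects after canonicalizing the argument type. That front-end lives outside this file; the sketch below is a simplified, hypothetical reconstruction of the dispatch pattern (names such as AtomicSketch are invented), not the shared code itself.

// Hypothetical, simplified front-end/back-end split; the real dispatch in the
// shared atomic.hpp is more general (pointers, enums, casting policies, ...).
#include <cassert>
#include <cstdint>
#include <type_traits>

class AtomicSketch {
 public:
  // Front-end: canonicalize any 4-/8-byte integral type and forward to the
  // platform-provided specialization.
  template <typename T>
  static T add(T add_value, volatile T* dest) {
    static_assert(std::is_integral<T>::value, "sketch handles integral types only");
    typedef typename std::conditional<sizeof(T) == 8, int64_t, int32_t>::type Canonical;
    return (T)specialized_add<Canonical>((Canonical)add_value,
                                         reinterpret_cast<volatile Canonical*>(dest));
  }

  // Back-end: declared generically, defined per platform via explicit
  // specialization, as in the bsd_zero file above.
  template <typename T>
  static T specialized_add(T add_value, volatile T* dest);
};

// "Platform" specialization, mirroring the generic __sync path above.
template <>
inline int32_t AtomicSketch::specialized_add<int32_t>(int32_t add_value,
                                                      volatile int32_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

int main() {
  volatile int32_t counter = 40;
  assert(AtomicSketch::add((int32_t)2, &counter) == 42);  // add returns the new value
  return 0;
}
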