src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp

*** 48,67 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
    STATIC_ASSERT(8 == sizeof(T));
    return PrimitiveConversions::cast<T>(
!     (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
  }
  
  template<>
  template<typename T>
  inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                   T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    (*os::atomic_store_long_func)(
!     PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
  }
  #endif
  
  // As per atomic.hpp all read-modify-write operations have to provide two-way
  // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
--- 48,67 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
    STATIC_ASSERT(8 == sizeof(T));
    return PrimitiveConversions::cast<T>(
!     (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
  }
  
  template<>
  template<typename T>
  inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                   T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    (*os::atomic_store_long_func)(
!     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
  }
  #endif
  
  // As per atomic.hpp all read-modify-write operations have to provide two-way
  // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
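
Note (not part of the webrev): the hunk above only renames jlong to int64_t; the structure stays the same because a 64-bit load or store on 32-bit ARM must go through a runtime-selected helper rather than a plain dereference, which could tear the value into two 32-bit accesses. As a rough illustration of the same requirement, assuming a GCC/Clang toolchain and using the compiler's __atomic builtins instead of HotSpot's os:: helpers (names here are invented for the sketch):

  #include <stdint.h>

  // Sketch only: a 64-bit value has to be read/written as one atomic access
  // on 32-bit ARM. Depending on the target these builtins lower to an
  // LDREXD/STREXD sequence or to a library/kernel-assisted path, which is
  // the role the atomic_load_long_func/atomic_store_long_func pointers play
  // in the file above.
  static inline int64_t sketch_load64(const volatile int64_t* src) {
    return __atomic_load_n(src, __ATOMIC_RELAXED);
  }

  static inline void sketch_store64(int64_t value, volatile int64_t* dest) {
    __atomic_store_n(dest, value, __ATOMIC_RELAXED);
  }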
*** 101,111 ****
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
  #else
!   return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
  #endif
  }
  
  #ifdef AARCH64
  template<>
--- 101,111 ----
      : [val] "=&r" (val), [tmp] "=&r" (tmp)
      : [add_val] "r" (add_value), [dest] "r" (dest)
      : "memory");
    return val;
  #else
!   return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
  #endif
  }
  
  #ifdef AARCH64
  template<>
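
Note (not part of the webrev): on the non-AARCH64 path the add is delegated to add_using_helper<int32_t> with os::atomic_add_func, and per the comment earlier in the file it must still provide full two-way barrier semantics. A minimal sketch of an equivalent "add and return the new value" operation, assuming GCC/Clang builtins rather than the HotSpot helper:

  #include <stdint.h>

  // Sketch only: sequentially consistent add-and-fetch, i.e. the new value
  // is returned and the operation acts as a full barrier on both sides.
  static inline int32_t sketch_add_and_fetch(int32_t add_value,
                                             volatile int32_t* dest) {
    return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
  }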
*** 144,154 ****
      : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
      : [new_val] "r" (exchange_value), [dest] "r" (dest)
      : "memory");
    return old_val;
  #else
!   return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
  #endif
  }
  
  #ifdef AARCH64
  template<>
--- 144,154 ----
      : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
      : [new_val] "r" (exchange_value), [dest] "r" (dest)
      : "memory");
    return old_val;
  #else
!   return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
  #endif
  }
  
  #ifdef AARCH64
  template<>
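
Note (not part of the webrev): the same pattern applies to exchange; xchg_using_helper<int32_t> routes through os::atomic_xchg_func and returns the value that was previously in *dest. A hedged, builtin-based sketch of that contract:

  #include <stdint.h>

  // Sketch only: atomically store the new value and return the old one.
  static inline int32_t sketch_xchg(int32_t exchange_value,
                                    volatile int32_t* dest) {
    return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
  }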
*** 176,196 ****
  template<>
  struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
  
  #ifndef AARCH64
  
! inline jint reorder_cmpxchg_func(jint exchange_value,
!                                  jint volatile* dest,
!                                  jint compare_value) {
    // Warning: Arguments are swapped to avoid moving them for kernel call
    return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
  }
  
! inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
!                                        jlong volatile* dest,
!                                        jlong compare_value) {
!   assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
    // Warning: Arguments are swapped to avoid moving them for kernel call
    return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
  }
  
  #endif // !AARCH64
--- 176,196 ----
  template<>
  struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
  
  #ifndef AARCH64
  
! inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
!                                     int32_t volatile* dest,
!                                     int32_t compare_value) {
    // Warning: Arguments are swapped to avoid moving them for kernel call
    return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
  }
  
! inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
!                                          int64_t volatile* dest,
!                                          int64_t compare_value) {
!   assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
    // Warning: Arguments are swapped to avoid moving them for kernel call
    return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
  }
  
  #endif // !AARCH64
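
Note (not part of the webrev): these wrappers exist only to reorder arguments -- they are declared as (exchange_value, dest, compare_value), the order the generic cmpxchg_using_helper supplies, and forward as (compare_value, exchange_value, dest), the order the underlying os:: function pointers expect per the "kernel call" warning comment. The observable contract is a compare-and-swap that returns the prior contents of *dest. A self-contained sketch of that contract, assuming GCC/Clang builtins and invented names:

  #include <stdint.h>

  // Sketch only: value-returning CAS. The exchange happened iff the returned
  // prior value equals compare_value.
  static inline int32_t sketch_cmpxchg32(int32_t exchange_value,
                                         volatile int32_t* dest,
                                         int32_t compare_value) {
    int32_t expected = compare_value;
    __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                /*weak=*/false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;  // old value of *dest, whether or not the CAS succeeded
  }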
*** 219,229 ****
      : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
      : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
      : "memory");
    return rv;
  #else
!   return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
  #endif
  }
  
  template<>
  template<typename T>
--- 219,229 ----
      : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
      : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
      : "memory");
    return rv;
  #else
!   return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
  #endif
  }
  
  template<>
  template<typename T>
*** 249,258 ****
      : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
      : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
      : "memory");
    return rv;
  #else
!   return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
  #endif
  }
  
  #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
--- 249,258 ----
      : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
      : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
      : "memory");
    return rv;
  #else
!   return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
  #endif
  }
  
  #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP