src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
@@ -48,20 +48,20 @@
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  return PrimitiveConversions::cast<T>(
-    (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
+    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  (*os::atomic_store_long_func)(
-    PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
+    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif
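
On 32-bit ARM a plain 64-bit load or store is not guaranteed to be single-copy atomic, which is why the 8-byte specializations above route through the runtime-selected os::atomic_load_long_func / os::atomic_store_long_func helpers instead of dereferencing directly. A minimal standalone sketch of the same idea (not HotSpot code; it leans on the GCC __atomic built-ins, which lower to a ldrexd/strexd pair on ARMv7):

#include <stdint.h>

// Sketch only: __atomic_load_n/__atomic_store_n give single-copy atomic
// 64-bit accesses; HotSpot instead dispatches through a function pointer
// so the choice can be made once at startup for the detected CPU.
static inline int64_t sketch_load64(const volatile int64_t* src) {
  return __atomic_load_n(src, __ATOMIC_RELAXED);
}

static inline void sketch_store64(int64_t value, volatile int64_t* dest) {
  __atomic_store_n(dest, value, __ATOMIC_RELAXED);
}
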
// As per atomic.hpp all read-modify-write operations have to provide two-way
// barrier semantics. For AARCH64 we are using load-acquire-with-reservation and
@@ -101,11 +101,11 @@
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
#endif
}
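
On the non-AARCH64 path, os::atomic_add_func is a runtime-selected routine; on older cores such routines are typically built from a compare-and-swap loop. A hedged sketch of that technique, with the GCC built-in standing in for the kernel-assisted CAS (illustrative name, not the actual os:: implementation):

#include <stdint.h>

// CAS-loop add: retry until the swap from the sampled value succeeds.
// Returns the updated value, matching Atomic::add's contract above.
static inline int32_t sketch_add32(int32_t add_value, volatile int32_t* dest) {
  int32_t old_val, new_val;
  do {
    old_val = *dest;                // sample current contents
    new_val = old_val + add_value;  // compute the replacement
  } while (!__atomic_compare_exchange_n(dest, &old_val, new_val,
                                        false /* strong CAS */,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
  return new_val;
}
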
#ifdef AARCH64
template<>
@@ -144,11 +144,11 @@
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
-  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
#endif
}
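
The xchg helper follows the same pattern: Atomic::xchg must return the previous contents of dest with full-fence semantics. A standalone sketch of that contract (GCC built-in; not the os::atomic_xchg_func implementation):

#include <stdint.h>

// __atomic_exchange_n stores the new value and returns the old one,
// which is exactly the value Atomic::xchg hands back to its caller.
static inline int32_t sketch_xchg32(int32_t exchange_value, volatile int32_t* dest) {
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}
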
#ifdef AARCH64
template<>
@@ -176,21 +176,21 @@
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

#ifndef AARCH64

-inline jint reorder_cmpxchg_func(jint exchange_value,
-                                 jint volatile* dest,
-                                 jint compare_value) {
+inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
+                                    int32_t volatile* dest,
+                                    int32_t compare_value) {
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

-inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
-                                       jlong volatile* dest,
-                                       jlong compare_value) {
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
+                                         int64_t volatile* dest,
+                                         int64_t compare_value) {
+  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}

#endif // !AARCH64
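
The argument swap in the two wrappers above exists because the Linux/ARM kuser cmpxchg helper takes (oldval, newval, ptr), while cmpxchg_using_helper supplies (exchange_value, dest, compare_value); reordering in the wrapper lets the values land in the registers the kernel entry point expects. A hedged sketch of a value-returning cmpxchg built on that helper (the fixed address is the one documented in the kernel's Documentation/arm/kernel_user_helpers.txt; the loop is needed because the helper only reports success or failure):

#include <stdint.h>

// Sketch only: __kernel_cmpxchg returns zero when *ptr was updated,
// so a cmpxchg that must return the old value re-reads and retries.
typedef int (*kuser_cmpxchg_t)(int32_t oldval, int32_t newval, volatile int32_t* ptr);
#define KUSER_CMPXCHG ((kuser_cmpxchg_t)0xffff0fc0)

static inline int32_t sketch_cmpxchg32(int32_t compare_value,
                                       int32_t exchange_value,
                                       volatile int32_t* dest) {
  for (;;) {
    int32_t observed = *dest;
    if (observed != compare_value) {
      return observed;                  // mismatch: report the value seen
    }
    if (KUSER_CMPXCHG(compare_value, exchange_value, dest) == 0) {
      return compare_value;             // swap succeeded; old value matched
    }
    // The helper failed (a concurrent update won); sample and retry.
  }
}
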
@@ -219,11 +219,11 @@
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
-  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif
}
template<>
template<typename T>
@@ -249,10 +249,10 @@
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
-  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif
}
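
cmpxchg_using_helper is the small adapter bridging the typed template front end to these raw fixed-width function pointers. A sketch of the pattern for the 8-byte case (hypothetical name and signature; the real adapter goes through PrimitiveConversions so non-integral 8-byte operands work too):

#include <stdint.h>

// Adapter sketch: view a T operand as int64_t, call the raw helper,
// and convert the observed value back to T for the caller.
template<typename T>
inline T sketch_cmpxchg_using_helper(int64_t (*fn)(int64_t, int64_t volatile*, int64_t),
                                     T exchange_value, T volatile* dest, T compare_value) {
  static_assert(sizeof(T) == sizeof(int64_t), "8-byte operands only");
  return static_cast<T>(fn(static_cast<int64_t>(exchange_value),
                           reinterpret_cast<int64_t volatile*>(dest),
                           static_cast<int64_t>(compare_value)));
}
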
#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP