src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

*** 29,45 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename I, typename D>
!   D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
--- 29,45 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename D, typename I>
!   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
  };
  
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
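For readers skimming the hunk above: lock xaddl is x86's fetch-and-add, and the asm returns the value *dest held before the addition. A minimal standalone sketch of the same semantics, using the GCC/Clang __atomic builtins rather than HotSpot's wrappers (illustration only, not part of this change):

#include <cstdint>

// Atomically add add_value to *dest and return the old value, in the
// new dest-first argument order used throughout this patch.
inline int32_t fetch_and_add_sketch(volatile int32_t* dest, int32_t add_value) {
  return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
}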
*** 49,60 ****
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
--- 49,60 ----
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
!                                              T exchange_value,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
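The xchgl hunk above relies on a classic x86 property: an xchg with a memory operand is implicitly locked, which is why no lock prefix appears. Equivalent semantics with the builtins (sketch, not from the patch):

#include <cstdint>

// Atomically store exchange_value into *dest and return the previous
// value; the implicit lock makes this a full two-way fence on x86.
inline int32_t xchg_sketch(volatile int32_t* dest, int32_t exchange_value) {
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}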
*** 62,74 ****
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(1 == sizeof(T));
    __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
--- 62,74 ----
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(1 == sizeof(T));
    __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
*** 76,88 ****
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
--- 76,88 ----
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
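The two cmpxchg hunks above implement compare-and-swap: lock cmpxchg compares *dest against the value in eax (compare_value), stores exchange_value on a match, and leaves the observed value in eax either way, which is why the asm returns through exchange_value. The same contract via the builtins, in the new (dest, compare_value, exchange_value) order (sketch only):

#include <cstdint>

// Returns the value *dest held before the operation; the store of
// exchange_value happens only if that value equaled compare_value.
inline int32_t cmpxchg_sketch(volatile int32_t* dest,
                              int32_t compare_value,
                              int32_t exchange_value) {
  __atomic_compare_exchange_n(dest, &compare_value, exchange_value,
                              /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return compare_value;  // on failure, the observed value is written back here
}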
*** 91,102 ****
  }
  
  #ifdef AMD64
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ ("lock xaddq %0,(%2)"
--- 91,102 ----
  }
  
  #ifdef AMD64
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ ("lock xaddq %0,(%2)"
*** 106,116 ****
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
--- 106,116 ----
    return old_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                               atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
*** 118,130 ****
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
--- 118,130 ----
    return exchange_value;
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
*** 140,155 ****
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
!                                                 T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
!   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
--- 140,155 ----
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
  }
  
  template<>
  template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                  T compare_value,
+                                                 T exchange_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
!   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
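This 32-bit-only hunk shows that the reorder stops at the C++ layer: the _Atomic_cmpxchg_long assembly stub keeps its old signature, and only the argument order of the cmpxchg_using_helper call changes to match the new API. A hypothetical shim with the shape implied by the two call sites (the real template lives in share/runtime/atomic.hpp and may differ in detail):

#include <cstdint>

// Hypothetical adapter: accepts the new dest-first order and forwards to
// a legacy stub that still takes (exchange_value, dest, compare_value).
typedef int64_t (*cmpxchg_long_fn)(int64_t, volatile int64_t*, int64_t);

inline int64_t cmpxchg_using_helper_sketch(cmpxchg_long_fn fn,
                                           volatile int64_t* dest,
                                           int64_t compare_value,
                                           int64_t exchange_value) {
  return fn(exchange_value, dest, compare_value);  // stub order unchanged
}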
*** 159,181 ****
    return PrimitiveConversions::cast<T>(dest);
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T store_value,
!                                                  T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value),
                      reinterpret_cast<volatile int64_t*>(dest));
  }
  
  #endif // AMD64
  
  template<>
  struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm__ volatile (  "xchgb (%2),%0"
                        : "=q" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
--- 159,181 ----
    return PrimitiveConversions::cast<T>(dest);
  }
  
  template<>
  template<typename T>
! inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
!                                                  T store_value) const {
    STATIC_ASSERT(8 == sizeof(T));
    _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value),
                      reinterpret_cast<volatile int64_t*>(dest));
  }
  
  #endif // AMD64
  
  template<>
  struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm__ volatile (  "xchgb (%2),%0"
                        : "=q" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
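Two things happen in the hunk above: the 32-bit-only PlatformStore<8> flips to dest-first while still routing through _Atomic_move_long (a plain 8-byte store can tear on IA-32, whereas a single 64-bit FPU/SSE move cannot), and the first PlatformOrderedStore specialization gets the same treatment. An atomic 64-bit store sketch for a 32-bit target, under the assumption that the builtin lowers to such a move or a cmpxchg8b loop:

#include <cstdint>

// Store 8 bytes without tearing, even on a 32-bit target.
inline void store_64_sketch(volatile int64_t* dest, int64_t store_value) {
  __atomic_store_n(dest, store_value, __ATOMIC_RELAXED);
}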
*** 183,193 ****
  
  template<>
  struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm__ volatile (  "xchgw (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
--- 183,193 ----
  
  template<>
  struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm__ volatile (  "xchgw (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
*** 195,205 ****
  
  template<>
  struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm__ volatile (  "xchgl (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
--- 195,205 ----
  
  template<>
  struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm__ volatile (  "xchgl (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
*** 208,218 ****
  #ifdef AMD64
  template<>
  struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(T v, volatile T* p) const {
      __asm__ volatile (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
--- 208,218 ----
  #ifdef AMD64
  template<>
  struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
  {
    template <typename T>
!   void operator()(volatile T* p, T v) const {
      __asm__ volatile (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
    }
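All four PlatformOrderedStore<..., RELEASE_X_FENCE> specializations use the same trick: because xchg is implicitly locked, the release store and the trailing full fence collapse into one instruction, typically cheaper than a mov followed by mfence. A builtin-based sketch of the same effect (not from the patch):

#include <cstdint>

// Release store plus full fence in a single implicitly locked xchg;
// the previous value is intentionally discarded.
inline void release_store_fence_sketch(volatile int32_t* p, int32_t v) {
  (void)__atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
}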