
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

*** 29,45 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename I, typename D>
!   D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
--- 29,45 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
!   template<typename D, typename I>
!   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
  };
  
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
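
The hunk above only reorders the template parameters and arguments of fetch_and_add (destination pointer first, addend second); the lock xaddl sequence itself is unchanged. As a rough, self-contained illustration of that pattern outside HotSpot, the sketch below uses the same inline assembly with the new dest-first ordering. The name fetch_and_add_4 and the plain int32_t types are invented for this example, and the operand constraints are assumed, since the hunk cuts off right after the opcode; it compiles only on x86/x86_64 with GCC or Clang.

#include <cstdint>
#include <cstdio>

// Hypothetical standalone helper; HotSpot's real code is the templated
// Atomic::PlatformAdd<4>::fetch_and_add shown in the diff above.
inline int32_t fetch_and_add_4(int32_t volatile* dest, int32_t add_value) {
  int32_t old_value;
  // lock xaddl atomically exchanges %0 with (%2) and stores their sum in
  // (%2), leaving the previous value of *dest in old_value.
  __asm__ volatile ("lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

int main() {
  int32_t counter = 40;
  int32_t previous = fetch_and_add_4(&counter, 2);  // dest first, addend second
  std::printf("previous=%d counter=%d\n", previous, counter);  // previous=40 counter=42
  return 0;
}
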
*** 91,102 ****
  }
  
  #ifdef AMD64
  template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ ("lock xaddq %0,(%2)"
--- 91,102 ----
  }
  
  #ifdef AMD64
  template<>
! template<typename D, typename I>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ ("lock xaddq %0,(%2)"
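
The AMD64-only hunk applies the same dest-first reordering to the 8-byte specialization; only the opcode (lock xaddq) and the operand sizes differ. A matching standalone sketch, again with an invented fetch_and_add_8 name and assumed constraints, guarded so it only compiles for x86_64:

#include <cstdint>

#ifdef __x86_64__
// Hypothetical 64-bit counterpart of the helper above; lock xaddq operates
// on 8-byte operands and is only available on x86_64.
inline int64_t fetch_and_add_8(int64_t volatile* dest, int64_t add_value) {
  int64_t old_value;
  // Same pattern as the 4-byte version, using the 64-bit xaddq form.
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}
#endif
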