// NOTE(review): This file is a mangled side-by-side extraction of HotSpot's
// os_cpu/linux_x86 atomic header. It carries TWO revisions of the header
// (separated further down by a '|' marker), and every physical line below
// embeds the original file's own line numbers as text. Code is left
// byte-identical; these comments only annotate what each span contains.
//
// This span (embedded lines 14-96, OLDER revision — fetch_and_add takes the
// add value FIRST: (I add_value, D volatile* dest, order)):
//   - tail of the GPLv2 license header and the
//     OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP include guard
//   - Atomic::PlatformAdd declaration (CRTP via Atomic::FetchAndAdd)
//   - PlatformAdd<4>::fetch_and_add : "lock xaddl", returns the prior value
//   - PlatformXchg<4>               : "xchgl" — NOTE(review): embedded lines
//     61-75 are missing from this extraction (the asm's clobber list / close
//     and presumably a following specialization; verify against the original
//     file before relying on this span)
//   - PlatformCmpxchg<4>            : "lock cmpxchgl" with compare_value
//     bound to %eax via the "a" constraint; order parameter ignored
//     (x86 locked ops are already sequentially consistent per the code's
//     use of a bare lock prefix — TODO confirm against the full header)
//   - the opening "#ifdef AMD64"
14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP 26 #define OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP 27 28 // Implementation of class atomic 29 30 template<size_t byte_size> 31 struct Atomic::PlatformAdd 32 : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> > 33 { 34 template<typename I, typename D> 35 D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const; 36 }; 37 38 template<> 39 template<typename I, typename D> 40 inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest, 41 atomic_memory_order order) const { 42 STATIC_ASSERT(4 == sizeof(I)); 43 STATIC_ASSERT(4 == sizeof(D)); 44 D old_value; 45 __asm__ volatile ( "lock xaddl %0,(%2)" 46 : "=r" (old_value) 47 : "0" (add_value), "r" (dest) 48 : "cc", "memory"); 49 return old_value; 50 } 51 52 template<> 53 template<typename T> 54 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value, 55 T volatile* dest, 56 atomic_memory_order order) const { 57 STATIC_ASSERT(4 == sizeof(T)); 58 __asm__ volatile ( "xchgl (%2),%0" 59 : "=r" (exchange_value) 60 : "0" (exchange_value), "r" (dest) 76 return exchange_value; 77 } 78 79 template<> 80 template<typename T> 81 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, 82 T volatile* dest, 83 T compare_value, 84 atomic_memory_order /* order */) const { 85 STATIC_ASSERT(4 == sizeof(T)); 86 __asm__ volatile ("lock cmpxchgl %1,(%3)" 87 : "=a" (exchange_value) 88 : "r" (exchange_value), "a" (compare_value), "r" (dest) 89 : "cc", "memory"); 90 return exchange_value; 91 } 92 93 #ifdef AMD64 94 95 template<> 96 
// NOTE(review): Continuation of the OLDER revision's AMD64 section, then the
// '|' marker that splits the two side-by-side revisions in this extraction.
//
// Before the '|' (embedded lines 97-117, old (add_value, dest) parameter
// order):
//   - PlatformAdd<8>::fetch_and_add : "lock xaddq", returns the prior value
//   - PlatformXchg<8>               : "xchgq" — TRUNCATED: the extraction cuts
//     off right after the "memory" clobber (embedded line 117); the closing of
//     the asm statement and the function's return are not visible here
// After the '|' (embedded lines 14-21): the NEWER revision's copy of the same
// GPLv2 license-header tail that opened this file.
template<typename I, typename D> 97 inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest, 98 atomic_memory_order order) const { 99 STATIC_ASSERT(8 == sizeof(I)); 100 STATIC_ASSERT(8 == sizeof(D)); 101 D old_value; 102 __asm__ __volatile__ ("lock xaddq %0,(%2)" 103 : "=r" (old_value) 104 : "0" (add_value), "r" (dest) 105 : "cc", "memory"); 106 return old_value; 107 } 108 109 template<> 110 template<typename T> 111 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest, 112 atomic_memory_order order) const { 113 STATIC_ASSERT(8 == sizeof(T)); 114 __asm__ __volatile__ ("xchgq (%2),%0" 115 : "=r" (exchange_value) 116 : "0" (exchange_value), "r" (dest) 117 : "memory"); | 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions.
// NOTE(review): NEWER revision of the same header (embedded lines 22-109).
// The only substantive difference from the older copy above is the
// fetch_and_add signature: the destination now comes FIRST —
// (D volatile* dest, I add_value, order) — in both the PlatformAdd
// declaration and the 4- and 8-byte specializations. The asm bodies
// ("lock xaddl"/"lock xaddq", "xchgl", "lock cmpxchgl") are textually
// identical to the older copy.
//
// Same extraction defect as the older copy: embedded lines 61-75 are missing
// from PlatformXchg<4> (its clobber list / close and whatever definition
// followed) — verify against the original file before relying on this span.
22 * 23 */ 24 25 #ifndef OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP 26 #define OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP 27 28 // Implementation of class atomic 29 30 template<size_t byte_size> 31 struct Atomic::PlatformAdd 32 : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> > 33 { 34 template<typename D, typename I> 35 D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const; 36 }; 37 38 template<> 39 template<typename D, typename I> 40 inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, 41 atomic_memory_order order) const { 42 STATIC_ASSERT(4 == sizeof(I)); 43 STATIC_ASSERT(4 == sizeof(D)); 44 D old_value; 45 __asm__ volatile ( "lock xaddl %0,(%2)" 46 : "=r" (old_value) 47 : "0" (add_value), "r" (dest) 48 : "cc", "memory"); 49 return old_value; 50 } 51 52 template<> 53 template<typename T> 54 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value, 55 T volatile* dest, 56 atomic_memory_order order) const { 57 STATIC_ASSERT(4 == sizeof(T)); 58 __asm__ volatile ( "xchgl (%2),%0" 59 : "=r" (exchange_value) 60 : "0" (exchange_value), "r" (dest) 76 return exchange_value; 77 } 78 79 template<> 80 template<typename T> 81 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, 82 T volatile* dest, 83 T compare_value, 84 atomic_memory_order /* order */) const { 85 STATIC_ASSERT(4 == sizeof(T)); 86 __asm__ volatile ("lock cmpxchgl %1,(%3)" 87 : "=a" (exchange_value) 88 : "r" (exchange_value), "a" (compare_value), "r" (dest) 89 : "cc", "memory"); 90 return exchange_value; 91 } 92 93 #ifdef AMD64 94 95 template<> 96 template<typename D, typename I> 97 inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, 98 atomic_memory_order order) const { 99 STATIC_ASSERT(8 == sizeof(I)); 100 STATIC_ASSERT(8 == sizeof(D)); 101 D old_value; 102 __asm__ __volatile__ ("lock xaddq %0,(%2)" 103 : "=r" (old_value) 104 : "0" (add_value), "r" (dest) 105 : "cc", "memory"); 106 return old_value; 107 } 108 109 
// NOTE(review): NEWER revision's PlatformXchg<8> ("xchgq"), embedded lines
// 110-117. TRUNCATED: the extraction ends at the "memory" clobber and the
// trailing '|' marker — the asm statement's close, the function's return,
// any remaining 8-byte cmpxchg / ordered-access definitions, the matching
// "#endif" for AMD64, and the include-guard "#endif" are all outside this
// view. Do not treat this file as complete or compilable; recover the rest
// from the original header.
template<> 110 template<typename T> 111 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest, 112 atomic_memory_order order) const { 113 STATIC_ASSERT(8 == sizeof(T)); 114 __asm__ __volatile__ ("xchgq (%2),%0" 115 : "=r" (exchange_value) 116 : "0" (exchange_value), "r" (dest) 117 : "memory"); |