/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP

// Implementation of class atomic

// PlatformAdd supplies only fetch_and_add (which returns the old value);
// the FetchAndAdd base class derives the add-and-fetch form used by
// Atomic::add from it.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  // LOCK XADD atomically adds and exchanges; the "0" constraint ties
  // add_value to operand %0, so old_value receives the previous *dest.
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // XCHG with a memory operand is implicitly locked; no LOCK prefix needed.
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  // CMPXCHG leaves the old *dest value in the accumulator ("=a") whether or
  // not the exchange happened. "q" restricts exchange_value to a
  // byte-addressable register, as required by cmpxchgb on 32-bit x86.
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in linux_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

// On 32-bit x86 an ordinary 64-bit load or store is not guaranteed atomic,
// so 8-byte accesses are routed through the assembly helper, which performs
// a single 64-bit memory access.
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP