/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP

#include "orderAccess_windows_x86.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

// Adding a lock prefix to an instruction on an MP machine.
// VC++ doesn't let the 'lock' prefix stand on its own line, so we cannot
// place a label between the prefix and the instruction it modifies.
// By emitting the raw prefix byte ourselves, we can define a label after it
// and jump over the prefix on uniprocessors.
#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
                       __asm je L0      \
                       __asm _emit 0xF0 \
                       __asm L0:
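
// For illustration, a use such as
//   LOCK_IF_MP(mp)
//   xadd dword ptr [edx], eax
// tests 'mp' at runtime: on a uniprocessor (mp == 0) the 'je' skips the
// emitted 0xF0 byte and a plain XADD executes, while on an MP system the
// 0xF0 byte falls through and is decoded as the LOCK prefix of the
// immediately following XADD instruction.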

#ifdef AMD64
inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }

inline jint Atomic::add    (jint     add_value, volatile jint*     dest) {
  return (jint)(*os::atomic_add_func)(add_value, dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc    (volatile jint*     dest) {
  (void)add    (1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void*     dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::dec    (volatile jint*     dest) {
  (void)add    (-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  (void)add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void*     dest) {
  (void)add_ptr(-1, dest);
}

inline jint Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}

inline jlong Atomic::load(volatile jlong* src) { return *src; }

#else // !AMD64

inline jint Atomic::add    (jint     add_value, volatile jint*     dest) {
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    LOCK_IF_MP(mp)
    xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc    (volatile jint*     dest) {
  // alternative for InterlockedIncrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    add dword ptr [edx], 1;
  }
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile void*     dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec    (volatile jint*     dest) {
  // alternative for InterlockedDecrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    sub dword ptr [edx], 1;
  }
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile void*     dest) {
  dec((volatile jint*)dest);
}

inline jint Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    LOCK_IF_MP(mp)
    cmpxchg dword ptr [edx], ecx
  }
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  int mp = os::is_MP();
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    LOCK_IF_MP(mp)
    cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}
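
// Note on the 64-bit load/store below: on 32-bit x86 a jlong cannot be
// read or written atomically with ordinary 32-bit moves. The x87
// fild/fistp pair performs the memory access as a single 64-bit transfer,
// which the hardware keeps atomic for naturally aligned operands, and the
// 80-bit x87 format represents every 64-bit integer exactly, so the value
// survives the round trip through the FPU unchanged.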

inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, pdest
    fistp    qword ptr [eax]
  }
  return dest;
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  volatile jlong* src = &store_value;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, dest
    fistp    qword ptr [eax]
  }
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  Atomic::store(store_value, (volatile jlong*)dest);
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_INLINE_HPP