/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

// Adding a lock prefix to an instruction on an MP machine.
// VC++ doesn't accept the lock prefix on a line of its own,
// so we can't insert a label after the lock prefix.
// By emitting the prefix as a raw byte (_emit 0xF0), we can define a label after it.
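//
// Illustrative expansion of LOCK_IF_MP(mp) inside an __asm block (a sketch,
// not verbatim compiler output), assuming mp holds the result of os::is_MP():
//
//   cmp   mp, 0       ; uniprocessor? then skip the lock prefix
//   je    L0
//   _emit 0xF0        ; raw 0xF0 byte == x86 'lock' prefix for the next instruction
//   L0: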
#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
                       __asm je L0      \
                       __asm _emit 0xF0 \
                       __asm L0:

#ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline jint Atomic::add (jint add_value, volatile jint* dest) {
  return (jint)(*os::atomic_add_func)(add_value, dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc (volatile jint* dest) {
  (void)add (1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::dec (volatile jint* dest) {
  (void)add (-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  (void)add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  (void)add_ptr(-1, dest);
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}

inline jlong Atomic::load(volatile jlong* src) { return *src; }

#else // !AMD64
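
// Note on the IA-32 implementations below: MSVC's inline-assembly convention
// is that whatever a function leaves in EAX (or EDX:EAX for a 64-bit result)
// becomes its return value, so several of these bodies have no C++ return
// statement; the #pragma warning(disable: 4035) above suppresses the
// resulting missing-return-statement warning. LOCK_IF_MP(mp) applies the
// 'lock' prefix only when os::is_MP() reported a multiprocessor machine.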

inline jint Atomic::add (jint add_value, volatile jint* dest) {
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    LOCK_IF_MP(mp)
    xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc (volatile jint* dest) {
  // alternative for InterlockedIncrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    add dword ptr [edx], 1;
  }
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec (volatile jint* dest) {
  // alternative for InterlockedDecrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    sub dword ptr [edx], 1;
  }
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec((volatile jint*)dest);
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    LOCK_IF_MP(mp)
    cmpxchg byte ptr [edx], cl
  }
}

inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    LOCK_IF_MP(mp)
    cmpxchg dword ptr [edx], ecx
  }
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  int mp = os::is_MP();
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    LOCK_IF_MP(mp)
    cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}
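
// 64-bit load/store on IA-32: a 32-bit core cannot read or write a jlong with
// an ordinary mov, so the x87 FPU is used instead. fild/fistp move the full
// 64 bits to and from memory in a single access, which x86 guarantees to be
// atomic for a naturally aligned 64-bit operand, so no lock prefix is needed.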

inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild  qword ptr [eax]
    mov eax, pdest
    fistp qword ptr [eax]
  }
  return dest;
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  volatile jlong* src = &store_value;
  __asm {
    mov eax, src
    fild  qword ptr [eax]
    mov eax, dest
    fistp qword ptr [eax]
  }
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  Atomic::store(store_value, (volatile jlong*)dest);
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
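
// Usage sketch (illustrative only, not part of this header): a typical
// compare-and-swap retry loop built on Atomic::cmpxchg, assuming 'counter'
// is a shared volatile jint* and that the cmpxchg_memory_order parameter
// takes its default value from the declaration in the shared atomic.hpp:
//
//   jint old;
//   do {
//     old = *counter;
//   } while (Atomic::cmpxchg(old + 1, counter, old) != old);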