/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.
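
// Illustrative usage (a sketch only, not part of this file's contract):
// callers reach the specializations below through the size-dispatched
// wrappers on the Atomic class in runtime/atomic.hpp, e.g.
//
//   volatile jint counter = 0;
//   Atomic::add((jint)1, &counter);               // -> PlatformAdd<4>
//   Atomic::xchg((jint)2, &counter);              // -> PlatformXchg<4>
//   Atomic::cmpxchg((jint)3, &counter, (jint)2);  // -> PlatformCmpxchg<4>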

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};

#ifdef AMD64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
}

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}

#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }

DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)

#undef DEFINE_STUB_XCHG

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         cmpxchg_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG

#else // !AMD64

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  // lock xadd leaves the old value of *dest in eax; adding back add_value
  // (saved in ecx) yields the new value, which is returned in eax (the
  // missing return statement is intentional, see the pragma above).
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    lock xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  // alternative for InterlockedCompareExchange64: both 64-bit operands are
  // split into 32-bit halves because cmpxchg8b compares *dest against
  // edx:eax and, on a match, stores ecx:ebx; either way the previous value
  // ends up in edx:eax, which is also the 64-bit return convention.
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}
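
// On 32-bit x86 an ordinary 64-bit load or store is compiled as two 32-bit
// moves and is therefore not atomic. The two specializations below route
// the access through the x87 FPU instead: fild performs a single 64-bit
// memory read and fistp a single 64-bit memory write, which the hardware
// guarantees to be atomic for naturally aligned 8-byte operands. The value
// is preserved exactly because the x87 registers carry a 64-bit mantissa.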

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T dest;
  volatile T* pdest = &dest;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, pdest
    fistp    qword ptr [eax]
  }
  return dest;
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T* src = &store_value;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, dest
    fistp    qword ptr [eax]
  }
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP