/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.
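// Illustrative sketch only (not part of this header): earlier revisions
// of this code made the 'lock' prefix conditional on os::is_MP() with a
// macro along these lines, emitting the 0xF0 lock-prefix byte only on
// multiprocessor machines. The _emit form is used because VC++ does not
// accept a bare 'lock' prefix on its own __asm line:
//
//   #define LOCK_IF_MP(mp) __asm cmp mp, 0  \
//                          __asm je L0      \
//                          __asm _emit 0xF0 \
//                          __asm L0:
//
//   // usage inside an __asm block, with 'int mp = os::is_MP();' set up
//   // beforehand:
//   //   LOCK_IF_MP(mp)
//   //   xadd dword ptr [edx], eax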
#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

#ifdef AMD64

template <>
inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
  return (int32_t)(*os::atomic_add_func)(add_value, dest);
}

template <>
inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) {
  return (int64_t)(*os::atomic_add_ptr_func)(add_value, dest);
}

template <>
inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
  return (int32_t)(*os::atomic_xchg_func)(exchange_value, dest);
}

template <>
inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) {
  return (int64_t)(*os::atomic_xchg_ptr_func)(exchange_value, dest);
}

template <>
inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
}

template <>
inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
}

#else // !AMD64

template <>
inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    lock xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

template <>
inline void Atomic::specialized_inc(volatile int32_t* dest) {
  // alternative for InterlockedIncrement
  __asm {
    mov edx, dest;
    lock add dword ptr [edx], 1;
  }
}

template <>
inline void Atomic::specialized_dec(volatile int32_t* dest) {
  // alternative for InterlockedDecrement
  __asm {
    mov edx, dest;
    lock sub dword ptr [edx], 1;
  }
}

template <>
inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}

template <>
inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}

template <>
inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  int32_t ex_lo  = (int32_t)exchange_value;
  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
  int32_t cmp_lo = (int32_t)compare_value;
  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}
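// Illustrative sketch only (not part of this header): there is no
// specialized_add for int64_t on 32-bit x86, but a 64-bit atomic add
// can be built on the cmpxchg8b-based specialization above with a
// retry loop, assuming the public Atomic::load/Atomic::cmpxchg entry
// points that dispatch to these specializations:
//
//   int64_t add_int64(int64_t add_value, volatile int64_t* dest) {
//     int64_t old_value;
//     int64_t new_value;
//     do {
//       old_value = Atomic::load(dest);   // atomic 8-byte read (below)
//       new_value = old_value + add_value;
//     } while (Atomic::cmpxchg(new_value, dest, old_value) != old_value);
//     return new_value;
//   }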
template <>
inline int64_t Atomic::specialized_load(const volatile int64_t* src) {
  volatile int64_t dest;
  volatile int64_t* pdest = &dest;
  // fild/fistp move all 8 bytes in a single memory access, which is
  // atomic for an aligned address; a pair of 32-bit integer moves
  // would not be.
  __asm {
    mov eax, src
    fild qword ptr [eax]
    mov eax, pdest
    fistp qword ptr [eax]
  }
  return dest;
}

template <>
inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) {
  volatile int64_t* src = &store_value;
  __asm {
    mov eax, src
    fild qword ptr [eax]
    mov eax, dest
    fistp qword ptr [eax]
  }
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
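// Usage sketch (illustrative only, not part of this header): callers do
// not invoke these specializations directly; they go through the shared
// Atomic API, which dispatches here on Windows/x86. Assuming the public
// Atomic::add/load/store entry points of this era:
//
//   volatile int32_t counter = 0;
//   Atomic::add(1, &counter);          // lock xadd (or AMD64 stub) above
//
//   volatile int64_t ticks = 0;
//   int64_t t = Atomic::load(&ticks);  // fild/fistp path on 32-bit x86
//   Atomic::store(t + 1, &ticks);      // plain 64-bit store on AMD64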