--- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:17:04.743482718 +0100
+++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp 2019-11-21 11:17:04.359476553 +0100
@@ -27,6 +27,17 @@
 
 #include "runtime/os.hpp"
 
+// Note that in MSVC, volatile memory accesses are explicitly
+// guaranteed to have acquire release semantics (w.r.t. compiler
+// reordering) and therefore do not even need a compiler barrier
+// for normal acquire release accesses. And all generalized
+// bound calls like release_store go through OrderAccess::load
+// and OrderAccess::store which do volatile memory accesses.
+template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
+template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
 // The following alternative implementations are needed because
 // Windows 95 doesn't support (some of) the corresponding Windows NT
 // calls. Furthermore, these versions allow inlining in the caller.
@@ -218,4 +229,45 @@
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
 
+#ifndef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov al, v;
+      xchg al, byte ptr [edx];
+    }
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov ax, v;
+      xchg ax, word ptr [edx];
+    }
+  }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov eax, v;
+      xchg eax, dword ptr [edx];
+    }
+  }
+};
+#endif // AMD64
+
 #endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
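
Appended note, not part of the patch: the xchg-based specializations above provide a release store with a trailing fence on 32-bit Windows, relying on the fact that an x86 xchg with a memory operand is implicitly locked and therefore doubles as a full memory fence. A minimal portable sketch of the same idea using std::atomic rather than HotSpot's Atomic class follows; the function name is purely illustrative.

#include <atomic>

// Illustrative sketch: a seq_cst exchange on x86 typically compiles to a
// single xchg instruction, which performs the store and also acts as a
// full fence, the same effect the inline-asm specializations in the patch
// rely on.
inline void release_store_fence_example(std::atomic<int>* p, int v) {
  p->exchange(v, std::memory_order_seq_cst);
}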