5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
26 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
27
28 #include <intrin.h>
29 #include "runtime/atomic.hpp"
30 #include "runtime/orderAccess.hpp"
31 #include "runtime/os.hpp"
32
33 // Compiler version last used for testing: Microsoft Visual Studio 2010
34 // Please update this information when this file changes
35
36 // Implementation of class OrderAccess.
37
// A compiler barrier, forcing the C++ compiler to invalidate all memory
// assumptions cached in registers across this point.
// _ReadWriteBarrier() is an MSVC intrinsic (from <intrin.h>): it constrains
// only compiler reordering and emits no machine instruction, so it is NOT a
// hardware fence.
inline void compiler_barrier() {
  _ReadWriteBarrier();
}
42
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through OrderAccess::load
// and OrderAccess::store which do volatile memory accesses.
//
// Hence the acquire postfix and the release prefixes are empty here;
// only RELEASE_X_FENCE needs a trailing full hardware fence().
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
53
// On x86 the hardware already preserves load-load, store-store and
// load-store ordering, so those barriers (and acquire/release) only need
// to constrain the compiler. Store-load reordering IS permitted by the
// hardware, which is why storeload() alone needs a real fence().
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
61
// Full two-way hardware memory fence, followed by a compiler barrier so the
// compiler cannot move accesses across it either.
inline void OrderAccess::fence() {
#ifdef AMD64
  // 64-bit: delegate to StubRoutines_fence() — presumably a runtime-generated
  // stub; defined elsewhere, not visible in this file.
  StubRoutines_fence();
#else
  // 32-bit: only needed on multiprocessor machines. A locked add of zero to
  // the top-of-stack word is a read-modify-write with LOCK semantics, which
  // serves as a full fence on x86.
  if (os::is_MP()) {
    __asm {
      lock add dword ptr [esp], 0;
    }
  }
#endif // AMD64
  compiler_barrier();
}
74
#ifndef AMD64
// 32-bit x86 specializations of ordered store-with-fence.
// An xchg with a memory operand implicitly asserts LOCK on x86, so the
// exchange is both the store and a full fence in a single instruction.
//
// 1-byte variant.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;               // edx = destination address
      mov al, v;                // al  = byte value to store
      xchg al, byte ptr [edx];  // atomic exchange == store + full fence
    }
  }
};
88
89 template<>
90 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
96 mov ax, v;
97 xchg ax, word ptr [edx];
98 }
99 }
100 };
101
// 4-byte ordered store-with-fence: xchg with a memory operand implicitly
// asserts LOCK on x86, so the exchange is both the store and a full fence.
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;                // edx = destination address
      mov eax, v;                // eax = 32-bit value to store
      xchg eax, dword ptr [edx]; // atomic exchange == store + full fence
    }
  }
};
#endif // AMD64
115
116 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
|
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
26 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
27
28 #include <intrin.h>
29 #include "runtime/atomic.hpp"
30 #include "runtime/orderAccess.hpp"
31
32 // Compiler version last used for testing: Microsoft Visual Studio 2010
33 // Please update this information when this file changes
34
35 // Implementation of class OrderAccess.
36
// A compiler barrier, forcing the C++ compiler to invalidate all memory
// assumptions cached in registers across this point.
// _ReadWriteBarrier() is an MSVC intrinsic (from <intrin.h>): it constrains
// only compiler reordering and emits no machine instruction, so it is NOT a
// hardware fence.
inline void compiler_barrier() {
  _ReadWriteBarrier();
}
41
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through OrderAccess::load
// and OrderAccess::store which do volatile memory accesses.
//
// Hence the acquire postfix and the release prefixes are empty here;
// only RELEASE_X_FENCE needs a trailing full hardware fence().
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
52
// On x86 the hardware already preserves load-load, store-store and
// load-store ordering, so those barriers (and acquire/release) only need
// to constrain the compiler. Store-load reordering IS permitted by the
// hardware, which is why storeload() alone needs a real fence().
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
60
// Full two-way hardware memory fence, followed by a compiler barrier so the
// compiler cannot move accesses across it either.
// Unlike the older variant of this file, there is no os::is_MP() check here:
// the locked add is executed unconditionally on 32-bit.
inline void OrderAccess::fence() {
#ifdef AMD64
  // 64-bit: delegate to StubRoutines_fence() — presumably a runtime-generated
  // stub; defined elsewhere, not visible in this file.
  StubRoutines_fence();
#else
  // 32-bit: a locked add of zero to the top-of-stack word is a
  // read-modify-write with LOCK semantics, which serves as a full fence
  // on x86.
  __asm {
    lock add dword ptr [esp], 0;
  }
#endif // AMD64
  compiler_barrier();
}
71
#ifndef AMD64
// 32-bit x86 specializations of ordered store-with-fence.
// An xchg with a memory operand implicitly asserts LOCK on x86, so the
// exchange is both the store and a full fence in a single instruction.
//
// 1-byte variant.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;               // edx = destination address
      mov al, v;                // al  = byte value to store
      xchg al, byte ptr [edx];  // atomic exchange == store + full fence
    }
  }
};
85
86 template<>
87 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
93 mov ax, v;
94 xchg ax, word ptr [edx];
95 }
96 }
97 };
98
// 4-byte ordered store-with-fence: xchg with a memory operand implicitly
// asserts LOCK on x86, so the exchange is both the store and a full fence.
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm {
      mov edx, p;                // edx = destination address
      mov eax, v;                // eax = 32-bit value to store
      xchg eax, dword ptr [edx]; // atomic exchange == store + full fence
    }
  }
};
#endif // AMD64
112
113 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
|