#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
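// The inline-assembly flavors below leave their result in EAX (or EDX:EAX)
// and fall off the end without a C++ return statement, which is exactly
// what warning 4035 would otherwise complain about.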

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

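// PlatformAdd is specialized by operand size; its AddAndFetch CRTP base
// implements the generic Atomic::add in terms of the add_and_fetch
// declared here.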
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};

#ifdef AMD64
inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }

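// 64-bit MSVC does not support inline assembly, so the AMD64 flavors of
// these operations are routed through assembly stubs reached via the
// os::atomic_*_func function pointers. A call such as
// Atomic::add(1, &some_jint), with some_jint being any 4-byte variable,
// selects PlatformAdd<4> by operand size and lands in os::atomic_add_func.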
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
}

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}

#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }

DEFINE_STUB_XCHG(4, jint,  os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)

#undef DEFINE_STUB_XCHG
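
// xchg with a memory operand carries an implicit lock prefix on x86, so
// these exchanges are full two-way fences as well.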

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         cmpxchg_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG
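
// Note that the cmpxchg_memory_order argument is ignored: a lock-prefixed
// compare-and-exchange is always a full two-way fence on x86, which is at
// least as strong as any ordering a caller can request.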

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
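  // lock xadd atomically adds eax to *dest and leaves the old value in
  // eax; adding back the saved addend (ecx) produces the new value, which
  // is returned in eax (hence no C++ return statement; see the 4035
  // pragma above).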
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    lock xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
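  // cmpxchg8b compares EDX:EAX with the destination qword: if they match
  // it stores ECX:EBX there, otherwise it loads the current contents into
  // EDX:EAX. Either way the old value ends up in EDX:EAX, which is the
  // return value. EBX and EDI are saved and restored because the compiler
  // may have live values in them.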
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}

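// On 32-bit x86 a pair of 32-bit moves would not access a jlong
// atomically, but fild/fistp perform a single 64-bit memory access, so
// they provide atomic 64-bit loads and stores for naturally aligned
// operands without resorting to lock cmpxchg8b.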
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild qword ptr [eax]
    mov eax, pdest
    fistp qword ptr [eax]
  }
  return dest;
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  volatile jlong* src = &store_value;
  __asm {
    mov eax, src
    fild qword ptr [eax]
    mov eax, dest
    fistp qword ptr [eax]
  }
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  Atomic::store(store_value, (volatile jlong*)dest);
}

#endif // AMD64

#pragma warning(default: 4035) // Re-enables the warning about missing return statements

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP