25 #ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
26 #define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
27
28 #include "runtime/os.hpp"
29
30 // The following alternative implementations are needed because
31 // Windows 95 doesn't support (some of) the corresponding Windows NT
32 // calls. Furthermore, these versions allow inlining in the caller.
33 // (More precisely: The documentation for InterlockedExchange says
34 // it is supported for Windows 95. However, when single-stepping
35 // through the assembly code we cannot step into the routine and
36 // when looking at the routine address we see only garbage code.
37 // Better safe than sorry!). Was bug 7/31/98 (gri).
38 //
39 // Performance note: On uniprocessors, the 'lock' prefixes are not
40 // necessary (and expensive). We should generate separate cases if
41 // this becomes a performance problem.
42
43 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
44
// Stores of jbyte/jshort/jint and pointer-sized values are implemented as
// plain assignments (see the performance note above); no lock prefix is used.
45 inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
46 inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
47 inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
48
// Pointer-sized stores: the void* overload writes through a void** view of dest.
49 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
50 inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
51
// Volatile overloads: identical single-write implementations; the volatile
// qualifier only constrains the compiler, no extra fencing is issued here.
52 inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
53 inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
54 inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
55
56
57 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
58 inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
59
60 #ifdef AMD64
// On AMD64, 64-bit stores are also plain single assignments.
61 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
62 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
63
// Atomically adds add_value to *dest and returns the NEW value.
// Delegates to a runtime-generated stub (os::atomic_add_func) rather than
// using inline assembly — 64-bit MSVC does not support __asm.
64 inline jint Atomic::add (jint add_value, volatile jint* dest) {
65 return (jint)(*os::atomic_add_func)(add_value, dest);
66 }
67
// Pointer-width add; same stub-based scheme via os::atomic_add_ptr_func.
68 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
69 return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
70 }
71
// void* flavor: reinterprets dest as intptr_t* and forwards to the same stub.
72 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
73 return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
74 }
75
// Increment is expressed as add(1, ...); the returned new value is discarded.
76 inline void Atomic::inc (volatile jint* dest) {
77 (void)add (1, dest);
78 }
79
80 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
81 (void)add_ptr(1, dest);
82 }
83
84 inline void Atomic::inc_ptr(volatile void* dest) {
85 (void)add_ptr(1, dest);
86 }
87
// Decrement is expressed as add(-1, ...); the returned new value is discarded.
88 inline void Atomic::dec (volatile jint* dest) {
89 (void)add (-1, dest);
90 }
91
92 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
93 (void)add_ptr(-1, dest);
94 }
95
96 inline void Atomic::dec_ptr(volatile void* dest) {
97 (void)add_ptr(-1, dest);
98 }
99
// Atomically swaps *dest with exchange_value and returns the OLD value.
// Stub-based (os::atomic_xchg_func), like add above.
100 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
101 return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
102 }
103
104 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
105 return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
106 }
107
// void* flavor routes through the same pointer-width stub with casts.
108 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
109 return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
110 }
111
// Compare-and-exchange: if *dest == compare_value, store exchange_value;
// always returns the value previously at *dest. The cmpxchg_memory_order
// argument is not consulted here — presumably the stub already provides
// full (sequentially consistent) ordering; confirm against the stub generator.
112 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
113 return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
114 }
115
// Advertise a native 1-byte CAS so shared code need not emulate it.
116 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
117 inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
118 return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
119 }
120
121 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
122 return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
123 }
124
// Pointer CAS: on AMD64 pointers are 64-bit, so both flavors funnel into the
// jlong overload above via casts.
125 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
126 return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
127 }
128
129 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
130 return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
131 }
132
// 64-bit load is a plain read on AMD64.
133 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
134
135 #else // !AMD64
136
// Atomically adds add_value to *dest and returns the NEW value.
// lock xadd leaves the OLD value in eax; adding back the saved addend (ecx)
// yields the new value. The result is left in eax and returned implicitly —
// MSVC's return-in-EAX convention, with warning 4035 disabled above.
137 inline jint Atomic::add (jint add_value, volatile jint* dest) {
138 __asm {
139 mov edx, dest;
140 mov eax, add_value;
141 mov ecx, eax;
142 lock xadd dword ptr [edx], eax;
143 add eax, ecx;
144 }
145 }
146
// On IA-32, pointers are 32-bit, so the pointer flavors reuse the jint add.
147 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
148 return (intptr_t)add((jint)add_value, (volatile jint*)dest);
149 }
150
151 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
152 return (void*)add((jint)add_value, (volatile jint*)dest);
153 }
154
// Atomic increment with no return value: a single locked read-modify-write.
155 inline void Atomic::inc (volatile jint* dest) {
156 // alternative for InterlockedIncrement
157 __asm {
158 mov edx, dest;
159 lock add dword ptr [edx], 1;
160 }
161 }
162
// Pointer flavors reuse the 32-bit implementation (pointers are 32-bit here).
163 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
164 inc((volatile jint*)dest);
165 }
166
167 inline void Atomic::inc_ptr(volatile void* dest) {
168 inc((volatile jint*)dest);
169 }
170
// Atomic decrement, mirroring inc above with lock sub.
171 inline void Atomic::dec (volatile jint* dest) {
172 // alternative for InterlockedDecrement
173 __asm {
174 mov edx, dest;
175 lock sub dword ptr [edx], 1;
176 }
177 }
178
179 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
180 dec((volatile jint*)dest);
181 }
182
183 inline void Atomic::dec_ptr(volatile void* dest) {
184 dec((volatile jint*)dest);
185 }
186
// Atomically swaps *dest with exchange_value and returns the OLD value.
// No explicit lock prefix: xchg with a memory operand is implicitly locked
// on x86. The old value lands in eax and is returned implicitly (pragma 4035).
187 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
188 // alternative for InterlockedExchange
189 __asm {
190 mov eax, exchange_value;
191 mov ecx, dest;
192 xchg eax, dword ptr [ecx];
193 }
194 }
195
// Pointer flavors reuse the 32-bit xchg (IA-32 pointers are 32-bit).
196 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
197 return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
198 }
199
200 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
201 return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
202 }
203
// Advertise a native 1-byte CAS so shared code need not emulate it.
204 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
// 1-byte CAS: cmpxchg compares al against *dest; on match stores cl.
// The old value is left in al and returned implicitly (pragma 4035).
// The cmpxchg_memory_order argument is ignored — the locked instruction
// presumably provides full ordering; confirm against shared Atomic contract.
205 inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
206 // alternative for InterlockedCompareExchange
207 __asm {
208 mov edx, dest
209 mov cl, exchange_value
210 mov al, compare_value
211 lock cmpxchg byte ptr [edx], cl
212 }
213 }
214
// 4-byte CAS, same scheme with eax/ecx; old value returned in eax.
215 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
216 // alternative for InterlockedCompareExchange
217 __asm {
218 mov edx, dest
219 mov ecx, exchange_value
220 mov eax, compare_value
221 lock cmpxchg dword ptr [edx], ecx
222 }
223 }
224
// 8-byte CAS on IA-32 via cmpxchg8b: compare value in edx:eax, replacement
// in ecx:ebx. The 64-bit arguments are split into hi/lo halves first.
// ebx and edi are saved/restored because the compiler may be using them.
// The old value is returned implicitly in edx:eax (pragma 4035).
225 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
226 jint ex_lo = (jint)exchange_value;
227 jint ex_hi = *( ((jint*)&exchange_value) + 1 );
228 jint cmp_lo = (jint)compare_value;
229 jint cmp_hi = *( ((jint*)&compare_value) + 1 );
230 __asm {
231 push ebx
232 push edi
233 mov eax, cmp_lo
234 mov edx, cmp_hi
235 mov edi, dest
236 mov ebx, ex_lo
237 mov ecx, ex_hi
238 lock cmpxchg8b qword ptr [edi]
239 pop edi
240 pop ebx
241 }
242 }
243
// Pointer CAS: on IA-32 pointers are 32-bit, so both flavors funnel into the
// jint cmpxchg above via casts.
244 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
245 return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
246 }
247
248 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
249 return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
250 }
251
// 64-bit atomic load on IA-32: a plain 64-bit read is not a single
// instruction here, so the value is moved through the FPU — fild/fistp
// perform one 64-bit memory read and one 64-bit memory write each,
// relying on those qword accesses being atomic.
252 inline jlong Atomic::load(const volatile jlong* src) {
253 volatile jlong dest;
254 volatile jlong* pdest = &dest;
255 __asm {
256 mov eax, src
257 fild qword ptr [eax]
258 mov eax, pdest
259 fistp qword ptr [eax]
260 }
261 return dest;
262 }
263
// 64-bit atomic store, mirroring load above: spill store_value to a local,
// then copy it to *dest with a single FPU qword load/store pair.
264 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
265 volatile jlong* src = &store_value;
266 __asm {
267 mov eax, src
268 fild qword ptr [eax]
269 mov eax, dest
270 fistp qword ptr [eax]
271 }
272 }
273
// Non-volatile overload forwards to the volatile implementation.
274 inline void Atomic::store(jlong store_value, jlong* dest) {
275 Atomic::store(store_value, (volatile jlong*)dest);
276 }
277
278 #endif // AMD64
279
280 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
281
282 #endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
|
25 #ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
26 #define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
27
28 #include "runtime/os.hpp"
29
30 // The following alternative implementations are needed because
31 // Windows 95 doesn't support (some of) the corresponding Windows NT
32 // calls. Furthermore, these versions allow inlining in the caller.
33 // (More precisely: The documentation for InterlockedExchange says
34 // it is supported for Windows 95. However, when single-stepping
35 // through the assembly code we cannot step into the routine and
36 // when looking at the routine address we see only garbage code.
37 // Better safe than sorry!). Was bug 7/31/98 (gri).
38 //
39 // Performance note: On uniprocessors, the 'lock' prefixes are not
40 // necessary (and expensive). We should generate separate cases if
41 // this becomes a performance problem.
42
43 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
44
45 #ifdef AMD64
46
// Atomically adds add_value to *dest and returns the NEW value.
// Delegates to a runtime-generated stub (os::atomic_add_func) rather than
// using inline assembly — 64-bit MSVC does not support __asm.
template <>
48 inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
49 return (int32_t)(*os::atomic_add_func)(add_value, dest);
50 }
51
// 64-bit add via the pointer-width stub (intptr_t == int64_t on AMD64).
52 template <>
53 inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
54 return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
55 }
56
// Atomically swaps *dest with exchange_value and returns the OLD value.
57 template <>
58 inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
59 return (int32_t)(*os::atomic_xchg_func)(exchange_value, dest);
60 }
61
62 template <>
63 inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
64 return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
65 }
66
// Compare-and-exchange: if *dest == compare_value, store exchange_value;
// always returns the value previously at *dest. The cmpxchg_memory_order
// argument is not consulted — presumably the stub already provides full
// ordering; confirm against the stub generator.
template <>
68 inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
69 return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
70 }
71
// Advertise a native 1-byte CAS so shared code need not emulate it.
72 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
73 template <>
74 inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
75 return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
76 }
77
78 template <>
79 inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
80 return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
81 }
82
83 #else // !AMD64
84
// Atomically adds add_value to *dest and returns the NEW value.
// lock xadd leaves the OLD value in eax; adding back the saved addend (ecx)
// yields the new value, left in eax and returned implicitly — MSVC's
// return-in-EAX convention, with warning 4035 disabled above.
template <>
86 inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
87 __asm {
88 mov edx, dest;
89 mov eax, add_value;
90 mov ecx, eax;
91 lock xadd dword ptr [edx], eax;
92 add eax, ecx;
93 }
94 }
95
// Atomic increment with no return value: a single locked read-modify-write.
96 template <>
97 inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
98 // alternative for InterlockedIncrement
99 __asm {
100 mov edx, dest;
101 lock add dword ptr [edx], 1;
102 }
103 }
104
// Atomic decrement, mirroring inc above with lock sub.
105 template <>
106 inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
107 // alternative for InterlockedDecrement
108 __asm {
109 mov edx, dest;
110 lock sub dword ptr [edx], 1;
111 }
112 }
113
// Atomically swaps *dest with exchange_value and returns the OLD value.
// xchg with a memory operand is implicitly locked on x86; old value is
// returned implicitly in eax (pragma 4035).
114 template <>
115 inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
116 // alternative for InterlockedExchange
117 __asm {
118 mov eax, exchange_value;
119 mov ecx, dest;
120 xchg eax, dword ptr [ecx];
121 }
122 }
123
// Advertise a native 1-byte CAS so shared code need not emulate it.
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
// 1-byte CAS: cmpxchg compares al against *dest; on match stores cl.
// Old value left in al, returned implicitly (pragma 4035). The
// cmpxchg_memory_order argument is ignored — the locked instruction
// presumably provides full ordering; confirm against shared Atomic contract.
125 template <>
126 inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
127 // alternative for InterlockedCompareExchange
128 __asm {
129 mov edx, dest
130 mov cl, exchange_value
131 mov al, compare_value
132 lock cmpxchg byte ptr [edx], cl
133 }
134 }
135
// 4-byte CAS, same scheme with eax/ecx; old value returned in eax.
136 template <>
137 inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
138 // alternative for InterlockedCompareExchange
139 __asm {
140 mov edx, dest
141 mov ecx, exchange_value
142 mov eax, compare_value
143 lock cmpxchg dword ptr [edx], ecx
144 }
145 }
146
// 8-byte CAS on IA-32 via cmpxchg8b: compare value in edx:eax, replacement
// in ecx:ebx. The 64-bit arguments are split into hi/lo halves first.
// ebx and edi are saved/restored because the compiler may be using them.
// The old value is returned implicitly in edx:eax (pragma 4035).
147 template <>
148 inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
149 int32_t ex_lo = (int32_t)exchange_value;
150 int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 );
151 int32_t cmp_lo = (int32_t)compare_value;
152 int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
153 __asm {
154 push ebx
155 push edi
156 mov eax, cmp_lo
157 mov edx, cmp_hi
158 mov edi, dest
159 mov ebx, ex_lo
160 mov ecx, ex_hi
161 lock cmpxchg8b qword ptr [edi]
162 pop edi
163 pop ebx
164 }
165 }
166
// 64-bit atomic load on IA-32: the value is moved through the FPU —
// fild/fistp perform one 64-bit memory read and one 64-bit memory write
// each, relying on those qword accesses being atomic.
template <>
168 inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
169 volatile int64_t dest;
170 volatile int64_t* pdest = &dest;
171 __asm {
172 mov eax, src
173 fild qword ptr [eax]
174 mov eax, pdest
175 fistp qword ptr [eax]
176 }
177 return dest;
178 }
179
// 64-bit atomic store, mirroring load above: spill store_value to a local,
// then copy it to *dest with a single FPU qword load/store pair.
180 template <>
181 inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
182 volatile int64_t* src = &store_value;
183 __asm {
184 mov eax, src
185 fild qword ptr [eax]
186 mov eax, dest
187 fistp qword ptr [eax]
188 }
189 }
190
191 #endif // AMD64
192
193 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
194
195 #endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
|