92 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
93 (void)add_ptr(-1, dest);
94 }
95
96 inline void Atomic::dec_ptr(volatile void* dest) {
97 (void)add_ptr(-1, dest);
98 }
99
100 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
101 return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
102 }
103
104 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
105 return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
106 }
107
108 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
109 return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
110 }
111
112 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
113 return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
114 }
115
116 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
117 inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
118 return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
119 }
120
121 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
122 return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
123 }
124
125 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
126 return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
127 }
128
129 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
130 return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
131 }
132
133 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
134
135 #else // !AMD64
136
// Atomically adds add_value to *dest and returns the resulting value.
// MSVC inline asm: a jint result is returned in EAX, so falling off the
// end of the function with the sum in EAX acts as the return statement.
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                    // keep a copy of the addend
    lock xadd dword ptr [edx], eax;  // eax <- old *dest; *dest += addend
    add eax, ecx;                    // eax <- old value + addend = new value
  }
}
146
147 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
148 return (intptr_t)add((jint)add_value, (volatile jint*)dest);
149 }
150
151 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
184 dec((volatile jint*)dest);
185 }
186
// Atomically swaps exchange_value into *dest; returns the previous value.
// The xchg instruction with a memory operand carries an implicit LOCK.
// MSVC inline asm: the result is left in EAX, the jint return register.
inline jint Atomic::xchg    (jint exchange_value, volatile jint* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];  // eax <- old *dest; *dest <- exchange_value
  }
}
195
196 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
197 return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
198 }
199
200 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
201 return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
202 }
203
204 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
// Byte-sized compare-and-swap: if *dest equals compare_value, store
// exchange_value; always yields the byte *dest held before the operation.
// 'order' is unused — only the fully locked form is emitted here.
// MSVC inline asm: the jbyte result is returned in AL; cmpxchg leaves the
// observed value there on both success and failure.
inline jbyte Atomic::cmpxchg    (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl  // on failure AL is reloaded with *dest
  }
}
214
// 32-bit compare-and-swap: if *dest equals compare_value, store
// exchange_value; always yields the prior value of *dest.
// 'order' is unused — only the fully locked form is emitted here.
// MSVC inline asm: the jint result is returned in EAX; cmpxchg leaves the
// observed value there on both success and failure.
inline jint Atomic::cmpxchg    (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx  // on failure EAX <- current *dest
  }
}
224
// 64-bit compare-and-swap on a 32-bit target via LOCK CMPXCHG8B.
// Both jlong arguments are split into lo/hi 32-bit halves (little-endian
// layout: word 0 is the low half). CMPXCHG8B compares EDX:EAX with *dest
// and, on match, stores ECX:EBX; on mismatch it loads *dest into EDX:EAX.
// Under MSVC a jlong return value travels in EDX:EAX, which is exactly
// where the instruction leaves the observed value — hence no explicit
// return. 'order' is unused.
inline jlong Atomic::cmpxchg    (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx                        // EBX/EDI may be in use by the compiler;
    push edi                        // preserve them across the sequence
    mov eax, cmp_lo                 // EDX:EAX = expected value
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo                  // ECX:EBX = replacement value
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}
243
244 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
245 return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
246 }
247
248 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
249 return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
250 }
251
// 64-bit load on a 32-bit target. A pair of 32-bit moves would not be
// atomic, so the value is bounced through the x87 FPU: FILD reads the
// source qword in a single memory access and FISTP writes it into a
// local, which is then returned normally.
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild qword ptr [eax]    // push the 64-bit source onto the FPU stack
    mov eax, pdest
    fistp qword ptr [eax]   // pop it into the local 'dest'
  }
  return dest;
}
263
264 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
265 volatile jlong* src = &store_value;
266 __asm {
267 mov eax, src
268 fild qword ptr [eax]
269 mov eax, dest
|
92 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
93 (void)add_ptr(-1, dest);
94 }
95
96 inline void Atomic::dec_ptr(volatile void* dest) {
97 (void)add_ptr(-1, dest);
98 }
99
100 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
101 return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
102 }
103
104 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
105 return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
106 }
107
108 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
109 return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
110 }
111
// Expands to the Atomic::PlatformCmpxchg<ByteSize> specialization that
// forwards a CAS of the given width to a runtime-generated stub:
//   ByteSize - operand width in bytes (selects the specialization)
//   StubType - raw integer type the stub operates on
//   StubName - os:: function pointer naming the stub to call
// cmpxchg_using_helper adapts between T and StubType; the STATIC_ASSERT
// rejects instantiations whose T does not match ByteSize.
// (Comments are kept outside the macro body: a '//' before a trailing
// backslash would splice the next line into the comment.)
#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         cmpxchg_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

// Instantiate for the three operand widths used on this platform.
DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)

// The helper macro is scoped to this section only.
#undef DEFINE_STUB_CMPXCHG
128
129 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
130
131 #else // !AMD64
132
// Atomically adds add_value to *dest and returns the resulting value.
// MSVC inline asm: a jint result is returned in EAX, so falling off the
// end of the function with the sum in EAX acts as the return statement.
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                    // keep a copy of the addend
    lock xadd dword ptr [edx], eax;  // eax <- old *dest; *dest += addend
    add eax, ecx;                    // eax <- old value + addend = new value
  }
}
142
143 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
144 return (intptr_t)add((jint)add_value, (volatile jint*)dest);
145 }
146
147 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
180 dec((volatile jint*)dest);
181 }
182
// Atomically swaps exchange_value into *dest; returns the previous value.
// The xchg instruction with a memory operand carries an implicit LOCK.
// MSVC inline asm: the result is left in EAX, the jint return register.
inline jint Atomic::xchg    (jint exchange_value, volatile jint* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];  // eax <- old *dest; *dest <- exchange_value
  }
}
191
192 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
193 return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
194 }
195
196 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
197 return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
198 }
199
// Byte-sized CAS specialization used by the generic Atomic::cmpxchg
// front end: if *dest equals compare_value, store exchange_value;
// always yields the byte observed in *dest. 'order' is unused — only
// the fully locked form is emitted. MSVC inline asm: the one-byte
// result is returned in AL, where cmpxchg leaves the observed value
// on both success and failure.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}
215
// 32-bit CAS specialization: if *dest equals compare_value, store
// exchange_value; always yields the prior value of *dest. 'order' is
// unused — only the fully locked form is emitted. MSVC inline asm: the
// result is returned in EAX, where cmpxchg leaves the observed value
// on both success and failure.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}
231
// 64-bit CAS on a 32-bit target via LOCK CMPXCHG8B. Both operands are
// split into lo/hi 32-bit halves (little-endian layout: word 0 is the
// low half). CMPXCHG8B compares EDX:EAX with *dest and, on match,
// stores ECX:EBX; on mismatch it loads *dest into EDX:EAX. Under MSVC
// an 8-byte result travels in EDX:EAX, which is exactly where the
// instruction leaves the observed value — hence no explicit return.
// 'order' is unused.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx                        // EBX/EDI may be in use by the compiler;
    push edi                        // preserve them across the sequence
    mov eax, cmp_lo                 // EDX:EAX = expected value
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo                  // ECX:EBX = replacement value
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}
256
// 64-bit load on a 32-bit target. A pair of 32-bit moves would not be
// atomic, so the value is bounced through the x87 FPU: FILD reads the
// source qword in a single memory access and FISTP writes it into a
// local, which is then returned normally.
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild qword ptr [eax]    // push the 64-bit source onto the FPU stack
    mov eax, pdest
    fistp qword ptr [eax]   // pop it into the local 'dest'
  }
  return dest;
}
268
269 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
270 volatile jlong* src = &store_value;
271 __asm {
272 mov eax, src
273 fild qword ptr [eax]
274 mov eax, dest
|