      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
  }
}

/* Atomically write NEWVAL into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
  }
}
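
// Illustrative use of the helper above (a sketch, not part of this
// file): a spinlock built on the atomic exchange, using a
// hypothetical `lock_word' initialized to 0.
//
//   volatile int lock_word = 0;
//   while (arm_lock_test_and_set(&lock_word, 1) != 0)
//     ;                                    // spin: another thread holds it
//   /* ... critical section ... */
//   arm_lock_test_and_set(&lock_word, 0);  // release by restoring 0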
#endif // ARM

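// Plain store preceded by a full fence on every platform except ARM
// and M68K, so that earlier memory accesses are not reordered past
// the store.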
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

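// Atomically add add_value to *dest and return the updated value:
// via the __kernel_cmpxchg-based helper on ARM, the m68k helper on
// M68K, and the GCC __sync builtin everywhere else.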
inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

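// The increment and decrement operations are expressed in terms of
// add() and add_ptr(), so they provide the same full-barrier
// semantics (see atomic.hpp).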
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

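// Atomically store exchange_value into *dest and return the value
// *dest held beforehand.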
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation. Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1. There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value,
                            cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

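// Note the 64-bit compare-and-swap has no ARM or M68K special case;
// every Zero target uses the GCC builtin here.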
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value,
                             cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value,
                                    cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value,
                                 cmpxchg_memory_order order) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value,
                              order);
}

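// 64-bit loads and stores are routed through os::atomic_copy64() so
// they remain atomic on 32-bit targets, where a plain jlong access
// may be split into two 32-bit accesses.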
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

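// Templated 64-bit specializations: on _LP64 builds int64_t takes
// the same code paths as the 32-bit operations; on 32-bit builds
// loads and stores go through os::atomic_copy64() instead.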
#ifdef _LP64

template<>
inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

template<>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template<>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  int64_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

#else // _LP64

template<>
inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
  volatile int64_t dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

template<>
inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
  os::atomic_copy64((volatile int64_t*)&store_value, dest);
}

#endif // _LP64

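// 32-bit specializations; these mirror the jint operations above.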
template<>
inline void Atomic::specialized_store<int32_t>(int32_t store_value, volatile int32_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

template<>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template<>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation. Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1. There is a test for this in JNI_CreateJavaVM().
  int32_t result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

template<>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

template<>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP