142 int prev = *ptr;
143
144 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
145 return prev + add_value;
146 }
147 }
148
149 /* Atomically write VALUE into `*PTR' and returns the previous
150 contents of `*PTR'. */
151 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
152 for (;;) {
153 // Loop until a __kernel_cmpxchg succeeds.
154 int prev = *ptr;
155
156 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
157 return prev;
158 }
159 }
160 #endif // ARM
161
// Store a jint into *DEST.  On platforms other than ARM/M68K a full
// memory barrier precedes the store so earlier accesses are ordered
// before it; ARM/M68K go through their kernel helpers elsewhere and
// take the plain volatile store here.
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();  // full barrier (GCC builtin)
#endif
  *dest = store_value;
}
168
// Pointer-sized counterpart of Atomic::store(jint): full barrier first
// (except on ARM/M68K), then a plain store of the intptr_t value.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();  // full barrier (GCC builtin)
#endif
  *dest = store_value;
}
175
// Size-dispatched atomic add.  The derived class is passed to
// Atomic::AddAndFetch (CRTP); that base presumably builds the rest of
// the add API on top of add_and_fetch — defined elsewhere, confirm in
// the shared atomic header.  Only the per-size add_and_fetch is
// declared here; specializations supply the platform implementation.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
183
184 template<>
185 template<typename I, typename D>
186 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
187 STATIC_ASSERT(4 == sizeof(I));
188 STATIC_ASSERT(4 == sizeof(D));
189
190 #ifdef ARM
191 return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
192 #else
193 #ifdef M68K
194 return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
195 #else
258 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
259 #else
260 #ifdef M68K
261 return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
262 #else
263 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
264 #endif // M68K
265 #endif // ARM
266 }
267
// 8-byte compare-and-exchange: returns the previous contents of *DEST,
// storing EXCHANGE_VALUE only if they equaled COMPARE_VALUE.  Uses the
// GCC __sync builtin, which is documented as a full barrier, so the
// requested `order` is never weakened (it is intentionally unused).
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));  // this specialization is 64-bit only
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
277
// 64-bit load performed through os::atomic_copy64 into a local, then
// returned by value.  The helper presumably guarantees single-copy
// atomicity on targets where a plain 64-bit load is not atomic —
// confirm in the platform's os implementation.
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}
283
// 64-bit store to a non-volatile destination: both the source local and
// DEST are cast to volatile jlong* so the shared os::atomic_copy64
// helper can be reused for the atomic copy.
inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}
287
// 64-bit store to a volatile destination, delegated to
// os::atomic_copy64 (platform helper assumed to provide an atomic
// 64-bit copy — defined elsewhere).
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}
291
292 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
|
142 int prev = *ptr;
143
144 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
145 return prev + add_value;
146 }
147 }
148
149 /* Atomically write VALUE into `*PTR' and returns the previous
150 contents of `*PTR'. */
151 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
152 for (;;) {
153 // Loop until a __kernel_cmpxchg succeeds.
154 int prev = *ptr;
155
156 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
157 return prev;
158 }
159 }
160 #endif // ARM
161
// Size-dispatched atomic add.  The derived class is passed to
// Atomic::AddAndFetch (CRTP); that base presumably builds the rest of
// the add API on top of add_and_fetch — defined elsewhere, confirm in
// the shared atomic header.  Only the per-size add_and_fetch is
// declared here; specializations supply the platform implementation.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
169
170 template<>
171 template<typename I, typename D>
172 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
173 STATIC_ASSERT(4 == sizeof(I));
174 STATIC_ASSERT(4 == sizeof(D));
175
176 #ifdef ARM
177 return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
178 #else
179 #ifdef M68K
180 return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
181 #else
244 return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
245 #else
246 #ifdef M68K
247 return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
248 #else
249 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
250 #endif // M68K
251 #endif // ARM
252 }
253
// 8-byte compare-and-exchange: returns the previous contents of *DEST,
// storing EXCHANGE_VALUE only if they equaled COMPARE_VALUE.  Uses the
// GCC __sync builtin, which is documented as a full barrier, so the
// requested `order` is never weakened (it is intentionally unused).
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));  // this specialization is 64-bit only
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
263
// 64-bit load: copy *SRC into a local jlong via os::atomic_copy64
// (platform helper assumed to give single-copy atomicity where plain
// 64-bit loads are not atomic — confirm in the os implementation),
// then convert the jlong back to T with PrimitiveConversions::cast.
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));  // this specialization is 64-bit only
  volatile jlong dest;
  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}
272
// 64-bit store: reinterpret the by-value STORE_VALUE and *DEST as
// jlong and delegate to os::atomic_copy64 (platform helper assumed to
// make the 64-bit write atomic — defined elsewhere).
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));  // this specialization is 64-bit only
  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
}
280
281 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
|