
src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp

rev 13323 : imported patch Atomic_refactoring
rev 13327 : [mq]: SpecializableAtomic


The file at rev 13323 (imported patch Atomic_refactoring), entering mid-way through arm_add_and_fetch:

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write NEWVAL into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM

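The cmpxchg implementations below also rely on arm_compare_and_swap, which is defined earlier in this file, above this excerpt. A minimal sketch of that helper, assuming the same __kernel_cmpxchg retry pattern as the functions shown here (the exact body may differ):

// Sketch only: the real definition appears above this excerpt.
// __kernel_cmpxchg returns 0 iff it stored newval over oldval at *ptr.
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval, int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;   // lost the race; report the value we saw
      if (__kernel_cmpxchg (oldval, newval, ptr) == 0)
        return prev;   // swap succeeded
    }
}
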
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}
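
The JNI_CreateJavaVM() test mentioned in the comment above only has to exchange a value other than the constant 1 and check the result. A hedged sketch of such a startup self-check (sanity_check_xchg is an illustrative name, not the actual HotSpot code):

// Illustrative only: fails on platforms where __sync_lock_test_and_set
// can only store the immediate constant 1.
static bool sanity_check_xchg() {
  volatile jint probe = 0;
  jint old = Atomic::xchg(5, &probe);   // must store 5, not 1
  return old == 0 && probe == 5;
}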

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();  // restore the full-barrier contract (see Atomic::xchg above)
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value,
                            cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}
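
On all three backends cmpxchg returns the value observed at dest, so a caller detects success by comparing the result with compare_value. A typical retry loop, assuming the default conservative memory order declared in atomic.hpp (set_flag_bit is an illustrative name, not HotSpot code):

// Illustrative caller pattern: atomically set the lowest bit of a flag word.
inline void set_flag_bit(volatile jint* flags) {
  for (;;) {
    jint cur = *flags;
    if (Atomic::cmpxchg(cur | 1, flags, cur) == cur)
      return;   // the observed value matched, so our update was installed
  }
}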

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value,
                             cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value,
                                    cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value,
                                 cmpxchg_memory_order order) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value,
                              order);
}

inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}
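
os::atomic_copy64 is needed because on ILP32 targets a plain 64-bit access can compile into two 32-bit memory operations, so another thread may observe a half-written value. A sketch of the hazard this avoids (torn_read is illustrative, not HotSpot code):

// Illustrative hazard: on a 32-bit target this load may split into two
// instructions, so a concurrent store of 0x00000001FFFFFFFF over zero can
// be observed as 0x00000000FFFFFFFF or 0x0000000100000000.
jlong torn_read(const volatile jlong* src) {
  return *src;   // NOT atomic on ILP32; use Atomic::load() instead
}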

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP


The same file at rev 13327 ([mq]: SpecializableAtomic):

[The tail of arm_add_and_fetch, the arm_lock_test_and_set helper, and the closing #endif // ARM are unchanged from rev 13323 above.]

#ifdef _LP64

template <>
inline int64_t GeneralizedAtomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template <>
inline int64_t GeneralizedAtomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  int64_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();  // restore the full-barrier contract (see the int32_t version)
  return result;
#endif // M68K
#endif // ARM
}

#endif // _LP64
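
These explicit specializations let the platform pin down the hot integer widths while GeneralizedAtomic supplies defaults for everything else. A hedged sketch of the dispatch this enables, assuming a templated front end along the lines of the SpecializableAtomic patch (atomic_add_sketch is an illustrative name; the real front end in atomic.hpp differs in detail):

// Illustrative only: the public API canonicalizes its operand to a fixed
// integer width, and the compiler then selects the matching explicit
// specialization defined in this platform header.
template <typename T>
inline T atomic_add_sketch(T add_value, volatile T* dest) {
  return GeneralizedAtomic::specialized_add<T>(add_value, dest);
}
// atomic_add_sketch<int32_t>(1, &counter) resolves to the int32_t
// specialization below; on _LP64, int64_t operands resolve to the
// specialization above.
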
template <>
inline int32_t GeneralizedAtomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template <>
inline int32_t GeneralizedAtomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  int32_t result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

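Newer GCC exposes __atomic builtins that make the required ordering explicit instead of pairing an acquire-only exchange with a separate fence. An equivalent (illustrative, not part of this patch) formulation of the exchange-plus-barrier idiom above:

// Illustrative alternative using the __atomic builtins: a sequentially
// consistent exchange provides the full barrier directly.
static inline int32_t full_barrier_xchg(volatile int32_t* dest, int32_t v) {
  return __atomic_exchange_n(dest, v, __ATOMIC_SEQ_CST);
}
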
template <>
inline int32_t GeneralizedAtomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

template <>
inline int64_t GeneralizedAtomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

template<>
inline int64_t GeneralizedAtomic::specialized_load<int64_t>(const volatile int64_t* src) {
  volatile int64_t dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

template<>
inline void GeneralizedAtomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
  os::atomic_copy64((volatile int64_t*)&store_value, dest);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP