
src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp

Old version:

 272     : [old_value]       "=&r"   (old_value),
 273                         "=m"    (*dest)
 274     /* in */
 275     : [dest]            "b"     (dest),
 276       [zero]            "r"     (zero),
 277       [exchange_value]  "r"     (exchange_value),
 278                         "m"     (*dest)
 279     /* clobber */
 280     : "cc",
 281       "memory"
 282     );
 283 
 284   return (intptr_t) old_value;
 285 }
 286 
 287 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 288   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 289 }
 290 
 291 inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
 292   if (order != memory_order_relaxed) {
 293     __asm__ __volatile__ (
 294       /* fence */
 295       strasm_sync
 296       );
 297   }
 298 }
 299 
 300 inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
 301   if (order != memory_order_relaxed) {
 302     __asm__ __volatile__ (
 303       /* fence */
 304       strasm_sync
 305       );
 306   }
 307 }
 308 
 309 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 310 inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
 311 
 312   // Note that cmpxchg guarantees a two-way memory barrier across
 313   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
 314   // specified otherwise (see atomic.hpp).
 315 
 316   // Using 32 bit internally.
 317   volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
 318 
 319 #ifdef VM_LITTLE_ENDIAN
 320   const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
 321 #else

New version:

 272     : [old_value]       "=&r"   (old_value),
 273                         "=m"    (*dest)
 274     /* in */
 275     : [dest]            "b"     (dest),
 276       [zero]            "r"     (zero),
 277       [exchange_value]  "r"     (exchange_value),
 278                         "m"     (*dest)
 279     /* clobber */
 280     : "cc",
 281       "memory"
 282     );
 283 
 284   return (intptr_t) old_value;
 285 }
 286 
 287 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
 288   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 289 }
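
The operand lists above use a few GCC extended-asm constraint kinds that are easy to misread: "=&r" marks an early-clobber register output, "b" forces the address into a base register (never r0), and the paired "m"(*dest) operands tell the compiler the asm both reads and writes *dest, on top of the "memory" clobber. As a reminder, here is a minimal, hypothetical 64-bit PPC exchange loop; it is a sketch, not the code under review, and it omits the memory barriers a real Atomic::xchg_ptr must issue.

    // Hypothetical sketch only: a bare 64-bit exchange loop illustrating the
    // constraint kinds used in the operand lists above. Assumes 64-bit PPC
    // (ldarx/stdcx. operate on doublewords) and omits all memory barriers.
    static inline long fetch_and_store(long new_value, volatile long* dest) {
      long old_value;
      __asm__ __volatile__ (
        "1:  ldarx   %[old], 0, %[addr]    \n"   // load-reserve the current value
        "    stdcx.  %[xchg], 0, %[addr]   \n"   // store-conditional the new value
        "    bne-    1b                    \n"   // lost the reservation: retry
        : [old]  "=&r" (old_value),              // early-clobber register output
          "=m"   (*dest)                         // the asm writes *dest
        : [addr] "b"   (dest),                   // base register (not r0) holding the address
          [xchg] "r"   (new_value),
          "m"    (*dest)                         // the asm also reads *dest
        : "cc", "memory");
      return old_value;
    }
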
 290 
 291 inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
 292   if (order == memory_order_release) {
 293     __asm__ __volatile__ (
 294       /* release */
 295       strasm_lwsync
 296       );
 297   } else if (order != memory_order_relaxed) {
 298     __asm__ __volatile__ (
 299       /* fence */
 300       strasm_sync
 301       );
 302   }
 303 }
 304 
 305 inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
 306   if (order == memory_order_release) {
 307     // no post membar
 308   } else if (order == memory_order_conservative) {
 309     __asm__ __volatile__ (
 310       /* fence */
 311       strasm_sync
 312       );
 313   }
 314 }
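
For orientation, the net effect of the change above: the old code issued a full sync both before and after the cmpxchg for every order other than memory_order_relaxed, while the new code lets a release-ordered cmpxchg get by with the cheaper lwsync up front and no trailing barrier. Below is a minimal standalone sketch of that mapping, assuming the strasm_sync/strasm_lwsync macros expand to the plain sync/lwsync instructions and mirroring only the three orders the code tests (the full enum lives in atomic.hpp).

    #include <cstdio>

    // Mirrors only the enumerators referenced above; the real definition is in atomic.hpp.
    enum cmpxchg_memory_order {
      memory_order_relaxed,
      memory_order_release,
      memory_order_conservative
    };

    // Report, rather than emit, the barriers the new pre/post membar pair would choose.
    static void describe_barriers(cmpxchg_memory_order order) {
      const char* pre  = (order == memory_order_release) ? "lwsync"
                       : (order != memory_order_relaxed) ? "sync"
                                                         : "(none)";
      const char* post = (order == memory_order_conservative) ? "sync" : "(none)";
      std::printf("order=%d  pre=%-6s  post=%s\n", (int)order, pre, post);
    }

    int main() {
      describe_barriers(memory_order_relaxed);       // no barriers at all
      describe_barriers(memory_order_release);       // lwsync before, nothing after
      describe_barriers(memory_order_conservative);  // full sync on both sides
      return 0;
    }
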
 315 
 316 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 317 inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
 318 
 319   // Note that cmpxchg guarantees a two-way memory barrier across
 320   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
 321   // specified otherwise (see atomic.hpp).
 322 
 323   // Using 32 bit internally.
 324   volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
 325 
 326 #ifdef VM_LITTLE_ENDIAN
 327   const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
 328 #else
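
The excerpt cuts off just after the little-endian shift computation, but the technique the jbyte specialization sets up is clear: emulate a byte-sized compare-and-swap with a CAS on the naturally aligned 32-bit word containing the byte. Below is a hypothetical, self-contained sketch of that technique using the GCC __atomic builtins in place of the file's inline assembly; the big-endian shift and the retry policy are assumptions, not a transcription of the code under review.

    #include <stdint.h>

    // Hypothetical sketch of a byte CAS built from a 32-bit word CAS.
    static inline int8_t cmpxchg_byte_via_word(int8_t exchange_value,
                                               volatile int8_t* dest,
                                               int8_t compare_value) {
      // Round the byte address down to its enclosing aligned 32-bit word.
      volatile uint32_t* dest_base =
          (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);

      // Byte position within the word depends on endianness (cf. VM_LITTLE_ENDIAN above).
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      const unsigned shift = ((uintptr_t)dest & 3) * 8;
    #else
      const unsigned shift = (3 - ((uintptr_t)dest & 3)) * 8;   // assumed big-endian variant
    #endif

      const uint32_t mask = (uint32_t)0xff << shift;
      const uint32_t xchg = (uint32_t)(uint8_t)exchange_value << shift;
      const uint32_t cmp  = (uint32_t)(uint8_t)compare_value  << shift;

      uint32_t old_word = *dest_base;
      for (;;) {
        if ((old_word & mask) != cmp) {
          break;                                 // the byte no longer matches: fail the CAS
        }
        uint32_t new_word = (old_word & ~mask) | xchg;
        // On failure the builtin refreshes old_word with the word's current contents.
        if (__atomic_compare_exchange_n((uint32_t*)dest_base, &old_word, new_word,
                                        /* weak */ false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
          break;                                 // swapped our byte; neighbouring bytes preserved
        }
        // Some other byte in the word changed: re-check ours and retry.
      }
      // Return the byte's previous value, matching Atomic::cmpxchg's contract.
      return (int8_t)(uint8_t)((old_word & mask) >> shift);
    }
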

