< prev index next >

src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp

Print this page




 272 //
 273 // Inspecting the return value is the only way for the caller to determine
 274 // if the compare-and-swap instruction was successful:
 275 // - If return value and compare value compare equal, the compare-and-swap
 276 //   instruction was successful and the value in memory was replaced by the
 277 //   exchange value.
 278 // - If return value and compare value compare unequal, the compare-and-swap
 279 //   instruction was not successful. The value in memory was left unchanged.
 280 //
 281 // The s390 processors always fence before and after the csg instructions.
 282 // Thus we ignore the memory ordering argument. The documentation says: "A serialization
 283 // function is performed before the operand is fetched and again after the
 284 // operation is completed."
 285 
 286 // No direct support for cmpxchg of bytes; emulate using int.
 287 template<>
 288 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; // 1-byte cmpxchg emulated via the 4-byte int path.
 289 
 // 4-byte compare-and-swap using the s390 CS (Compare and Swap) instruction.
 // Returns the previous value of *dest; it equals cmp_val iff the exchange
 // took place. The memory-order argument is ignored: CS serializes before
 // and after the operation (see the comment block above).
 290 template<>
 291 template<typename T>
 292 inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
 293                                                 T volatile* dest,
 294                                                 T cmp_val,

 295                                                 atomic_memory_order unused) const {
 296   STATIC_ASSERT(4 == sizeof(T));
 297   T old;
 298 
 299   __asm__ __volatile__ (
 300     "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
 301     // outputs
 302     : [old] "=&d" (old)      // Write-only, prev value irrelevant. Early-clobber: written before all inputs are consumed.
 303     , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
 304     // inputs
 305     : [upd] "d"   (xchg_val)
 306     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
 307     // clobbered
 308     : "cc", "memory"
 309   );
 310 
 311   return old;             // Prior memory contents; == cmp_val on success.
 312 }
 313 
 // 8-byte compare-and-swap using the s390 CSG (Compare and Swap, 64-bit)
 // instruction. Returns the previous value of *dest; it equals cmp_val iff
 // the exchange took place. The memory-order argument is ignored: CSG
 // serializes before and after the operation (see the comment block above).
 314 template<>
 315 template<typename T>
 316 inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
 317                                                 T volatile* dest,
 318                                                 T cmp_val,

 319                                                 atomic_memory_order unused) const {
 320   STATIC_ASSERT(8 == sizeof(T));
 321   T old;
 322 
 323   __asm__ __volatile__ (
 324     "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
 325     // outputs
 326     : [old] "=&d" (old)      // Write-only, prev value irrelevant. Early-clobber: written before all inputs are consumed.
 327     , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
 328     // inputs
 329     : [upd] "d"   (xchg_val)
 330     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
 331     // clobbered
 332     : "cc", "memory"
 333   );
 334 
 335   return old;             // Prior memory contents; == cmp_val on success.
 336 }
 337 
 338 template<size_t byte_size>


 272 //
 273 // Inspecting the return value is the only way for the caller to determine
 274 // if the compare-and-swap instruction was successful:
 275 // - If return value and compare value compare equal, the compare-and-swap
 276 //   instruction was successful and the value in memory was replaced by the
 277 //   exchange value.
 278 // - If return value and compare value compare unequal, the compare-and-swap
 279 //   instruction was not successful. The value in memory was left unchanged.
 280 //
 281 // The s390 processors always fence before and after the csg instructions.
 282 // Thus we ignore the memory ordering argument. The documentation says: "A serialization
 283 // function is performed before the operand is fetched and again after the
 284 // operation is completed."
 285 
 286 // No direct support for cmpxchg of bytes; emulate using int.
 287 template<>
 288 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; // 1-byte cmpxchg emulated via the 4-byte int path.
 289 
 // 4-byte compare-and-swap using the s390 CS (Compare and Swap) instruction.
 // Returns the previous value of *dest; it equals cmp_val iff the exchange
 // took place. The memory-order argument is ignored: CS serializes before
 // and after the operation (see the comment block above).
 290 template<>
 291 template<typename T>
 292 inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,

 293                                                 T cmp_val,
 294                                                 T xchg_val,
 295                                                 atomic_memory_order unused) const {
 296   STATIC_ASSERT(4 == sizeof(T));
 297   T old;
 298 
 299   __asm__ __volatile__ (
 300     "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
 301     // outputs
 302     : [old] "=&d" (old)      // Write-only, prev value irrelevant. Early-clobber: written before all inputs are consumed.
 303     , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
 304     // inputs
 305     : [upd] "d"   (xchg_val)
 306     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
 307     // clobbered
 308     : "cc", "memory"
 309   );
 310 
 311   return old;             // Prior memory contents; == cmp_val on success.
 312 }
 313 
 // 8-byte compare-and-swap using the s390 CSG (Compare and Swap, 64-bit)
 // instruction. Returns the previous value of *dest; it equals cmp_val iff
 // the exchange took place. The memory-order argument is ignored: CSG
 // serializes before and after the operation (see the comment block above).
 314 template<>
 315 template<typename T>
 316 inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,

 317                                                 T cmp_val,
 318                                                 T xchg_val,
 319                                                 atomic_memory_order unused) const {
 320   STATIC_ASSERT(8 == sizeof(T));
 321   T old;
 322 
 323   __asm__ __volatile__ (
 324     "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
 325     // outputs
 326     : [old] "=&d" (old)      // Write-only, prev value irrelevant. Early-clobber: written before all inputs are consumed.
 327     , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
 328     // inputs
 329     : [upd] "d"   (xchg_val)
 330     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
 331     // clobbered
 332     : "cc", "memory"
 333   );
 334 
 335   return old;             // Prior memory contents; == cmp_val on success.
 336 }
 337 
 338 template<size_t byte_size>
< prev index next >