
src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp

rev 13266 : imported patch Atomic_refactoring
rev 13267 : [mq]: Atomic_polishing

*** 51,73 ****
  // On System z, all store operations are atomic if the address where the data is stored into
  // is an integer multiple of the data length. Furthermore, all stores are ordered:
  // a store which occurs conceptually before another store becomes visible to other CPUs
  // before the other store becomes visible.
- inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
- inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
- inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
- inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
- inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
- 
- inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
- inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
- inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
- inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
- inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
  
  //------------
  // Atomic::add
  //------------
--- 51,60 ----
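The comment retained above states the guarantee the removed store overloads relied on: on System z a store to a naturally aligned location is atomic, and stores become visible in order. A minimal stand-alone illustration of that guarantee (not part of the patch; the variable and function names are hypothetical):

#include <stdint.h>

// Illustration only: on s390, a store to a naturally aligned location is
// itself atomic, so a plain volatile assignment suffices and becomes
// visible after all preceding stores.
volatile int64_t g_published_value;   // naturally (8-byte) aligned

void publish(int64_t v) {
  g_published_value = v;              // single atomic 8-byte store
}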
*** 80,90 ****
  // instruction is retried as often as required.
  //
  // The return value of the method is the value that was successfully stored. At the
  // time the caller receives back control, the value in memory may have changed already.
  
! inline jint Atomic::add(jint inc, volatile jint*dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGFR     0,%[inc]                \n\t" // save increment
--- 67,78 ----
  // instruction is retried as often as required.
  //
  // The return value of the method is the value that was successfully stored. At the
  // time the caller receives back control, the value in memory may have changed already.
  
! template <>
! inline int32_t Atomic::specialized_add<int32_t>(int32_t inc, volatile int32_t* dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGFR     0,%[inc]                \n\t" // save increment
--- 67,78 ----
*** 122,136 ****
        //---< clobbered >---
        : "cc"
      );
    }
  
!   return (jint)upd;
  }
  
! inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGR      0,%[inc]                \n\t" // save increment
--- 110,125 ----
        //---< clobbered >---
        : "cc"
      );
    }
  
!   return (int32_t)upd;
  }
  
! template <>
! inline int64_t Atomic::specialized_add<int64_t>(int64_t inc, volatile int64_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGR      0,%[inc]                \n\t" // save increment
--- 110,125 ----
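When the interlocked-add facility is not available, the add specializations fall back to the compare-and-swap loop described in the comment: fetch the old value, try to store old + inc, retry until the store succeeds, and return the value actually stored. A portable sketch of those semantics, using a GCC __sync builtin purely for illustration (this is not the HotSpot implementation):

#include <stdint.h>

// Sketch of the documented add contract: retry the compare-and-swap until it
// succeeds and return the value that was successfully stored.
static int32_t add_via_cas(int32_t inc, volatile int32_t* dest) {
  int32_t old, upd;
  do {
    old = *dest;
    upd = old + inc;
  } while (__sync_val_compare_and_swap(dest, old, upd) != old);
  return upd;
}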
*** 168,197 ****
        //---< clobbered >---
        : "cc"
      );
    }
  
!   return (intptr_t)upd;
  }
  
- inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-   return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
- }
- 
  
  //------------
  // Atomic::inc
  //------------
  // These methods force the value in memory to be incremented (augmented by 1).
  // Both, memory value and increment, are treated as 32bit signed binary integers.
  // No overflow exceptions are recognized, and the condition code does not hold
  // information about the value in memory.
  //
  // The value in memory is updated by using a compare-and-swap instruction. The
  // instruction is retried as often as required.
  
! inline void Atomic::inc(volatile jint* dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      // tty->print_cr("Atomic::inc called... dest @%p", dest);
      __asm__ __volatile__ (
--- 157,183 ----
        //---< clobbered >---
        : "cc"
      );
    }
  
!   return (int64_t)upd;
  }
  
  //------------
  // Atomic::inc
  //------------
  // These methods force the value in memory to be incremented (augmented by 1).
  // Both, memory value and increment, are treated as 32bit signed binary integers.
  // No overflow exceptions are recognized, and the condition code does not hold
  // information about the value in memory.
  //
  // The value in memory is updated by using a compare-and-swap instruction. The
  // instruction is retried as often as required.
  
! template <>
! inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      // tty->print_cr("Atomic::inc called... dest @%p", dest);
      __asm__ __volatile__ (
--- 157,183 ----
*** 232,242 ****
        : "cc"
      );
    }
  }
  
! inline void Atomic::inc_ptr(volatile intptr_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,1                     \n\t" // load increment
--- 218,229 ----
        : "cc"
      );
    }
  }
  
! template <>
! inline void Atomic::specialized_inc<int64_t>(volatile int64_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,1                     \n\t" // load increment
--- 218,229 ----
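The increment specializations follow the same pattern as add with a fixed increment of 1. A hypothetical stand-alone sketch of the documented behavior (illustration via a GCC builtin, not the HotSpot code):

#include <stdint.h>

// Sketch only: inc adds +1 with a compare-and-swap loop that retries until
// the store succeeds.
static void inc_via_cas(volatile int32_t* dest) {
  int32_t old;
  do {
    old = *dest;
  } while (__sync_val_compare_and_swap(dest, old, old + 1) != old);
}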
*** 276,288 ****
        : "cc"
      );
    }
  }
  
- inline void Atomic::inc_ptr(volatile void* dest) {
-   inc_ptr((volatile intptr_t*)dest);
- }
  
  //------------
  // Atomic::dec
  //------------
  // These methods force the value in memory to be decremented (augmented by -1).
--- 263,272 ----
*** 291,301 ****
  // information about the value in memory.
  //
  // The value in memory is updated by using a compare-and-swap instruction. The
  // instruction is retried as often as required.
  
! inline void Atomic::dec(volatile jint* dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,-1                    \n\t" // load increment
--- 275,286 ----
  // information about the value in memory.
  //
  // The value in memory is updated by using a compare-and-swap instruction. The
  // instruction is retried as often as required.
  
! template <>
! inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
    unsigned int old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,-1                    \n\t" // load increment
--- 275,286 ----
*** 338,348 ****
        : "cc"
      );
    }
  }
  
! inline void Atomic::dec_ptr(volatile intptr_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,-1                    \n\t" // load increment
--- 323,334 ----
        : "cc"
      );
    }
  }
  
! template <>
! inline void Atomic::specialized_dec<int64_t>(volatile int64_t* dest) {
    unsigned long old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGHI     2,-1                    \n\t" // load increment
--- 323,334 ----
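The decrement specializations likewise add -1 via the same retry loop; a hypothetical sketch, shown here for the 64-bit flavor (illustration only):

#include <stdint.h>

// Sketch only: dec adds -1 with a compare-and-swap loop that retries until
// the store succeeds.
static void dec_via_cas(volatile int64_t* dest) {
  int64_t old;
  do {
    old = *dest;
  } while (__sync_val_compare_and_swap(dest, old, old - 1) != old);
}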
*** 385,397 ****
        : "cc"
      );
    }
  }
  
- inline void Atomic::dec_ptr(volatile void* dest) {
-   dec_ptr((volatile intptr_t*)dest);
- }
  
  //-------------
  // Atomic::xchg
  //-------------
  // These methods force the value in memory to be replaced by the new value passed
--- 371,380 ----
*** 405,415 ****
  // the new value could be lost unnoticed, due to a store(new value) from
  // another thread.
  //
  // The return value is the (unchanged) value from memory as it was when the
  // replacement succeeded.
  
! inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
    unsigned int old;
  
    __asm__ __volatile__ (
      "   LLGF     %[old],%[mem]           \n\t" // get old value
      "0: CS       %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
--- 388,399 ----
  // the new value could be lost unnoticed, due to a store(new value) from
  // another thread.
  //
  // The return value is the (unchanged) value from memory as it was when the
  // replacement succeeded.
  
! template <>
! inline int32_t Atomic::specialized_xchg<int32_t>(int32_t xchg_val, volatile int32_t* dest) {
    unsigned int old;
  
    __asm__ __volatile__ (
      "   LLGF     %[old],%[mem]           \n\t" // get old value
      "0: CS       %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
--- 388,399 ----
*** 421,434 ****
      : [upd]  "d"   (xchg_val)   // read-only, value to be written to memory
      //---< clobbered >---
      : "cc"
    );
  
!   return (jint)old;
  }
  
! inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   LG       %[old],%[mem]           \n\t" // get old value
      "0: CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
--- 405,419 ----
      : [upd]  "d"   (xchg_val)   // read-only, value to be written to memory
      //---< clobbered >---
      : "cc"
    );
  
!   return (int32_t)old;
  }
  
! template <>
! inline int64_t Atomic::specialized_xchg<int64_t>(int64_t xchg_val, volatile int64_t* dest) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   LG       %[old],%[mem]           \n\t" // get old value
      "0: CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
--- 405,419 ----
*** 443,455 ****
    );
  
    return (intptr_t)old;
  }
  
- inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
-   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
- }
  
  //----------------
  // Atomic::cmpxchg
  //----------------
  // These methods compare the value in memory with a given compare value.
--- 428,437 ----
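The exchange contract described above (unconditionally install the new value and return the previous one; a store from another thread may still overwrite the new value unnoticed) can be sketched portably. The GCC __sync builtin is used only for illustration; it is not the HotSpot code:

#include <stdint.h>

// Sketch of the documented xchg semantics: replace *dest with xchg_val and
// return the prior contents, retrying the compare-and-swap on contention.
static int32_t xchg_via_cas(int32_t xchg_val, volatile int32_t* dest) {
  int32_t old;
  do {
    old = *dest;
  } while (__sync_val_compare_and_swap(dest, old, xchg_val) != old);
  return old;
}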
*** 476,486 ****
  // The s390 processors always fence before and after the csg instructions.
  // Thus we ignore the memory ordering argument. The docu says: "A serialization
  // function is performed before the operand is fetched and again after the
  // operation is completed."
  
! jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CS     %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
      // outputs
--- 458,469 ----
  // The s390 processors always fence before and after the csg instructions.
  // Thus we ignore the memory ordering argument. The docu says: "A serialization
  // function is performed before the operand is fetched and again after the
  // operation is completed."
  
! template <>
! inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t xchg_val, volatile int32_t* dest, int32_t cmp_val, cmpxchg_memory_order order) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CS     %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
      // outputs
--- 458,469 ----
*** 491,504 ****
      , "0"    (cmp_val)  // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (jint)old;
  }
  
! jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CSG    %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
      // outputs
--- 474,488 ----
      , "0"    (cmp_val)  // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (int32_t)old;
  }
  
! template <>
! inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t xchg_val, volatile int64_t* dest, int64_t cmp_val, cmpxchg_memory_order order) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CSG    %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
      // outputs
--- 474,488 ----
*** 509,527 ****
      , "0"    (cmp_val)  // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (jlong)old;
! }
! 
! void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) {
!   return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
  }
  
- intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) {
-   return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
- }
- 
- inline jlong Atomic::load(const volatile jlong* src) { return *src; }
- 
  #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
--- 493,501 ----
      , "0"    (cmp_val)  // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (int64_t)old;
  }
  
  #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
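The cmpxchg contract implemented above installs the new value only if memory still holds the compare value and always returns the prior memory contents; because CS/CSG serialize before and after the operation, the memory-order argument can be ignored on s390. A stand-alone sketch of that contract (illustration only, via a GCC builtin, not the HotSpot code):

#include <stdint.h>

// Sketch of the documented cmpxchg contract: store xchg_val only if *dest
// still equals cmp_val; in either case return the value found in memory.
static int64_t cmpxchg_sketch(int64_t xchg_val, volatile int64_t* dest, int64_t cmp_val) {
  return __sync_val_compare_and_swap(dest, cmp_val, xchg_val);
}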