
src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp

rev 13447 : imported patch linux_s390
rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1

*** 476,486 ****
  
  // The s390 processors always fence before and after the csg instructions.
  // Thus we ignore the memory ordering argument. The docu says: "A serialization
  // function is performed before the operand is fetched and again after the
  // operation is completed."
! jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) {
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CS     %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
      // outputs
--- 476,496 ----
  
  // The s390 processors always fence before and after the csg instructions.
  // Thus we ignore the memory ordering argument. The docu says: "A serialization
  // function is performed before the operand is fetched and again after the
  // operation is completed."
! // No direct support for cmpxchg of bytes; emulate using int.
! template<>
! struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
!
! template<>
! template<typename T>
! inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
!                                                 T volatile* dest,
!                                                 T cmp_val,
!                                                 cmpxchg_memory_order unused) const {
!   STATIC_ASSERT(4 == sizeof(T));
    unsigned long old;
  
    __asm__ __volatile__ (
      "   CS     %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
      // outputs

*** 491,505 ****
      , "0"   (cmp_val)    // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (jint)old;
  }
  
! jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) {
!   unsigned long old;
  
    __asm__ __volatile__ (
      "   CSG    %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
      // outputs
      : [old] "=&d" (old)    // Write-only, prev value irrelevant.
--- 501,521 ----
      , "0"   (cmp_val)    // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return IntegerTypes::cast<T>((uint32_t)old);
  }
  
! template<>
! template<typename T>
! inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
!                                                 T volatile* dest,
!                                                 T cmp_val,
!                                                 cmpxchg_memory_order unused) const {
!   STATIC_ASSERT(8 == sizeof(T));
!   T old;
  
    __asm__ __volatile__ (
      "   CSG    %[old],%[upd],%[mem]   \n\t" // Try to xchg upd with mem.
      // outputs
      : [old] "=&d" (old)    // Write-only, prev value irrelevant.

*** 509,527 ****
      , "0"   (cmp_val)    // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return (jlong)old;
! }
!
! void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) {
!   return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
! }
!
! intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) {
!   return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
  }
  
  inline jlong Atomic::load(const volatile jlong* src) { return *src; }
  
  #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
--- 525,535 ----
      , "0"   (cmp_val)    // Read-only, initial value for [old] (operand #0).
      // clobbered
      : "cc"
    );
  
!   return old;
  }
  
  inline jlong Atomic::load(const volatile jlong* src) { return *src; }
  
  #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
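Note on the byte case: CS compares and swaps a 4-byte operand (CSG an 8-byte one), so there is no native 1-byte compare-and-swap; the patch therefore derives Atomic::PlatformCmpxchg<1> from the shared Atomic::CmpxchgByteUsingInt helper. The following is a minimal, self-contained sketch of that emulation idea only. It is not the HotSpot helper itself: the function name is made up, and it uses the GCC __atomic_compare_exchange_n builtin in place of HotSpot's own word-sized cmpxchg. It merely illustrates widening a byte CAS into a CAS on the enclosing aligned 32-bit word.

  // Sketch only: emulate a 1-byte CAS with a 4-byte CAS on the enclosing
  // aligned word (the idea behind Atomic::CmpxchgByteUsingInt).
  #include <stdint.h>
  #include <stddef.h>

  static uint8_t cmpxchg_byte_using_int_sketch(uint8_t xchg_val,
                                               volatile uint8_t* dest,
                                               uint8_t cmp_val) {
    uintptr_t addr = (uintptr_t)dest;
    volatile uint32_t* aligned = (volatile uint32_t*)(addr & ~(uintptr_t)3);
    size_t shift = 8 * (addr & 3);
  #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    shift = 24 - shift;          // s390 is big-endian: offset 0 is the MSB.
  #endif
    uint32_t mask = (uint32_t)0xff << shift;

    uint32_t cur = *aligned;
    for (;;) {
      uint8_t cur_byte = (uint8_t)((cur & mask) >> shift);
      if (cur_byte != cmp_val) {
        return cur_byte;         // Byte already differs; report what is there.
      }
      uint32_t upd = (cur & ~mask) | ((uint32_t)xchg_val << shift);
      // Word-sized CAS; GCC lowers this to CS on s390. On failure 'cur' is
      // reloaded with the current memory contents and the loop retries.
      if (__atomic_compare_exchange_n(aligned, &cur, upd, false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        return cmp_val;          // Success: the old byte equaled cmp_val.
      }
    }
  }

A failed word CAS refreshes cur with the value found in memory, so the loop re-checks the target byte before retrying; concurrent updates to the other three bytes of the word only cause a benign retry.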