
src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp

rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1

@@ -476,19 +476,21 @@
 // The s390 processors always fence before and after the csg instructions.
 // Thus we ignore the memory ordering argument. The docu says: "A serialization
 // function is performed before the operand is fetched and again after the
 // operation is completed."
 
+// No direct support for cmpxchg of bytes; emulate using int.
 template<>
 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
                                                 T volatile* dest,
                                                 T cmp_val,
                                                 cmpxchg_memory_order unused) const {
+  STATIC_ASSERT(4 == sizeof(T));
   unsigned long old;
 
   __asm__ __volatile__ (
     "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
     // outputs

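
 // Aside (not part of the patch): the comment added above says byte-sized
 // cmpxchg is emulated using an int-sized cmpxchg. The following is a
 // minimal, self-contained sketch of that general technique, not HotSpot's
 // actual CmpxchgByteUsingInt code: operate on the aligned 32-bit word that
 // contains the byte and retry if an unrelated byte in the word changed.
 // GCC's __atomic_compare_exchange_n stands in for the platform 4-byte CAS;
 // all names here are illustrative.

 #include <stdint.h>

 inline uint8_t cmpxchg_byte_using_int(uint8_t exchange_value,
                                       volatile uint8_t* dest,
                                       uint8_t compare_value) {
   // Aligned 32-bit word containing *dest, and the byte's offset within it.
   uintptr_t addr = (uintptr_t) dest;
   volatile uint32_t* word = (volatile uint32_t*)(addr & ~(uintptr_t)3);
   int byte_index = (int)(addr & 3);
 #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
   int shift = (3 - byte_index) * 8;   // s390 is big-endian.
 #else
   int shift = byte_index * 8;
 #endif
   uint32_t mask = (uint32_t)0xff << shift;

   uint32_t old_word = *word;
   for (;;) {
     uint8_t cur_byte = (uint8_t)((old_word & mask) >> shift);
     if (cur_byte != compare_value) {
       return cur_byte;  // Comparison failed; report the observed byte.
     }
     uint32_t new_word = (old_word & ~mask) | ((uint32_t)exchange_value << shift);
     // 4-byte CAS on the containing word; on failure old_word is refreshed
     // with the current contents and the loop retries.
     if (__atomic_compare_exchange_n(word, &old_word, new_word,
                                     /*weak=*/false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
       return compare_value;  // Byte was compare_value and is now exchange_value.
     }
   }
 }
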
@@ -508,11 +510,12 @@
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
                                                 T volatile* dest,
                                                 T cmp_val,
                                                 cmpxchg_memory_order unused) const {
-  unsigned long old;
+  STATIC_ASSERT(8 == sizeof(T));
+  T old;
 
   __asm__ __volatile__ (
     "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
     // outputs
     : [old] "=&d" (old)      // Write-only, prev value irrelevant.

@@ -522,11 +525,11 @@
     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
     // clobbered
     : "cc"
   );
 
-  return IntegerTypes::cast<T>(old);
+  return old;
 }
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
 #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
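
 // Aside (not part of the patch): a hedged, portable sketch of the semantics
 // the CS/CSG specializations above implement, for readers unfamiliar with the
 // instructions. Names are illustrative, not HotSpot API: compare the 8-byte
 // value at dest with cmp_val, store xchg_val if they match, and in either
 // case return what was previously at dest. The real code relies on the
 // serialization CS/CSG perform before and after the operation, which is why
 // the memory-order argument is ignored.

 #include <stdint.h>

 inline int64_t cmpxchg8_semantics(int64_t xchg_val,
                                   volatile int64_t* dest,
                                   int64_t cmp_val) {
   int64_t expected = cmp_val;
   // On failure, __atomic_compare_exchange_n writes the observed value back
   // into 'expected', matching cmpxchg's "return the previous value" contract.
   __atomic_compare_exchange_n(dest, &expected, xchg_val,
                               /*weak=*/false,
                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
   return expected;  // Previous contents of *dest.
 }
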