
src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp

rev 13266 : imported patch Atomic_refactoring
rev 13267 : [mq]: Atomic_polishing

@@ -51,23 +51,10 @@
 
 // On System z, all store operations are atomic if the address where the data is stored into
 // is an integer multiple of the data length. Furthermore, all stores are ordered:
 // a store which occurs conceptually before another store becomes visible to other CPUs
 // before the other store becomes visible.
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
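
The specializations removed above are all plain assignments; because an aligned store on s390 is already atomic and ordered, a single shared, templated default can take their place. A minimal sketch of such a generic store (the name generic_store is illustrative only, not the actual shared HotSpot code):

    template <typename T>
    inline void generic_store(T store_value, volatile T* dest) {
      // A naturally aligned store is atomic and ordered on s390,
      // so no special instruction or barrier is needed.
      *dest = store_value;
    }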
 
 
 //------------
 // Atomic::add
 //------------

@@ -80,11 +67,12 @@
 // instruction is retried as often as required.
 //
 // The return value of the method is the value that was successfully stored. At the
 // time the caller receives back control, the value in memory may have changed already.
 
-inline jint Atomic::add(jint inc, volatile jint*dest) {
+template <>
+inline int32_t Atomic::specialized_add<int32_t>(int32_t inc, volatile int32_t* dest) {
   unsigned int old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
       "   LGFR     0,%[inc]                \n\t" // save increment

@@ -122,15 +110,16 @@
       //---<  clobbered  >---
       : "cc"
     );
   }
 
-  return (jint)upd;
+  return (int32_t)upd;
 }
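
The comment before specialized_add describes a compare-and-swap retry loop whose result is the value that was actually stored. Purely as an illustration (GCC __atomic builtins, not HotSpot code, and ignoring the load-and-add fast path guarded by VM_Version::has_LoadAndALUAtomicV1()), the same pattern looks like this:

    #include <stdint.h>

    static inline int32_t add_via_cas(int32_t inc, volatile int32_t* dest) {
      int32_t old = *dest;
      int32_t upd;
      do {
        upd = old + inc;
        // On failure the builtin refreshes 'old' with the current memory
        // value, so the loop simply recomputes and retries.
      } while (!__atomic_compare_exchange_n(dest, &old, upd, false,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
      return upd;  // the value that was successfully stored
    }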
 
 
-inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
+template <>
+inline int64_t Atomic::specialized_add<int64_t>(int64_t inc, volatile int64_t* dest) {
   unsigned long old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
       "   LGR      0,%[inc]                \n\t" // save increment

@@ -168,30 +157,27 @@
       //---<  clobbered  >---
       : "cc"
     );
   }
 
-  return (intptr_t)upd;
+  return (int64_t)upd;
 }
 
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
 //------------
+
 // Atomic::inc
 //------------
 // These methods force the value in memory to be incremented (augmented by 1).
 // Both, memory value and increment, are treated as 32bit signed binary integers.
 // No overflow exceptions are recognized, and the condition code does not hold
 // information about the value in memory.
 //
 // The value in memory is updated by using a compare-and-swap instruction. The
 // instruction is retried as often as required.
 
-inline void Atomic::inc(volatile jint* dest) {
+template <>
+inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
   unsigned int old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
 //  tty->print_cr("Atomic::inc     called... dest @%p", dest);
     __asm__ __volatile__ (

@@ -232,11 +218,12 @@
       : "cc"
     );
   }
 }
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+template <>
+inline void Atomic::specialized_inc<int64_t>(volatile int64_t* dest) {
   unsigned long old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
       "   LGHI     2,1                     \n\t" // load increment

@@ -276,13 +263,10 @@
       : "cc"
     );
   }
 }
 
-inline void Atomic::inc_ptr(volatile void* dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
 
 //------------
 // Atomic::dec
 //------------
 // These methods force the value in memory to be decremented (augmented by -1).

@@ -291,11 +275,12 @@
 // information about the value in memory.
 //
 // The value in memory is updated by using a compare-and-swap instruction. The
 // instruction is retried as often as required.
 
-inline void Atomic::dec(volatile jint* dest) {
+template <>
+inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
   unsigned int old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
       "   LGHI     2,-1                    \n\t" // load increment

@@ -338,11 +323,12 @@
       : "cc"
     );
   }
 }
 
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+template <>
+inline void Atomic::specialized_dec<int64_t>(volatile int64_t* dest) {
   unsigned long old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
       "   LGHI     2,-1                    \n\t" // load increment

@@ -385,13 +371,10 @@
       : "cc"
     );
   }
 }
 
-inline void Atomic::dec_ptr(volatile void* dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
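
The inc and dec comments describe the same compare-and-swap retry with a fixed increment of 1 or -1. Purely as an illustration (GCC builtins, not HotSpot code), the pattern collapses to an atomic fetch-and-add, with decrement being the same call with -1:

    #include <stdint.h>

    static inline void inc_via_fetch_add(volatile int32_t* dest) {
      __atomic_fetch_add(dest, 1, __ATOMIC_SEQ_CST);    // increment by 1
    }

    static inline void dec_via_fetch_add(volatile int32_t* dest) {
      __atomic_fetch_add(dest, -1, __ATOMIC_SEQ_CST);   // "augmented by -1"
    }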
 
 //-------------
 // Atomic::xchg
 //-------------
 // These methods force the value in memory to be replaced by the new value passed

@@ -405,11 +388,12 @@
 // the new value could be lost unnoticed, due to a store(new value) from
 // another thread.
 //
 // The return value is the (unchanged) value from memory as it was when the
 // replacement succeeded.
-inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
+template <>
+inline int32_t Atomic::specialized_xchg<int32_t>(int32_t xchg_val, volatile int32_t* dest) {
   unsigned int  old;
 
   __asm__ __volatile__ (
     "   LLGF     %[old],%[mem]           \n\t" // get old value
     "0: CS       %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem

@@ -421,14 +405,15 @@
     : [upd] "d"   (xchg_val) // read-only, value to be written to memory
     //---<  clobbered  >---
     : "cc"
   );
 
-  return (jint)old;
+  return (int32_t)old;
 }
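
As the comment above notes, the return value is the memory content as it was when the replacement succeeded, and it may already be stale when the caller inspects it; it is still sufficient for test-and-set style uses. Purely as an illustration (GCC builtins, not HotSpot code; busy_flag is a hypothetical variable), a simple flag built on exactly that property:

    #include <stdint.h>

    static volatile int32_t busy_flag = 0;   // hypothetical, not from this file

    static inline void acquire_flag() {
      // The exchange returns the previous value: 0 means we installed our 1
      // first, anything else means another thread already holds the flag.
      while (__atomic_exchange_n(&busy_flag, 1, __ATOMIC_SEQ_CST) != 0) {
        ;  // spin; a real lock would back off or yield
      }
    }

    static inline void release_flag() {
      __atomic_exchange_n(&busy_flag, 0, __ATOMIC_SEQ_CST);
    }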
 
-inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
+template <>
+inline int64_t Atomic::specialized_xchg<int64_t>(int64_t xchg_val, volatile int64_t* dest) {
   unsigned long old;
 
   __asm__ __volatile__ (
     "   LG       %[old],%[mem]           \n\t" // get old value
     "0: CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem

@@ -443,13 +428,10 @@
   );
 
   return (intptr_t)old;
 }
 
-inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
 
 //----------------
 // Atomic::cmpxchg
 //----------------
 // These methods compare the value in memory with a given compare value.

@@ -476,11 +458,12 @@
 // The s390 processors always fence before and after the csg instructions.
 // Thus we ignore the memory ordering argument. The docu says: "A serialization
 // function is performed before the operand is fetched and again after the
 // operation is completed."
 
-jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) {
+template <>
+inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t xchg_val, volatile int32_t* dest, int32_t cmp_val, cmpxchg_memory_order order) {
   unsigned long old;
 
   __asm__ __volatile__ (
     "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
     // outputs

@@ -491,14 +474,15 @@
     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
     // clobbered
     : "cc"
   );
 
-  return (jint)old;
+  return (int32_t)old;
 }
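
Since CS and CSG serialize before and after the operation, the memory ordering argument can safely be ignored here while callers still get the usual conditional-update guarantee. Purely as an illustration (GCC builtins, not HotSpot code), the kind of lock-free read-modify-write loop those guarantees support:

    #include <stdint.h>

    // Atomically set '*dest' to the maximum of its current value and 'val'.
    static inline int32_t atomic_max(volatile int32_t* dest, int32_t val) {
      int32_t cur = *dest;
      while (cur < val &&
             !__atomic_compare_exchange_n(dest, &cur, val, false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        // 'cur' now holds the value another thread installed; re-test it.
      }
      return cur;  // the value observed in memory before our (possible) update
    }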
 
-jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) {
+template <>
+inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t xchg_val, volatile int64_t* dest, int64_t cmp_val, cmpxchg_memory_order order) {
   unsigned long old;
 
   __asm__ __volatile__ (
     "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
     // outputs

@@ -509,19 +493,9 @@
     ,       "0"   (cmp_val)  // Read-only, initial value for [old] (operand #0).
     // clobbered
     : "cc"
   );
 
-  return (jlong)old;
-}
-
-void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) {
-  return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
+  return (int64_t)old;
 }
 
-intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) {
-  return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
-}
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP