
src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp

rev 13323 : imported patch Atomic_refactoring
rev 13327 : [mq]: SpecializableAtomic

@@ -30,26 +30,10 @@
 #error "Atomic currently only impleneted for PPC64"
 #endif
 
 // Implementation of class atomic
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 //
 //   machine barrier instructions:
 //
 //   - ppc_sync            two-way memory barrier, aka fence
 //   - ppc_lwsync          orders  Store|Store,

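The deleted overloads above were all plain volatile accesses: on PPC64, aligned loads and stores of 64 bits or less are naturally atomic, so no asm is needed. After this refactoring they are presumably served by shared templated code instead of per-type platform overloads; a minimal sketch of that assumed fallback (names hypothetical, not this patch's exact code):

    // Hypothetical shared fallback for plain (unordered) atomic access:
    // a volatile load/store suffices; only ordered variants need barriers.
    template <typename T>
    inline void specialized_store(T store_value, volatile T* dest) { *dest = store_value; }

    template <typename T>
    inline T specialized_load(const volatile T* src) { return *src; }
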
@@ -91,12 +75,12 @@
 #define strasm_acquire                    strasm_lwsync
 #define strasm_fence                      strasm_sync
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
+template <>
+inline int32_t GeneralizedAtomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
   unsigned int result;
 
   __asm__ __volatile__ (
     strasm_lwsync
     "1: lwarx   %0,  0, %2    \n"

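Every read-modify-write below is built on the same load-reserve/store-conditional retry loop, but the hunks only show fragments of each asm body. For orientation, a self-contained sketch of the full 32-bit add pattern, assuming GCC inline asm on PPC64 (illustrative, not the patch's exact text):

    #include <stdint.h>

    // lwarx loads a word and places a reservation on its address; stwcx.
    // stores only if the reservation still holds, setting CR0 so bne- can
    // retry when another CPU intervened. lwsync/isync bracket the loop,
    // giving the two-way ordering the strasm_* macros above encode.
    inline int32_t add_and_fetch32(int32_t add_value, volatile int32_t* dest) {
      int32_t result;
      __asm__ __volatile__ (
        "   lwsync                \n"   // release barrier before the update
        "1: lwarx   %0, 0, %2     \n"   // result = *dest, reserve address
        "   add     %0, %0, %1    \n"   // result += add_value
        "   stwcx.  %0, 0, %2     \n"   // *dest = result if still reserved
        "   bne-    1b            \n"   // reservation lost: retry
        "   isync                 \n"   // acquire barrier after the update
        : "=&r" (result)
        : "r" (add_value), "r" (dest)
        : "cc", "memory");
      return result;                    // like Atomic::add, returns the new value
    }
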
@@ -106,16 +90,16 @@
     strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return (int32_t) result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
+template <>
+inline int64_t GeneralizedAtomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
   long result;
 
   __asm__ __volatile__ (
     strasm_lwsync
     "1: ldarx   %0,  0, %2    \n"

@@ -125,20 +109,15 @@
     strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
+  return (int64_t) result;
 }
 
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
+template <>
+inline void GeneralizedAtomic::specialized_inc<int32_t>(volatile int32_t* dest) {
   unsigned int temp;
 
   __asm__ __volatile__ (
     strasm_nobarrier
     "1: lwarx   %0,  0, %2    \n"

@@ -150,12 +129,12 @@
     : /*%2*/"r" (dest), "m" (*dest)
     : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
+template <>
+inline void GeneralizedAtomic::specialized_inc<int64_t>(volatile int64_t* dest) {
   long temp;
 
   __asm__ __volatile__ (
     strasm_nobarrier
     "1: ldarx   %0,  0, %2    \n"

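Note that the inc/dec specializations use strasm_nobarrier on both sides: they are atomic but relaxed, with no ordering guarantee and no "memory" clobber. A sketch of the assumed 32-bit shape (the asm bodies are truncated in the hunks):

    // Relaxed increment: no lwsync/isync and no "memory" clobber, so the
    // compiler and CPU may reorder surrounding accesses around it.
    inline void relaxed_inc32(volatile int32_t* dest) {
      int32_t temp;
      __asm__ __volatile__ (
        "1: lwarx   %0, 0, %2    \n"   // temp = *dest, reserve address
        "   addic   %0, %0, 1    \n"   // temp += 1
        "   stwcx.  %0, 0, %2    \n"   // store back if still reserved
        "   bne-    1b           \n"   // reservation lost: retry
        : "=&r" (temp), "=m" (*dest)
        : "r" (dest), "m" (*dest)
        : "cc");
    }
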
@@ -167,17 +146,12 @@
     : /*%2*/"r" (dest), "m" (*dest)
     : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
+template <>
+inline void GeneralizedAtomic::specialized_dec<int32_t>(volatile int32_t* dest) {
   unsigned int temp;
 
   __asm__ __volatile__ (
     strasm_nobarrier
     "1: lwarx   %0,  0, %2    \n"

@@ -189,12 +163,12 @@
     : /*%2*/"r" (dest), "m" (*dest)
     : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
+template <>
+inline void GeneralizedAtomic::specialized_dec<int64_t>(volatile int64_t* dest) {
   long temp;
 
   __asm__ __volatile__ (
     strasm_nobarrier
     "1: ldarx   %0,  0, %2    \n"

@@ -206,16 +180,12 @@
     : /*%2*/"r" (dest), "m" (*dest)
     : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
+template <>
+inline int32_t GeneralizedAtomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   unsigned int old_value;
   const uint64_t zero = 0;

@@ -243,15 +213,15 @@
     /* clobber */
     : "cc",
       "memory"
     );
 
-  return (jint) old_value;
+  return (int32_t) old_value;
 }
 
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
+template <>
+inline int64_t GeneralizedAtomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   long old_value;
   const uint64_t zero = 0;

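The exchange bodies also fall between hunks; both widths share one pattern, sketched here for 32 bits. The zero constant is assumed to serve as the index operand, so the effective address is dest + 0 (the "b" constraint keeps dest out of r0, which would read as a literal zero in the base position):

    #include <stdint.h>

    // Assumed xchg shape: release barrier, swap loop, trailing barrier.
    // Per the comment above, callers must not rely on xchg alone for
    // acquire semantics; the exact trailing barrier is illustrative.
    inline int32_t xchg32(int32_t exchange_value, volatile int32_t* dest) {
      int32_t old_value;
      const uint64_t zero = 0;
      __asm__ __volatile__ (
        "   lwsync                              \n"
        "1: lwarx   %[old], %[dst], %[zero]     \n"  // old = *dest, reserve
        "   stwcx.  %[val], %[dst], %[zero]     \n"  // *dest = exchange_value
        "   bne-    1b                          \n"  // reservation lost: retry
        "   isync                               \n"
        : [old] "=&r" (old_value), "=m" (*dest)
        : [val] "r" (exchange_value), [dst] "b" (dest),
          [zero] "r" (zero), "m" (*dest)
        : "cc", "memory");
      return old_value;
    }
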
@@ -279,15 +249,11 @@
     /* clobber */
     : "cc",
       "memory"
     );
 
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+  return (int64_t) old_value;
 }
 
 inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
   if (order != memory_order_relaxed) {
     __asm__ __volatile__ (

@@ -305,12 +271,12 @@
       );
   }
 }
 
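cmpxchg_post_membar is not visible in these hunks; given the two-way barrier contract stated in the comments below, it presumably mirrors the pre-barrier, with memory_order_relaxed skipping both. An assumed sketch:

    // Assumed counterpart of cmpxchg_pre_membar above: any order stronger
    // than memory_order_relaxed pays a full fence (sync) on each side,
    // which is what makes cmpxchg a two-way barrier by default.
    inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
      if (order != memory_order_relaxed) {
        __asm__ __volatile__ ("sync" : : : "memory");
      }
    }
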
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
+template <>
+inline int8_t GeneralizedAtomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
   // Using 32 bit internally.

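There is no byte-wide lwarx on this target, so the int8_t specialization operates on the aligned word containing the byte; defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE tells shared code not to apply its own byte-through-word emulation. For orientation, that emulation looks roughly like this in plain C++ (illustrative only; big-endian lane selection as on AIX, with GCC's __sync builtin standing in for the word-sized CAS):

    #include <stdint.h>

    inline int8_t cmpxchg_byte_via_word(int8_t exchange_value,
                                        volatile int8_t* dest,
                                        int8_t compare_value) {
      // Locate the containing word and the byte's lane within it.
      volatile uint32_t* aligned =
          (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
      const int shift = 8 * (3 - ((uintptr_t)dest & 3));   // big-endian
      const uint32_t mask = 0xFFu << shift;
      for (;;) {
        uint32_t cur = *aligned;
        uint8_t cur_byte = (uint8_t)((cur & mask) >> shift);
        if (cur_byte != (uint8_t)compare_value) {
          return (int8_t)cur_byte;               // compare failed: report old byte
        }
        uint32_t desired =
            (cur & ~mask) | ((uint32_t)(uint8_t)exchange_value << shift);
        if (__sync_val_compare_and_swap(aligned, cur, desired) == cur) {
          return compare_value;                  // byte swapped in
        }
        // A neighboring byte (or ours) changed concurrently: retry.
      }
    }
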
@@ -366,15 +332,15 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jbyte)(unsigned char)old_value;
+  return (int8_t)(unsigned char)old_value;
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
+template <>
+inline int32_t GeneralizedAtomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
   unsigned int old_value;

@@ -410,15 +376,15 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jint) old_value;
+  return (int32_t) old_value;
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
+template <>
+inline int64_t GeneralizedAtomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
   // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
   long old_value;

@@ -454,19 +420,11 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+  return (int64_t) old_value;
 }
 
 #undef strasm_sync
 #undef strasm_lwsync
 #undef strasm_isync
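
For completeness, an illustrative call through the shared Atomic front end, which this patch keeps as the public entry point (assumed to resolve to the int32_t specialization above; the default order is memory_order_conservative, which takes both sync barriers):

    // Illustrative only: a conservative CAS claiming a flag.
    volatile int32_t flag = 0;
    int32_t prev = Atomic::cmpxchg((int32_t)1, &flag, (int32_t)0);
    bool won = (prev == 0);   // true iff this thread did the 0 -> 1 transition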