
src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp

rev 13444 : imported patch aix_ppc
rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1

@@ -304,12 +304,17 @@
       strasm_sync
       );
   }
 }
 
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 

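The hunk above replaces the jbyte-specific entry point (and its VM_HAS_SPECIALIZED_CMPXCHG_BYTE marker) with a functor specialization keyed on operand size. As a minimal, self-contained sketch of how a generic front end can dispatch on sizeof(T) to per-size specializations like the one added here (generic_cmpxchg, PlatformCmpxchgSketch, and the GCC __sync builtin are illustrative stand-ins, not the shared atomic.hpp machinery):

#include <cstdint>
#include <cstddef>

template<std::size_t byte_size>
struct PlatformCmpxchgSketch {
  template<typename T>
  T operator()(T exchange_value, T volatile* dest, T compare_value) const {
    // Stand-in for the per-size inline assembly in this file.
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
};

template<typename T>
inline T generic_cmpxchg(T exchange_value, T volatile* dest, T compare_value) {
  // sizeof(T) selects the 1-, 4- or 8-byte specialization at compile
  // time, so one public entry point serves every operand width.
  return PlatformCmpxchgSketch<sizeof(T)>()(exchange_value, dest, compare_value);
}

int main() {
  volatile int32_t v = 5;
  int32_t old = generic_cmpxchg<int32_t>(7, &v, 5);  // succeeds; v becomes 7
  return (old == 5 && v == 7) ? 0 : 1;
}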
@@ -366,20 +371,26 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jbyte)(unsigned char)old_value;
+  return IntegerTypes::cast<T>((unsigned char)old_value);
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
 
   __asm__ __volatile__ (

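Both the byte and int variants retain the comment that cmpxchg defaults to a two-way memory barrier, i.e. 'fence_cmpxchg_fence'. For readers who want those conservative semantics without the PPC sync/lwsync assembly, here is a hedged std::atomic rendering (cmpxchg_conservative is an illustrative name, not HotSpot code):

#include <atomic>
#include <cstdint>

int32_t cmpxchg_conservative(int32_t exchange_value,
                             std::atomic<int32_t>* dest,
                             int32_t compare_value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // pre-membar ("sync")
  int32_t expected = compare_value;
  // The exchange itself can be relaxed: the fences on either side
  // already order it against all surrounding accesses.
  dest->compare_exchange_strong(expected, exchange_value,
                                std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // post-membar
  return expected;  // observed old value, whether or not the swap happened
}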
@@ -410,20 +421,26 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jint) old_value;
+  return old_value;
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
+                                                T volatile* dest,
+                                                T compare_value,
+                                                cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
 
   __asm__ __volatile__ (

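Each new specialization opens with STATIC_ASSERT(N == sizeof(T)), rejecting at compile time any instantiation whose operand width does not match the specialization. A small sketch of such a check in the classic negative-array-size style (SKETCH_STATIC_ASSERT is illustrative; HotSpot defines its own macro):

#define SKETCH_STATIC_ASSERT(cond) \
  typedef char sketch_static_assert_failed[(cond) ? 1 : -1]

template<typename T>
void require_eight_bytes() {
  SKETCH_STATIC_ASSERT(8 == sizeof(T));  // compiles only for 8-byte T
}

int main() {
  require_eight_bytes<long long>();  // OK where long long is 8 bytes (assumed)
  // require_eight_bytes<char>();    // would fail to compile
  return 0;
}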
@@ -454,19 +471,11 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+  return old_value;
 }
 
 #undef strasm_sync
 #undef strasm_lwsync
 #undef strasm_isync
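
The last hunk also deletes the two cmpxchg_ptr overloads: once cmpxchg is a template, T is deduced for pointer operands as well, so the casting shims through jlong are no longer needed. A self-contained sketch of that deduction, with sketch_cmpxchg and the GCC __sync builtin standing in for the real Atomic machinery:

template<typename T>
inline T sketch_cmpxchg(T exchange_value, T volatile* dest, T compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

int main() {
  int a = 1, b = 2;
  int* volatile slot = &a;
  // T deduced as int*: no (jlong)/(void*) casts, unlike the removed shims.
  int* old = sketch_cmpxchg(&b, &slot, &a);
  return (old == &a && slot == &b) ? 0 : 1;
}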