
src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp

rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1

@@ -310,10 +310,12 @@
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
+
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
   // Using 32 bit internally.

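As an illustrative aside (not part of this patch): the "Using 32 bit internally" comment refers to emulating a byte-sized compare-exchange on top of the aligned 32-bit word that contains the byte, since the hardware reservation works on words. Below is a minimal portable sketch of that idea, using GCC's __atomic builtin in place of the PPC lwarx/stwcx. sequence; the helper name is hypothetical, and the shift shown assumes a little-endian byte order (a big-endian target such as PPC would use (3 - (addr & 3)) * 8):

  #include <cstdint>

  inline uint8_t cmpxchg_byte_via_word_sketch(volatile uint8_t* dest,
                                              uint8_t compare_value,
                                              uint8_t exchange_value) {
    // Locate the aligned 32-bit word containing *dest.
    uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
    uint32_t* word = reinterpret_cast<uint32_t*>(addr & ~uintptr_t(3));
    unsigned shift = unsigned(addr & 3) * 8;   // byte position, little-endian
    uint32_t mask  = uint32_t(0xff) << shift;

    uint32_t old_word = *word;
    for (;;) {
      uint8_t old_byte = uint8_t((old_word & mask) >> shift);
      if (old_byte != compare_value) {
        return old_byte;                       // mismatch: report the value found
      }
      uint32_t new_word = (old_word & ~mask) | (uint32_t(exchange_value) << shift);
      uint32_t expected = old_word;
      if (__atomic_compare_exchange_n(word, &expected, new_word, /*weak=*/false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        return compare_value;                  // success: the old byte matched
      }
      old_word = expected;                     // a neighboring byte changed; retry
    }
  }
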
@@ -378,15 +380,17 @@
 template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  unsigned int old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
 
   __asm__ __volatile__ (

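As an illustrative aside (not part of this patch): declaring old_value with the caller's type T is what lets the function end with a plain "return old_value;" instead of IntegerTypes::cast<T>(old_value). Below is a standalone sketch, with hypothetical names, of the same shape: a size-dispatched front end selects the 4-byte specialization via sizeof(T), and the static assertion guards that only 4-byte operands reach it (__sync_val_compare_and_swap stands in for the inline assembly, assuming T is a 4-byte integral type):

  #include <cstddef>

  template<size_t byte_size>
  struct PlatformCmpxchgSketch {
    template<typename T>
    T operator()(T exchange_value, T volatile* dest, T compare_value) const;
  };

  template<>
  template<typename T>
  inline T PlatformCmpxchgSketch<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value) const {
    static_assert(4 == sizeof(T), "only 4-byte operands may reach this specialization");
    // old_value already has the caller's type T, so no cast is needed on return.
    T old_value = __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    return old_value;
  }

  template<typename T>
  inline T cmpxchg_sketch(T exchange_value, T volatile* dest, T compare_value) {
    // The generic layer dispatches on operand size; sizeof(T) == 4 selects the above.
    return PlatformCmpxchgSketch<sizeof(T)>()(exchange_value, dest, compare_value);
  }
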
@@ -417,24 +421,26 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return IntegerTypes::cast<T>(old_value);
+  return old_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
   // specified otherwise (see atomic.hpp).
 
-  long old_value;
+  T old_value;
   const uint64_t zero = 0;
 
   cmpxchg_pre_membar(order);
 
   __asm__ __volatile__ (

@@ -465,11 +471,11 @@
       "memory"
     );
 
   cmpxchg_post_membar(order);
 
-  return IntegerTypes::cast<T>(old_value);
+  return old_value;
 }
 
 #undef strasm_sync
 #undef strasm_lwsync
 #undef strasm_isync
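As an illustrative aside (not part of this patch): the recurring "two-way memory barrier" comment means that, unless a weaker cmpxchg_memory_order is requested, cmpxchg_pre_membar and cmpxchg_post_membar bracket the operation with full barriers. Below is a rough portable sketch of that conservative 'fence_cmpxchg_fence' shape, written with std::atomic rather than the raw sync/isync instructions; the function name is hypothetical:

  #include <atomic>
  #include <cstdint>

  inline int64_t cmpxchg_conservative_sketch(std::atomic<int64_t>& dest,
                                             int64_t compare_value,
                                             int64_t exchange_value) {
    std::atomic_thread_fence(std::memory_order_seq_cst);  // pre-membar ("sync")
    int64_t observed = compare_value;
    dest.compare_exchange_strong(observed, exchange_value,
                                 std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);  // post-membar
    return observed;   // the value found at dest, as cmpxchg returns it
  }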