
src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp

rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2

@@ -38,19 +38,34 @@
 inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
+template<>
+struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
+
 inline void Atomic::inc    (volatile jint*     dest) {
   __asm__ volatile (  "lock addl $1,(%0)" :
                     : "r" (dest) : "cc", "memory");
 }
 

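The substantive change in this hunk is the return-value convention: the old Atomic::add returned the updated value (addend + add_value), while PlatformAdd<4>::fetch_and_add returns exactly what "lock xaddl" hands back, i.e. the value before the addition. Since PlatformAdd still derives from the shared Atomic::FetchAndAdd base, that layer is evidently expected to reapply add_value so callers keep add-and-fetch semantics; the PlatformAdd<2> specialization simply delegates 2-byte adds to the shared AddShortUsingInt helper, whose body is not part of this file. The following standalone sketch (not part of the patch; the demo_* names are hypothetical, GCC/Clang on x86 assumed) shows the two conventions side by side:

    // Standalone illustration only. fetch-and-add returns the pre-addition
    // value, exactly what "lock xaddl" leaves in the source register;
    // add-and-fetch (the contract of Atomic::add) is recovered by re-adding
    // add_value outside the asm.
    #include <stdint.h>

    inline int32_t demo_fetch_and_add(int32_t add_value, volatile int32_t* dest) {
      int32_t old_value;
      __asm__ volatile ("lock xaddl %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
      return old_value;                                  // value before the add
    }

    inline int32_t demo_add_and_fetch(int32_t add_value, volatile int32_t* dest) {
      return demo_fetch_and_add(add_value, dest) + add_value;  // value after the add
    }

    int main() {
      volatile int32_t counter = 40;
      int32_t before = demo_fetch_and_add(2, &counter);   // before == 40, counter == 42
      int32_t after  = demo_add_and_fetch(1, &counter);   // after  == 43, counter == 43
      return (before == 40 && after == 43 && counter == 43) ? 0 : 1;
    }
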
@@ -109,21 +124,21 @@
 
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   __asm__ __volatile__ (  "lock addq $1,(%0)"
                         :

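On AMD64 the explicit add_ptr overloads disappear because pointer-sized operands are now covered by the 8-byte specialization: the platform specialization is keyed on the operand width (PlatformAdd<4> for jint, PlatformAdd<8> for intptr_t and other 8-byte types), so no per-type entry points remain in this file. A much-simplified standalone analogue of that size-keyed dispatch (hypothetical Demo* names; GCC/Clang __atomic builtins stand in for the inline asm, and this is not the HotSpot code itself):

    // Simplified analogue: the operand width selects the platform
    // specialization at compile time.
    #include <stdint.h>
    #include <stddef.h>

    template<size_t byte_size>
    struct DemoPlatformAdd;                  // only supported sizes are defined

    template<>
    struct DemoPlatformAdd<4> {
      int32_t operator()(int32_t add_value, volatile int32_t* dest) const {
        return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
      }
    };

    template<>
    struct DemoPlatformAdd<8> {
      int64_t operator()(int64_t add_value, volatile int64_t* dest) const {
        return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
      }
    };

    template<typename T>
    T demo_atomic_add(T add_value, volatile T* dest) {
      return DemoPlatformAdd<sizeof(T)>()(add_value, dest);  // sizeof picks the specialization
    }

    int main() {
      volatile int32_t narrow = 0;
      volatile int64_t wide   = 0;
      return (demo_atomic_add<int32_t>(7, &narrow) == 7 &&
              demo_atomic_add<int64_t>(5, &wide)   == 5) ? 0 : 1;
    }
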
@@ -162,19 +177,10 @@
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
 
 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
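In the 32-bit (!AMD64) branch the add_ptr overloads are removed outright rather than replaced: on an ILP32 target intptr_t is 4 bytes wide, so pointer-sized adds already resolve to the PlatformAdd<4> specialization from the first hunk. A small standalone check of that assumption (hypothetical sketch; a GCC/Clang builtin stands in for the lock xaddl path):

    // Standalone sanity check, not part of the patch: on ILP32 a pointer-sized
    // atomic add is just a 4-byte atomic add, so no dedicated add_ptr code is
    // needed once the size-keyed PlatformAdd template is in place.
    #include <stdint.h>

    int main() {
    #if INTPTR_MAX == INT32_MAX
      static_assert(sizeof(intptr_t) == sizeof(int32_t),
                    "ILP32: pointer-sized adds use the 4-byte specialization");
    #endif
      volatile intptr_t v = 0;
      // __atomic_add_fetch returns the updated value, like Atomic::add.
      return __atomic_add_fetch(&v, (intptr_t)1, __ATOMIC_SEQ_CST) == 1 ? 0 : 1;
    }
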