src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

@@ -26,21 +26,27 @@
 #define OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP
 
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformFetchAndAdd {
   template<typename D, typename I>
-  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformAddAndFetch {
+  template<typename D, typename I>
+  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return Atomic::PlatformFetchAndAdd<byte_size>()(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
 template<typename D, typename I>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
+inline D Atomic::PlatformFetchAndAdd<4>::operator()(D volatile* dest, I add_value,
+                                                    atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
                     : "=r" (old_value)

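The functor split mirrors the semantics of lock xadd: the instruction leaves the pre-addition value of the destination in its source register, so it natively implements fetch-and-add, and add-and-fetch falls out by re-adding add_value to that result. A minimal standalone sketch of the same composition (not HotSpot code; the function names are hypothetical, and a single "+r" constraint stands in for the matched-constraint form above):

    #include <cstdint>
    #include <cstdio>

    // Fetch-and-add: returns the value of *dest before the addition,
    // which is exactly what lock xaddl leaves in the source register.
    static inline int32_t fetch_and_add_4(volatile int32_t* dest, int32_t add_value) {
      int32_t old_value = add_value;
      __asm__ volatile ("lock xaddl %0,(%1)"
                        : "+r" (old_value)
                        : "r" (dest)
                        : "cc", "memory");
      return old_value;
    }

    // Add-and-fetch derived from fetch-and-add, just as PlatformAddAndFetch
    // is derived from PlatformFetchAndAdd in the patch.
    static inline int32_t add_and_fetch_4(volatile int32_t* dest, int32_t add_value) {
      return fetch_and_add_4(dest, add_value) + add_value;
    }

    int main() {
      volatile int32_t counter = 40;
      printf("%d\n", fetch_and_add_4(&counter, 1));  // prints 40; counter is now 41
      printf("%d\n", add_and_fetch_4(&counter, 1));  // prints 42; counter is now 42
      return 0;
    }
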
@@ -92,12 +98,12 @@
 
 #ifdef AMD64
 
 template<>
 template<typename D, typename I>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
+inline D Atomic::PlatformFetchAndAdd<8>::operator()(D volatile* dest, I add_value,
+                                                    atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
   __asm__ __volatile__ ("lock xaddq %0,(%2)"
                         : "=r" (old_value)
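
On AMD64 the 8-byte specialization follows the same pattern with a 64-bit operand. A hypothetical standalone counterpart of the sketch above, again not HotSpot code:

    #include <cstdint>

    // 64-bit fetch-and-add via lock xaddq; as in the 4-byte case, the
    // source register receives the pre-addition value of *dest.
    static inline int64_t fetch_and_add_8(volatile int64_t* dest, int64_t add_value) {
      int64_t old_value = add_value;
      __asm__ volatile ("lock xaddq %0,(%1)"
                        : "+r" (old_value)
                        : "r" (dest)
                        : "cc", "memory");
      return old_value;
    }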