src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp


@@ -55,53 +55,53 @@
 
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
-  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
 }
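
[Reviewer note, not part of the patch] The only change in this file is the
operand order: every Atomic platform operation now takes the destination
first, so call sites read "operate on *dest with value". A minimal hedged
sketch of a call site under the new order (the name 'counter' is
illustrative, and it assumes the shared Atomic::add wrapper is reordered to
match in the same changeset):

    volatile int32_t counter = 0;
    // Atomic::add returns the updated value (add_and_fetch semantics).
    int32_t updated = Atomic::add(&counter, 1);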
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
   template<>                                                            \
   template<typename T>                                                  \
-  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
-                                                      T volatile* dest, \
+  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
+                                                      T exchange_value, \
                                                       atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
-    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+    return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
   }
 
 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
 DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
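
[Reviewer note] For reference, the first instantiation above expands to the
following (whitespace adjusted); the expansion is mechanical, shown only to
make the new (dest, exchange_value) order visible without the macro:

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
      STATIC_ASSERT(4 == sizeof(T));
      return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
    }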
 
 #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
   template<>                                                            \
   template<typename T>                                                  \
-  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
-                                                         T volatile* dest, \
+  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
                                                          T compare_value, \
+                                                         T exchange_value, \
                                                          atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
-    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+    return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
   }
 
 DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
 DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
 DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

@@ -109,12 +109,12 @@
 #undef DEFINE_STUB_CMPXCHG
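
[Reviewer note] With the compare-and-exchange operands now ordered
(dest, compare_value, exchange_value), a CAS call reads left to right as
"at *dest, if you see compare, install exchange". A hedged caller sketch
(the name 'flag' is illustrative, and it assumes the shared Atomic::cmpxchg
wrapper follows the same order):

    volatile int32_t flag = 0;
    // cmpxchg returns the value observed at *dest; equality with the
    // compare value means the exchange took place.
    int32_t observed = Atomic::cmpxchg(&flag, 0, 1);
    if (observed == 0) {
      // we installed 1 into flag
    }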
 
 #else // !AMD64
 
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;

@@ -125,12 +125,12 @@
   }
 }
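
[Reviewer note] The asm body of the 32-bit add is unchanged by this patch;
only the parameter order moves. For readers without the full file, a
semantically equivalent sketch using the documented Win32 intrinsic, not the
patch's actual implementation:

    #include <windows.h>

    inline int32_t add_and_fetch_equiv(int32_t volatile* dest, int32_t add_value) {
      // InterlockedExchangeAdd returns the *old* value, so add once more
      // to get the add_and_fetch (new value) result.
      return InterlockedExchangeAdd((volatile LONG*)dest, (LONG)add_value) + add_value;
    }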
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
                                              atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
   __asm {
     mov eax, exchange_value;

@@ -139,13 +139,13 @@
   }
 }
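
[Reviewer note] The source comment calls this an alternative for
InterlockedExchange, so an equivalent sketch for the 4-byte case is simply
(illustrative only, not the patch's code):

    inline int32_t xchg_equiv(int32_t volatile* dest, int32_t exchange_value) {
      // Returns the previous value of *dest, like the asm above.
      return InterlockedExchange((volatile LONG*)dest, (LONG)exchange_value);
    }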
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest

@@ -155,13 +155,13 @@
   }
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest

@@ -171,13 +171,13 @@
   }
 }
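
[Reviewer note] Worth flagging: the Win32 equivalent named in the source
comment, InterlockedCompareExchange, takes its operands as
(Destination, Exchange, Comparand), i.e. the reverse of the
(dest, compare_value, exchange_value) order adopted here. An illustrative
4-byte equivalent (not the patch's code):

    inline int32_t cmpxchg_equiv(int32_t volatile* dest,
                                 int32_t compare_value,
                                 int32_t exchange_value) {
      // Returns the value previously at *dest.
      return InterlockedCompareExchange((volatile LONG*)dest,
                                        (LONG)exchange_value,
                                        (LONG)compare_value);
    }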
 
 template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-                                                T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                 T compare_value,
+                                                T exchange_value,
                                                 atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   int32_t ex_lo  = (int32_t)exchange_value;
   int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
   int32_t cmp_lo = (int32_t)compare_value;

@@ -211,12 +211,12 @@
   return dest;
 }
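
[Reviewer note] The 64-bit load that ends here and the 64-bit store that
follows both use the x87 fild/fistp pair, which moves eight bytes in a
single instruction and is the classic way to get atomic 64-bit accesses on
32-bit x86. A hedged caller sketch with the new (dest, value) order (the
name 'shared' is illustrative, and it assumes the shared Atomic wrappers
are reordered to match):

    volatile int64_t shared = 0;
    Atomic::store(&shared, (int64_t)42);   // routed through PlatformStore<8>
    int64_t v = Atomic::load(&shared);     // routed through PlatformLoad<8>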
 
 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   volatile T* src = &store_value;
   __asm {
     mov eax, src
     fild     qword ptr [eax]

@@ -232,11 +232,11 @@
 #ifndef AMD64
 template<>
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov al, v;
       xchg al, byte ptr [edx];
     }

@@ -245,11 +245,11 @@
 
 template<>
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov ax, v;
       xchg ax, word ptr [edx];
     }

@@ -258,11 +258,11 @@
 
 template<>
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov eax, v;
       xchg eax, dword ptr [edx];
     }
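
[Reviewer note] All three PlatformOrderedStore specializations implement the
release-store-fence with a plain xchg; on x86, xchg with a memory operand
carries an implicit lock prefix, so the store acts as a full fence without
an explicit mfence. A hedged caller sketch (the name 'ready' is
illustrative, and it assumes the release_store_fence wrapper follows the
same (dest, value) order):

    volatile int32_t ready = 0;
    Atomic::release_store_fence(&ready, 1);  // dispatches to PlatformOrderedStore<4, RELEASE_X_FENCE>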