
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp

rev 49898 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by:

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -45,31 +45,34 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 #ifdef AMD64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 }
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
 }
 
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
   template<>                                                            \
   template<typename T>                                                  \
   inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
-                                                      T volatile* dest) const { \
+                                                      T volatile* dest, \
+                                                      atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
     return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
   }
 
 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
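The hunk above threads the new atomic_memory_order parameter through PlatformAdd and PlatformXchg. On this port the parameter is accepted but not consulted: the os::atomic_*_func stubs bottom out in LOCK-prefixed instructions, which already provide the strongest (conservative, fully fenced) ordering x86 offers. A caller-side sketch of the resulting API, assuming the enumerator names introduced by this change in atomic.hpp (memory_order_relaxed and friends, with memory_order_conservative as the default):

  volatile int32_t _counter = 0;

  void bump() {
    Atomic::add(1, &_counter);                        // default ordering (conservative)
    Atomic::add(1, &_counter, memory_order_relaxed);  // caller requests relaxed ordering;
                                                      // this port still emits a locked add
  }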

@@ -81,11 +84,11 @@
   template<>                                                            \
   template<typename T>                                                  \
   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                          T volatile* dest, \
                                                          T compare_value, \
-                                                         cmpxchg_memory_order order) const { \
+                                                         atomic_memory_order order) const { \
     STATIC_ASSERT(ByteSize == sizeof(T));                               \
     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
   }
 
 DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
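For reference, expanding DEFINE_STUB_CMPXCHG for the one-byte invocation shown above yields roughly:

  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
                                                  atomic_memory_order order) const {
    STATIC_ASSERT(1 == sizeof(T));
    return cmpxchg_using_helper<int8_t>(os::atomic_cmpxchg_byte_func,
                                        exchange_value, dest, compare_value);
  }

Note that order is deliberately unused: the stub behind os::atomic_cmpxchg_byte_func is a LOCK CMPXCHG, which cannot be weakened on x86.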

@@ -96,11 +99,12 @@
 
 #else // !AMD64
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;
     mov eax, add_value;

@@ -111,11 +115,12 @@
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedExchange
   __asm {
     mov eax, exchange_value;
     mov ecx, dest;
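The two !AMD64 asm bodies above are truncated by the diff context. As an illustration only, an equivalent of the 32-bit add_and_fetch written against the Win32 Interlocked API (a hypothetical rewrite, not what the port actually does) would look like:

  #include <windows.h>
  #include <stdint.h>

  inline int32_t add_and_fetch_sketch(int32_t add_value, int32_t volatile* dest) {
    // InterlockedExchangeAdd returns the value *before* the addition,
    // so add add_value once more to get add-and-fetch semantics.
    return InterlockedExchangeAdd((volatile LONG*)dest, add_value) + add_value;
  }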

@@ -126,11 +131,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
     mov cl, exchange_value
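A dedicated one-byte specialization exists because the classic Win32 InterlockedCompareExchange family has historically had no 8-bit form, so this path presumably hand-rolls a byte-wide LOCK CMPXCHG (hence the `mov cl, exchange_value` above). A hypothetical caller CASing a one-byte flag, assuming the argument order used throughout this revision (exchange value, destination, compare value):

  volatile int8_t _claimed = 0;

  bool try_claim() {
    // cmpxchg returns the previous contents of *dest; the claim
    // succeeded iff that old value matched the expected 0.
    return Atomic::cmpxchg((int8_t)1, &_claimed, (int8_t)0) == 0;
  }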

@@ -142,11 +147,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // alternative for InterlockedCompareExchange
   __asm {
     mov edx, dest
     mov ecx, exchange_value

@@ -158,11 +163,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   int32_t ex_lo  = (int32_t)exchange_value;
   int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
   int32_t cmp_lo = (int32_t)compare_value;
   int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
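Here the 8-byte exchange and compare values are split into 32-bit halves because 32-bit x86 has no 64-bit general-purpose registers; the truncated asm body presumably loads those halves into the register pairs required by LOCK CMPXCHG8B. For comparison, a hypothetical equivalent using the Win32 API (illustration only):

  #include <windows.h>
  #include <stdint.h>

  inline int64_t cmpxchg64_sketch(int64_t exchange_value,
                                  int64_t volatile* dest,
                                  int64_t compare_value) {
    // InterlockedCompareExchange64 compiles to LOCK CMPXCHG8B on 32-bit
    // x86 and returns the previous value of *dest.
    return InterlockedCompareExchange64((volatile LONG64*)dest,
                                        exchange_value, compare_value);
  }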