src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

rev 49986 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by: lucy, rehn, dholmes

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -30,16 +30,17 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
                     : "=r" (old_value)

@@ -49,11 +50,12 @@
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (exchange_value)
                     : "0" (exchange_value), "r" (dest)
                     : "memory");

@@ -63,11 +65,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");

@@ -77,11 +79,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
                     : "r" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");

@@ -89,11 +91,12 @@
 }
 
 #ifdef AMD64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                         : "=r" (old_value)

@@ -103,11 +106,12 @@
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
                         : "memory");

@@ -117,11 +121,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
                         : "cc", "memory");

@@ -139,11 +143,11 @@
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
 
 template<>
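
On 32-bit BSD x86 (the !AMD64 branch above) the 8-byte case has no inline
lock cmpxchgq available, so it delegates to the _Atomic_cmpxchg_long
assembly stub via cmpxchg_using_helper, which ignores the order argument
just like the inline variants. Roughly, and assuming the shape of the
shared helper in runtime/atomic.hpp, the wrapper only performs type
adaptation around the stub:

    // Sketch: reinterpret T as the helper's int64_t, call the stub,
    // cast the result back to T. Not the verbatim implementation.
    template<typename Type, typename Fn, typename T>
    inline T Atomic::cmpxchg_using_helper(Fn fn,
                                          T exchange_value,
                                          T volatile* dest,
                                          T compare_value) {
      STATIC_ASSERT(sizeof(Type) == sizeof(T));
      return PrimitiveConversions::cast<T>(
        fn(PrimitiveConversions::cast<Type>(exchange_value),
           reinterpret_cast<Type volatile*>(dest),
           PrimitiveConversions::cast<Type>(compare_value)));
    }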