src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

rev 49898 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by:
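The change threads a new atomic_memory_order parameter through every platform RMW operation below (fetch_and_add, xchg, cmpxchg), replacing the old cmpxchg-only cmpxchg_memory_order. As a rough sketch of what a call site can look like once this lands; the enumerator names and the conservative default are taken from the shared runtime/atomic.hpp side of this changeset, not from this file, so treat them as assumptions:

    // Hypothetical call sites; assumes Atomic::add and Atomic::cmpxchg take an
    // optional atomic_memory_order parameter defaulting to the conservative
    // (fully fenced) ordering, per the changeset description.
    volatile int32_t _counter = 0;   // hypothetical field
    Atomic::add(1, &_counter);                           // conservative default
    Atomic::add(1, &_counter, memory_order_relaxed);     // explicitly relaxed
    int32_t prev = Atomic::cmpxchg((int32_t)1, &_counter,
                                   (int32_t)0, memory_order_acq_rel);

On x86 every variant compiles to the same lock-prefixed instruction either way; the order argument only changes code generation on more weakly ordered architectures.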
--- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

+++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

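// Note: every implementation below may ignore its order argument, because
// on x86 lock-prefixed read-modify-write instructions (and xchg with a
// memory operand, which is implicitly locked) already act as full two-way
// memory barriers, so even memory_order_conservative is satisfied.
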
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

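// The cmpxchg flavors below rely on the instruction's implicit use of the
// accumulator: compare_value goes in through the "a" constraint, and the
// value found at *dest comes back in the same register, so the caller gets
// compare_value back on success and the current value on failure.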
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

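// On 32-bit x86 there is no 8-byte xadd or xchg, and an 8-byte compare-and-
// exchange needs the multi-register cmpxchg8b sequence, so the 64-bit
// operations are delegated to assembly helpers in bsd_x86.s. Presumably the
// cmpxchg helper wraps cmpxchg8b and the move helper performs a single
// 64-bit FPU/SSE transfer, which is what keeps the load and store atomic.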
extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
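
The atomic_memory_order type consumed above is not defined in this platform header; it comes from the shared runtime/atomic.hpp side of the changeset. A minimal sketch of its assumed shape, for context only:

    // Assumed shape of the shared enum (share/runtime/atomic.hpp), shown for
    // context; the C++11-aligned enumerators are intended to follow C++11
    // semantics, while memory_order_conservative is HotSpot's historical
    // full two-way fence around the operation.
    enum atomic_memory_order {
      memory_order_relaxed = 0,
      memory_order_acquire = 2,
      memory_order_release = 3,
      memory_order_acq_rel = 4,
      memory_order_conservative = 8
    };

Keeping memory_order_conservative as the default preserves pre-8202080 behavior at every existing call site, which is why this x86 file can accept and ignore the argument.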