src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

--- old/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP

+++ new/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

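// The read-modify-write operations below are implemented with GCC/Clang
// extended inline assembly. A lock-prefixed instruction is atomic and acts
// as a full two-way memory barrier; the "memory" clobber keeps the compiler
// from reordering other memory accesses across it.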
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

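// xchg with a memory operand asserts the lock signal implicitly, so no
// explicit lock prefix is needed; it is still a full fence.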
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

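// cmpxchg compares against rax/eax/ax/al, so the "a" constraints below pin
// compare_value (on input) and the value actually found at *dest (on
// output) to that register; the CAS succeeded iff the returned value
// equals compare_value.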
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

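// 32-bit x86 cannot perform a 64-bit atomic operation with a single plain
// instruction, so the 8-byte operations go through assembly helpers
// (which presumably use cmpxchg8b and 64-bit FPU moves).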
extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

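// A store with a trailing fence is implemented as an xchg into the
// destination: the implicitly locked exchange performs the store and the
// full barrier in one instruction, avoiding a separate store + mfence.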
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
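
For reference, here is a minimal standalone sketch of the same inline-asm patterns with the new dest-first argument order. It is not part of this change; the free-function names and the test in main are illustrative only. It should compile with GCC or Clang on x86:

#include <cstdint>
#include <cstdio>

// Hypothetical free functions mirroring PlatformAdd<4> and PlatformCmpxchg<4>.
static inline int32_t fetch_and_add_4(volatile int32_t* dest, int32_t add_value) {
  int32_t old_value = add_value;
  // lock xadd atomically swaps %0 with *dest and stores the sum in *dest,
  // so the register comes back holding the previous value of *dest.
  __asm__ volatile ("lock xaddl %0,(%1)"
                    : "+r" (old_value)
                    : "r" (dest)
                    : "cc", "memory");
  return old_value;
}

static inline int32_t cmpxchg_4(volatile int32_t* dest, int32_t compare_value,
                                int32_t exchange_value) {
  // cmpxchg compares eax ("a") with *dest; on a match it stores
  // exchange_value, otherwise eax receives the value actually found.
  __asm__ volatile ("lock cmpxchgl %1,(%2)"
                    : "+a" (compare_value)
                    : "r" (exchange_value), "r" (dest)
                    : "cc", "memory");
  return compare_value;
}

int main() {
  volatile int32_t counter = 40;
  int32_t old = fetch_and_add_4(&counter, 2);
  printf("fetch_and_add: old=%d new=%d\n", old, counter);  // old=40 new=42
  int32_t seen = cmpxchg_4(&counter, 42, 100);
  printf("cmpxchg: seen=%d new=%d\n", seen, counter);      // seen=42 new=100
  return 0;
}

As in the HotSpot code above, cmpxchg returns the value observed at dest, so the operation succeeded exactly when the return value equals compare_value.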