src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

Old version:
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP

// Implementation of class atomic

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
};
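
Atomic::FetchAndAdd here is a CRTP mixin from the shared atomic.hpp: the platform supplies fetch_and_add(), which returns the value *before* the addition (exactly what x86's xadd produces), and the mixin's operator() turns that into the post-add value that Atomic::add returns. A minimal sketch of the mechanism, with simplified hypothetical names and without the extra type checking the real dispatch layer performs:

// Illustrative sketch only, not part of this file.
enum atomic_memory_order { memory_order_conservative };

template<typename Derived>
struct FetchAndAddMixin {
  template<typename D, typename I>
  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
    // Derived::fetch_and_add returns the old value of *dest...
    D old_value =
        static_cast<const Derived*>(this)->fetch_and_add(dest, add_value, order);
    // ...so adding the addend once more yields the new value.
    return old_value + add_value;
  }
};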

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}
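
The constraint "0" (add_value) forces the addend into the same register as output operand %0, so after lock xaddl that register holds the prior contents of *dest, which becomes the return value; "cc" and "memory" mark the flags as clobbered and make the statement a compiler barrier. For comparison only, the same operation written with the GCC/Clang builtin (not what this file uses):

// Illustrative sketch only, not part of this file.
#include <cstdint>

int32_t fetch_and_add_sketch(volatile int32_t* dest, int32_t add_value) {
  // Atomically: old = *dest; *dest = old + add_value; return old.
  return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
}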

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
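
Two details worth noting: xchg with a memory operand is implicitly locked, so PlatformXchg needs no lock prefix, while cmpxchg must spell it out; and the byte form uses the "q" constraint so the compiler picks a byte-addressable register. For comparison only, the exchange expressed with the builtin:

// Illustrative sketch only, not part of this file.
#include <cstdint>

int32_t xchg_sketch(volatile int32_t* dest, int32_t new_value) {
  // Atomically store new_value and return the previous contents;
  // a full fence on x86, matching the implicitly locked xchg above.
  return __atomic_exchange_n(dest, new_value, __ATOMIC_SEQ_CST);
}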

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
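
lock cmpxchgl compares %eax (loaded from compare_value via the "a" input) with *dest and, on a match, stores exchange_value there; in either case %eax ends up holding the value that was found at *dest, which is why returning the "=a" output gives cmpxchg's conventional result. A typical retry loop built on such a primitive, sketched with the portable builtin rather than HotSpot's wrapper:

// Illustrative sketch only, not part of this file.
#include <cstdint>

void atomic_max_sketch(volatile int32_t* dest, int32_t value) {
  int32_t current = __atomic_load_n(dest, __ATOMIC_RELAXED);
  // Keep trying until we install value or observe one at least as large;
  // on failure the builtin refreshes current with what *dest held.
  while (current < value &&
         !__atomic_compare_exchange_n(dest, &current, value, /*weak=*/false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
  }
}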

#ifdef AMD64

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

New version (license header and include guard unchanged):

template<size_t byte_size>
struct Atomic::PlatformFetchAndAdd {
  template<typename D, typename I>
  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
};

template<size_t byte_size>
struct Atomic::PlatformAddAndFetch {
  template<typename D, typename I>
  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
    return Atomic::PlatformFetchAndAdd<byte_size>()(dest, add_value, order) + add_value;
  }
};
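
This refactor replaces the CRTP mixin with two sibling templates: the platform specializes whichever flavor its hardware provides directly (for x86 that is fetch-and-add, since xadd returns the old value), and the generic PlatformAddAndFetch derives the other via the identity add_and_fetch(dest, n) == fetch_and_add(dest, n) + n. The identity, illustrated stand-alone with std::atomic:

// Illustrative sketch only, not part of this file.
#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> v(40);
  int old_value = v.fetch_add(2);     // fetch-and-add returns 40
  assert(old_value + 2 == v.load());  // add-and-fetch result is 42
  return 0;
}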

template<>
template<typename D, typename I>
inline D Atomic::PlatformFetchAndAdd<4>::operator()(D volatile* dest, I add_value,
                                                    atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64

template<>
template<typename D, typename I>
inline D Atomic::PlatformFetchAndAdd<8>::operator()(D volatile* dest, I add_value,
                                                    atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}