
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

Old version:

  [... lines 1-33 not shown ...]

  34   template<typename D, typename I>
  35   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
  36 };
  37 
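The fragment above is the tail of the PlatformAdd<byte_size> declaration.
HotSpot keys these platform helpers on the operand size, so each width
below gets its own inline-asm specialization. A minimal sketch of that
dispatch pattern, with all names mine rather than from the file:

  #include <cstddef>

  // Hypothetical front end: pick the platform specialization by sizeof,
  // so 4-byte and 8-byte operands reach different asm sequences.
  template<std::size_t byte_size> struct PlatformAddDemo;  // specialized per width

  template<typename D, typename I>
  inline D add_demo(D volatile* dest, I add_value) {
    static_assert(sizeof(D) == sizeof(I), "operand sizes must match");
    return PlatformAddDemo<sizeof(D)>().fetch_and_add(dest, add_value);
  }
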
  38 template<>
  39 template<typename D, typename I>
  40 inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
  41                                                atomic_memory_order /* order */) const {
  42   STATIC_ASSERT(4 == sizeof(I));
  43   STATIC_ASSERT(4 == sizeof(D));
  44   D old_value;
  45   __asm__ volatile (  "lock xaddl %0,(%2)"
  46                     : "=r" (old_value)
  47                     : "0" (add_value), "r" (dest)
  48                     : "cc", "memory");
  49   return old_value;
  50 }
  51 
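The order argument is deliberately ignored: a lock-prefixed read-modify-write
is already a full two-way fence on x86, so every ordering mode gets the
strongest behavior. As a standalone sketch of what the xaddl sequence does
(demo names are mine): XADD exchanges the register with memory and stores the
sum back, so the register returns holding the value *dest had before the add.

  #include <cstdint>

  // Hypothetical standalone version of the same technique. The "+r"
  // read-write constraint plays the role of the "0" matching constraint
  // above: input and output share one register.
  inline int32_t fetch_and_add_demo(volatile int32_t* dest, int32_t add_value) {
    int32_t old_value = add_value;          // register operand for xadd
    __asm__ volatile ("lock xaddl %0,(%1)"
                      : "+r" (old_value)    // in: addend, out: prior *dest
                      : "r" (dest)
                      : "cc", "memory");
    return old_value;
  }

With *dest holding 5, fetch_and_add_demo(&v, 3) stores 8 and returns 5.
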
  52 template<>
  53 template<typename T>
  54 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
  55                                              T volatile* dest,
  56                                              atomic_memory_order /* order */) const {
  57   STATIC_ASSERT(4 == sizeof(T));
  58   __asm__ volatile (  "xchgl (%2),%0"
  59                     : "=r" (exchange_value)
  60                     : "0" (exchange_value), "r" (dest)
  61                     : "memory");
  62   return exchange_value;
  63 }
  64 
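No lock prefix is needed here: xchg with a memory operand asserts the bus
lock implicitly, which is also why this variant clobbers only memory and not
cc (xchg leaves the flags untouched). That implicit lock is enough to build
a test-and-set spinlock out of the same instruction; a hedged sketch, not
part of this file:

  #include <cstdint>

  // Hypothetical test-and-set lock built on the implicitly locked xchg.
  inline void spin_lock_demo(volatile int32_t* lock_word) {
    for (;;) {
      int32_t old_value = 1;                // value to swap in
      __asm__ volatile ("xchgl (%1),%0"
                        : "+r" (old_value)
                        : "r" (lock_word)
                        : "memory");
      if (old_value == 0) return;           // saw 0: lock was free, now ours
    }
  }

  inline void spin_unlock_demo(volatile int32_t* lock_word) {
    __asm__ volatile ("" ::: "memory");     // compiler barrier only; x86
    *lock_word = 0;                         // stores already have release order
  }
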
  65 template<>
  66 template<typename T>
  67 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
  68                                                 T volatile* dest,
  69                                                 T compare_value,
  70                                                 atomic_memory_order /* order */) const {
  71   STATIC_ASSERT(1 == sizeof(T));
  72   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
  73                     : "=a" (exchange_value)
  74                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
  75                     : "cc", "memory");
  [... lines 76-89 not shown ...]
  90   return exchange_value;
  91 }
  92 
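Note the "q" constraint on this byte form: in 32-bit mode cmpxchgb needs a
byte-addressable register (al, bl, cl, dl), while the wider variants below
can take any general register ("r"). Callers typically wrap cmpxchg in a
retry loop; a hedged sketch, assuming the public Atomic::cmpxchg wrapper
forwards its arguments in the order shown above (exchange_value, dest,
compare_value):

  // Hypothetical read-modify-CAS loop; atomic_set_bits_demo is my name,
  // not a HotSpot API.
  template<typename T>
  inline T atomic_set_bits_demo(T volatile* dest, T bits) {
    T old_value = *dest;
    for (;;) {
      T new_value = old_value | bits;
      T observed = Atomic::cmpxchg(new_value, dest, old_value);
      if (observed == old_value) {
        return old_value;       // swap happened; report the prior value
      }
      old_value = observed;     // lost a race: retry against fresh value
    }
  }
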
  93 #ifdef AMD64
  94 template<>
  95 template<typename D, typename I>
  96 inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
  97                                                atomic_memory_order /* order */) const {
  98   STATIC_ASSERT(8 == sizeof(I));
  99   STATIC_ASSERT(8 == sizeof(D));
 100   D old_value;
 101   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
 102                         : "=r" (old_value)
 103                         : "0" (add_value), "r" (dest)
 104                         : "cc", "memory");
 105   return old_value;
 106 }
 107 
 108 template<>
 109 template<typename T>
 110 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
 111                                              T volatile* dest,
 112                                              atomic_memory_order /* order */) const {
 113   STATIC_ASSERT(8 == sizeof(T));
 114   __asm__ __volatile__ ("xchgq (%2),%0"
 115                         : "=r" (exchange_value)
 116                         : "0" (exchange_value), "r" (dest)
 117                         : "memory");
 118   return exchange_value;
 119 }
 120 
 121 template<>
 122 template<typename T>
 123 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 124                                                 T volatile* dest,
 125                                                 T compare_value,
 126                                                 atomic_memory_order /* order */) const {
 127   STATIC_ASSERT(8 == sizeof(T));
 128   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
 129                         : "=a" (exchange_value)
 130                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
 131                         : "cc", "memory");
  [... remainder of file not shown ...]

New version:

  [... lines 1-33 not shown ...]

  34   template<typename D, typename I>
  35   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
  36 };
  37 
  38 template<>
  39 template<typename D, typename I>
  40 inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
  41                                                atomic_memory_order /* order */) const {
  42   STATIC_ASSERT(4 == sizeof(I));
  43   STATIC_ASSERT(4 == sizeof(D));
  44   D old_value;
  45   __asm__ volatile (  "lock xaddl %0,(%2)"
  46                     : "=r" (old_value)
  47                     : "0" (add_value), "r" (dest)
  48                     : "cc", "memory");
  49   return old_value;
  50 }
  51 
  52 template<>
  53 template<typename T>
  54 inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
  55                                              T exchange_value,
  56                                              atomic_memory_order /* order */) const {
  57   STATIC_ASSERT(4 == sizeof(T));
  58   __asm__ volatile (  "xchgl (%2),%0"
  59                     : "=r" (exchange_value)
  60                     : "0" (exchange_value), "r" (dest)
  61                     : "memory");
  62   return exchange_value;
  63 }
  64 
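This is the substance of the change: the destination pointer now comes first
and the new value second, where the old version above took them the other way
around. Assuming the public Atomic::xchg wrapper was updated to match (a
hedged sketch, since only the platform layer is shown here), call sites flip
accordingly:

  //   before:  jint prev = Atomic::xchg((jint)1, &_flag);   // value, dest
  //   after:   jint prev = Atomic::xchg(&_flag, (jint)1);   // dest, value
  //
  // The generated code is unchanged; only the argument order moves, which
  // lines xchg up with fetch_and_add's dest-first signature.
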
  65 template<>
  66 template<typename T>
  67 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
  68                                                 T volatile* dest,
  69                                                 T compare_value,
  70                                                 atomic_memory_order /* order */) const {
  71   STATIC_ASSERT(1 == sizeof(T));
  72   __asm__ volatile (  "lock cmpxchgb %1,(%3)"
  73                     : "=a" (exchange_value)
  74                     : "q" (exchange_value), "a" (compare_value), "r" (dest)
  75                     : "cc", "memory");
  [... lines 76-89 not shown ...]
  90   return exchange_value;
  91 }
  92 
  93 #ifdef AMD64
  94 template<>
  95 template<typename D, typename I>
  96 inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
  97                                                atomic_memory_order /* order */) const {
  98   STATIC_ASSERT(8 == sizeof(I));
  99   STATIC_ASSERT(8 == sizeof(D));
 100   D old_value;
 101   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
 102                         : "=r" (old_value)
 103                         : "0" (add_value), "r" (dest)
 104                         : "cc", "memory");
 105   return old_value;
 106 }
 107 
 108 template<>
 109 template<typename T>
 110 inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
 111                                              T exchange_value,
 112                                              atomic_memory_order /* order */) const {
 113   STATIC_ASSERT(8 == sizeof(T));
 114   __asm__ __volatile__ ("xchgq (%2),%0"
 115                         : "=r" (exchange_value)
 116                         : "0" (exchange_value), "r" (dest)
 117                         : "memory");
 118   return exchange_value;
 119 }
 120 
 121 template<>
 122 template<typename T>
 123 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 124                                                 T volatile* dest,
 125                                                 T compare_value,
 126                                                 atomic_memory_order /* order */) const {
 127   STATIC_ASSERT(8 == sizeof(T));
 128   __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
 129                         : "=a" (exchange_value)
 130                         : "r" (exchange_value), "a" (compare_value), "r" (dest)
 131                         : "cc", "memory");
  [... remainder of file not shown ...]