
src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2

Old version:

  74 #endif
  75 }
  76 
  77 inline void Atomic::store (jlong value, jlong* dest) {
  78   store(value, (volatile jlong*)dest);
  79 }
  80 
  81 // As per atomic.hpp all read-modify-write operations have to provide two-way
  82 // barrier semantics. For AARCH64 we are using load-acquire-with-reservation and
  83 // store-release-with-reservation. While load-acquire combined with store-release
  84 // do not generally form two-way barriers, their use with reservations does - the
  85 // ARMv8 architecture manual Section F "Barrier Litmus Tests" indicates they
  86 // provide sequentially consistent semantics. All we need to add is an explicit
  87 // barrier in the failure path of the cmpxchg operations (as these don't execute
  88 // the store) - arguably this may be overly cautious as there is a very low
  89 // likelihood that the hardware would pull loads/stores into the region guarded
  90 // by the reservation.
  91 //
  92 // For ARMv7 we add explicit barriers in the stubs.
  93 
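To make the failure-path barrier concrete, here is a hedged sketch of a 32-bit AARCH64 compare-and-exchange loop of the kind the comment refers to. The cmpxchg bodies themselves are outside this excerpt, so this is written in the style of the add loop below rather than copied from the file; the function name is invented and "dmb sy" is an assumption for the form of the explicit barrier.

// Sketch of a 32-bit AARCH64 cmpxchg loop with the explicit barrier on the
// failure (mismatch) path described above. Illustrative only: the name is
// made up and "dmb sy" stands in for whatever barrier the real code uses.
inline jint cmpxchg_failure_barrier_sketch(jint exchange_value,
                                           volatile jint* dest,
                                           jint compare_value) {
  jint rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"          // load-acquire with reservation
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"                          // mismatch: no store, take failure path
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t" // store-release, consumes the reservation
    " cbnz %w[tmp], 1b\n\t"                 // reservation lost: retry
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"                           // explicit barrier on the failure path
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
}
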
  94 inline jint Atomic::add(jint add_value, volatile jint* dest) {
  95 #ifdef AARCH64
  96   jint val;
  97   int tmp;
  98   __asm__ volatile(
  99     "1:\n\t"
 100     " ldaxr %w[val], [%[dest]]\n\t"
 101     " add %w[val], %w[val], %w[add_val]\n\t"
 102     " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
 103     " cbnz %w[tmp], 1b\n\t"
 104     : [val] "=&r" (val), [tmp] "=&r" (tmp)
 105     : [add_val] "r" (add_value), [dest] "r" (dest)
 106     : "memory");
 107   return val;
 108 #else
 109   return (*os::atomic_add_func)(add_value, dest);
 110 #endif
 111 }
 112 
 113 inline void Atomic::inc(volatile jint* dest) {
 114   Atomic::add(1, (volatile jint *)dest);
 115 }
 116 
 117 inline void Atomic::dec(volatile jint* dest) {
 118   Atomic::add(-1, (volatile jint *)dest);
 119 }
 120 
 121 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 122 #ifdef AARCH64
 123   intptr_t val;
 124   int tmp;
 125   __asm__ volatile(
 126     "1:\n\t"
 127     " ldaxr %[val], [%[dest]]\n\t"
 128     " add %[val], %[val], %[add_val]\n\t"
 129     " stlxr %w[tmp], %[val], [%[dest]]\n\t"
 130     " cbnz %w[tmp], 1b\n\t"
 131     : [val] "=&r" (val), [tmp] "=&r" (tmp)
 132     : [add_val] "r" (add_value), [dest] "r" (dest)
 133     : "memory");
 134   return val;
 135 #else
 136   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
 137 #endif
 138 }
 139 
 140 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
 141   return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
 142 }
 143 
 144 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 145   Atomic::add_ptr(1, dest);
 146 }
 147 
 148 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
 149   Atomic::add_ptr(-1, dest);
 150 }
 151 
 152 inline void Atomic::inc_ptr(volatile void* dest) {
 153   inc_ptr((volatile intptr_t*)dest);
 154 }
 155 
 156 inline void Atomic::dec_ptr(volatile void* dest) {
 157   dec_ptr((volatile intptr_t*)dest);
 158 }
 159 
 160 
 161 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
 162 #ifdef AARCH64

New version:

  74 #endif
  75 }
  76 
  77 inline void Atomic::store (jlong value, jlong* dest) {
  78   store(value, (volatile jlong*)dest);
  79 }
  80 
  81 // As per atomic.hpp all read-modify-write operations have to provide two-way
  82 // barrier semantics. For AARCH64 we are using load-acquire-with-reservation and
  83 // store-release-with-reservation. While load-acquire combined with store-release
  84 // do not generally form two-way barriers, their use with reservations does - the
  85 // ARMv8 architecture manual Section F "Barrier Litmus Tests" indicates they
  86 // provide sequentially consistent semantics. All we need to add is an explicit
  87 // barrier in the failure path of the cmpxchg operations (as these don't execute
  88 // the store) - arguably this may be overly cautious as there is a very low
  89 // likelihood that the hardware would pull loads/stores into the region guarded
  90 // by the reservation.
  91 //
  92 // For ARMv7 we add explicit barriers in the stubs.
  93 
  94 template<size_t byte_size>
  95 struct Atomic::PlatformAdd
  96   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  97 {
  98   template<typename I, typename D>
  99   D add_and_fetch(I add_value, D volatile* dest) const;
 100 };
 101 
 102 template<>
 103 template<typename I, typename D>
 104 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
 105   STATIC_ASSERT(4 == sizeof(I));
 106   STATIC_ASSERT(4 == sizeof(D));
 107 #ifdef AARCH64
 108   D val;
 109   int tmp;
 110   __asm__ volatile(
 111     "1:\n\t"
 112     " ldaxr %w[val], [%[dest]]\n\t"
 113     " add %w[val], %w[val], %w[add_val]\n\t"
 114     " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
 115     " cbnz %w[tmp], 1b\n\t"
 116     : [val] "=&r" (val), [tmp] "=&r" (tmp)
 117     : [add_val] "r" (add_value), [dest] "r" (dest)
 118     : "memory");
 119   return val;
 120 #else
 121   return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 122 #endif
 123 }
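
In the new scheme, Atomic::add is routed through Atomic::PlatformAdd, whose AddAndFetch base (declared in the shared atomic.hpp, not shown here) supplies the call operator that forwards to add_and_fetch; add_using_helper adapts the ARMv7 assembler stub os::atomic_add_func to that interface. A simplified sketch of the dispatch shape, with illustrative names and none of the shared header's type checking:

// Simplified sketch of the CRTP dispatch assumed above; the real AddAndFetch
// and add_using_helper live in the shared atomic.hpp, so treat the names and
// shapes here as illustrative only.
template<typename Derived>
struct AddAndFetchSketch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    // Forward to the platform-specific loop provided by Derived
    // (for example the PlatformAdd<4>::add_and_fetch above).
    return static_cast<const Derived&>(*this).add_and_fetch(add_value, dest);
  }
};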
 124 
 125 inline void Atomic::inc(volatile jint* dest) {
 126   Atomic::add(1, (volatile jint *)dest);
 127 }
 128 
 129 inline void Atomic::dec(volatile jint* dest) {
 130   Atomic::add(-1, (volatile jint *)dest);
 131 }
 132 
 133 #ifdef AARCH64
 134 template<>
 135 template<typename I, typename D>
 136 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
 137   STATIC_ASSERT(8 == sizeof(I));
 138   STATIC_ASSERT(8 == sizeof(D));
 139   D val;
 140   int tmp;
 141   __asm__ volatile(
 142     "1:\n\t"
 143     " ldaxr %[val], [%[dest]]\n\t"
 144     " add %[val], %[val], %[add_val]\n\t"
 145     " stlxr %w[tmp], %[val], [%[dest]]\n\t"
 146     " cbnz %w[tmp], 1b\n\t"
 147     : [val] "=&r" (val), [tmp] "=&r" (tmp)
 148     : [add_val] "r" (add_value), [dest] "r" (dest)
 149     : "memory");
 150   return val;
 151 }
 152 #endif // AARCH64
 153 
 154 template<>
 155 struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
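
AddShortUsingInt is the shared fallback that builds a 2-byte add on top of a 4-byte one. The sketch below illustrates the underlying trick under stated assumptions (little-endian layout, the 16-bit field in the most significant half of a naturally aligned 32-bit word); it is not the atomic.hpp implementation, and the GCC builtin merely stands in for the platform's own 32-bit atomic add.

// Illustrative sketch of the "short add via int add" trick; not the shared
// atomic.hpp implementation. Assumptions: little-endian layout and a 16-bit
// field occupying the most significant half of a naturally aligned 32-bit
// word, so a carry out of the 16-bit add falls off the top of the 32-bit
// word instead of corrupting the neighbouring half.
#include <cstdint>

inline short add_short_using_int_sketch(short add_value, volatile short* dest) {
  // Aligned 32-bit word whose upper 16 bits are *dest (little-endian: the
  // short sits at the higher address, (uintptr_t)dest % 4 == 2).
  int* base = reinterpret_cast<int*>(reinterpret_cast<uintptr_t>(dest) & ~uintptr_t(3));
  // Move the addend into the upper half (unsigned math avoids shifting a
  // negative value).
  int shifted = static_cast<int>(static_cast<uint32_t>(static_cast<uint16_t>(add_value)) << 16);
  // GCC/Clang builtin standing in for the platform's 32-bit atomic add.
  int result = __sync_add_and_fetch(base, shifted);
  // The new value of the 16-bit field is the upper half of the 32-bit result.
  return static_cast<short>(static_cast<uint32_t>(result) >> 16);
}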
 156 
 157 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 158   Atomic::add_ptr(1, dest);
 159 }
 160 
 161 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
 162   Atomic::add_ptr(-1, dest);
 163 }
 164 
 165 inline void Atomic::inc_ptr(volatile void* dest) {
 166   inc_ptr((volatile intptr_t*)dest);
 167 }
 168 
 169 inline void Atomic::dec_ptr(volatile void* dest) {
 170   dec_ptr((volatile intptr_t*)dest);
 171 }
 172 
 173 
 174 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
 175 #ifdef AARCH64