
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp

rev 47321 : [mq]: Atomic_loadstore


 142       int prev = *ptr;
 143 
 144       if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
 145         return prev + add_value;
 146     }
 147 }
 148 
 149 /* Atomically write VALUE into `*PTR' and return the previous
 150    contents of `*PTR'.  */
 151 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 152   for (;;) {
 153       // Loop until a __kernel_cmpxchg succeeds.
 154       int prev = *ptr;
 155 
 156       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 157         return prev;
 158     }
 159 }
 160 #endif // ARM
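
The helpers above follow the classic compare-and-swap retry loop: read the current value, compute the desired new one, and retry if another thread changed *ptr in between. A minimal sketch of the same pattern, using a GCC __sync builtin as a hypothetical stand-in for __kernel_cmpxchg (which likewise reports success by returning zero):

// Hypothetical stand-in for __kernel_cmpxchg: 0 on success, non-zero
// when *ptr no longer held oldval at the time of the swap.
static inline int sketch_cmpxchg(int oldval, int newval, volatile int *ptr) {
  return __sync_bool_compare_and_swap(ptr, oldval, newval) ? 0 : 1;
}

// Same retry-loop shape as arm_add_and_fetch above.
static inline int sketch_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
    int prev = *ptr;                                   // sample current value
    if (sketch_cmpxchg(prev, prev + add_value, ptr) == 0)
      return prev + add_value;                         // swap won
    // Another thread raced us; loop and retry with a fresh value.
  }
}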
 161 
 162 inline void Atomic::store(jint store_value, volatile jint* dest) {
 163   *dest = store_value;
 164 }
 165 
 166 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
 167   *dest = store_value;
 168 }
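
These 32-bit stores are plain volatile writes: on the platforms Zero targets, a naturally aligned 4-byte store is already indivisible, and no ordering is implied. Spelled with an explicit builtin, a rough equivalent (illustrative only, not HotSpot's definition) would be:

inline void sketch_store(int store_value, volatile int* dest) {
  // Relaxed atomic store: guarantees the 4-byte write is indivisible,
  // but imposes no memory ordering, matching the plain store above.
  __atomic_store_n(dest, store_value, __ATOMIC_RELAXED);
}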
 169 
 170 template<size_t byte_size>
 171 struct Atomic::PlatformAdd
 172   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 173 {
 174   template<typename I, typename D>
 175   D add_and_fetch(I add_value, D volatile* dest) const;
 176 };
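
PlatformAdd uses the curiously recurring template pattern: it inherits from AddAndFetch<PlatformAdd>, which supplies the public call operator and defers to the derived class's add_and_fetch for the actual arithmetic. A generic sketch of the idiom (names and bodies are illustrative, not HotSpot's exact definitions):

// Hypothetical CRTP base: exposes operator() and defers to the derived
// class's add_and_fetch.
template<typename Derived>
struct SketchAddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
  }
};

// A platform class then only needs to supply add_and_fetch:
struct SketchPlatformAdd : SketchAddAndFetch<SketchPlatformAdd> {
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const {
    return __sync_add_and_fetch(dest, add_value);   // GCC builtin
  }
};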
 177 
 178 template<>
 179 template<typename I, typename D>
 180 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
 181   STATIC_ASSERT(4 == sizeof(I));
 182   STATIC_ASSERT(4 == sizeof(D));
 183 
 184 #ifdef ARM
 185   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 186 #else
 187 #ifdef M68K
 188   return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 189 #else


 252   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 253 #else
 254 #ifdef M68K
 255   return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 256 #else
 257   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 258 #endif // M68K
 259 #endif // ARM
 260 }
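
On the generic path, GCC's __sync_val_compare_and_swap returns the value *dest held before the operation, so callers detect success by comparing the result against compare_value. A small usage sketch (hypothetical names):

volatile int flag = 0;

static bool try_claim() {
  // Prior value 0 means we installed the 1 and won the race.
  return __sync_val_compare_and_swap(&flag, 0, 1) == 0;
}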
 261 
 262 template<>
 263 template<typename T>
 264 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 265                                                 T volatile* dest,
 266                                                 T compare_value,
 267                                                 cmpxchg_memory_order order) const {
 268   STATIC_ASSERT(8 == sizeof(T));
 269   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 270 }
 271 
 272 inline jlong Atomic::load(const volatile jlong* src) {



 273   volatile jlong dest;
 274   os::atomic_copy64(src, &dest);
 275   return dest;
 276 }
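
os::atomic_copy64 is Zero's helper for copying eight bytes indivisibly, which matters on 32-bit targets where a plain jlong access may tear. Its real definition is platform-specific; purely as an illustration, assuming GCC's __atomic builtins can service 8-byte accesses, such a helper could look like:

// Illustrative only: the real os::atomic_copy64 may use FP registers,
// LDREXD/STREXD, or a lock, depending on the CPU.
static inline void sketch_copy64(const volatile long long* src,
                                 volatile long long* dst) {
  long long tmp = __atomic_load_n(src, __ATOMIC_RELAXED);  // one 8-byte read
  __atomic_store_n(dst, tmp, __ATOMIC_RELAXED);            // one 8-byte write
}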
 277 
 278 inline void Atomic::store(jlong store_value, jlong* dest) {
 279   os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
 280 }
 281 
 282 inline void Atomic::store(jlong store_value, volatile jlong* dest) {
 283   os::atomic_copy64((volatile jlong*)&store_value, dest);
 284 }
 285 
 286 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP

*** Old version of the file above; patched version (rev 47321) below. ***

 142       int prev = *ptr;
 143 
 144       if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
 145         return prev + add_value;
 146     }
 147 }
 148 
 149 /* Atomically write VALUE into `*PTR' and return the previous
 150    contents of `*PTR'.  */
 151 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 152   for (;;) {
 153       // Loop until a __kernel_cmpxchg succeeds.
 154       int prev = *ptr;
 155 
 156       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
 157         return prev;
 158     }
 159 }
 160 #endif // ARM
 161 








 162 template<size_t byte_size>
 163 struct Atomic::PlatformAdd
 164   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 165 {
 166   template<typename I, typename D>
 167   D add_and_fetch(I add_value, D volatile* dest) const;
 168 };
 169 
 170 template<>
 171 template<typename I, typename D>
 172 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
 173   STATIC_ASSERT(4 == sizeof(I));
 174   STATIC_ASSERT(4 == sizeof(D));
 175 
 176 #ifdef ARM
 177   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 178 #else
 179 #ifdef M68K
 180   return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 181 #else


 244   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 245 #else
 246 #ifdef M68K
 247   return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
 248 #else
 249   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 250 #endif // M68K
 251 #endif // ARM
 252 }
 253 
 254 template<>
 255 template<typename T>
 256 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 257                                                 T volatile* dest,
 258                                                 T compare_value,
 259                                                 cmpxchg_memory_order order) const {
 260   STATIC_ASSERT(8 == sizeof(T));
 261   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 262 }
 263 
 264 template<>
 265 template<typename T>
 266 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
 267   STATIC_ASSERT(8 == sizeof(T));
 268   volatile jlong dest;
 269   os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
 270   return PrimitiveConversions::cast<T>(dest);
 271 }
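
PrimitiveConversions::cast converts the jlong holding the raw bytes into T (which may be jdouble or a pointer type) without a value-changing conversion. A hypothetical sketch of that kind of bit-preserving cast:

#include <string.h>

// Illustrative bit cast in the spirit of PrimitiveConversions::cast.
template<typename To, typename From>
inline To sketch_bit_cast(From from) {
  To to;
  memcpy(&to, &from, sizeof(To));  // copy raw bits; sizes must match
  return to;
}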
 272 
 273 template<>
 274 template<typename T>
 275 inline void Atomic::PlatformStore<8>::operator()(T store_value,
 276                                                  T volatile* dest) const {
 277   STATIC_ASSERT(8 == sizeof(T));
 278   os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
 279 }
 280 
 281 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
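
With the patch applied, 64-bit loads and stores reach these specializations through the templated Atomic front end instead of the old jlong overloads. An illustrative call site (hypothetical variable and function):

volatile jlong counter = 0;

void bump() {
  jlong v = Atomic::load(&counter);   // dispatches to PlatformLoad<8>
  Atomic::store(v + 1, &counter);     // dispatches to PlatformStore<8>
}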