
src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp

rev 13452 : imported patch Atomic_cmpxchg
rev 13453 : imported patch Atomic_add
rev 13454 : [mq]: Atomic_add_v2

*** 72,82 ****
        // We failed even though prev == oldval. Try again.
      }
  }
  
  /* Atomically add an int to memory. */
! static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
    for (;;) {
        // Loop until success.
  
        int prev = *ptr;
--- 72,82 ----
        // We failed even though prev == oldval. Try again.
      }
  }
  
  /* Atomically add an int to memory. */
! static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
    for (;;) {
        // Loop until success.
  
        int prev = *ptr;
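Both helpers touched by this hunk and the next follow the same compare-and-swap retry pattern; the change is only the parameter order, which moves to (add_value, ptr). A minimal standalone sketch of that pattern, using GCC's generic __sync_val_compare_and_swap builtin instead of the file's __m68k_cmpxchg / __kernel_cmpxchg wrappers (sketch_add_and_fetch is an illustrative name, not part of the patch):

static inline int sketch_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
    // Snapshot the current value and compute the desired result.
    int prev = *ptr;
    int next = prev + add_value;
    // Install next only if *ptr still holds prev; otherwise another
    // thread won the race and we retry with a fresh snapshot.
    if (__sync_val_compare_and_swap(ptr, prev, next) == prev) {
      return next;  // add-and-fetch: return the updated value
    }
  }
}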
*** 133,143 ****
        // We failed even though prev == oldval. Try again.
      }
  }
  
  /* Atomically add an int to memory. */
! static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
    for (;;) {
        // Loop until a __kernel_cmpxchg succeeds.
  
        int prev = *ptr;
--- 133,143 ----
        // We failed even though prev == oldval. Try again.
      }
  }
  
  /* Atomically add an int to memory. */
! static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
    for (;;) {
        // Loop until a __kernel_cmpxchg succeeds.
  
        int prev = *ptr;
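The parameter swap in both helpers lines up with the new call sites below, which route through add_using_helper<int>(helper, add_value, dest) and therefore pass the addend first. The adapter is presumably along these lines (a sketch only; the name add_using_helper_sketch and the casts are illustrative, the real helper lives in the shared atomic.hpp):

template<typename Type, typename Fn, typename I, typename D>
inline D add_using_helper_sketch(Fn fn, I add_value, D volatile* dest) {
  // Narrow the generic arguments to the int-based helper's signature,
  // call it with (add_value, ptr), and widen the result back to D.
  return static_cast<D>(
      fn(static_cast<Type>(add_value),
         reinterpret_cast<Type volatile*>(dest)));
}

With the helpers declared as fn(int add_value, volatile int *ptr), such an adapter can forward its arguments unchanged, which is why both m68k_add_and_fetch and arm_add_and_fetch swap their parameters in this patch.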
*** 171,207 ****
    __sync_synchronize();
  #endif
    *dest = store_value;
  }
  
! inline jint Atomic::add(jint add_value, volatile jint* dest) {
  #ifdef ARM
!   return arm_add_and_fetch(dest, add_value);
  #else
  #ifdef M68K
!   return m68k_add_and_fetch(dest, add_value);
  #else
    return __sync_add_and_fetch(dest, add_value);
  #endif // M68K
  #endif // ARM
  }
  
! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
! #ifdef ARM
!   return arm_add_and_fetch(dest, add_value);
! #else
! #ifdef M68K
!   return m68k_add_and_fetch(dest, add_value);
! #else
    return __sync_add_and_fetch(dest, add_value);
- #endif // M68K
- #endif // ARM
  }
  
! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
! }
  
  inline void Atomic::inc(volatile jint* dest) {
    add(1, dest);
  }
--- 171,216 ----
    __sync_synchronize();
  #endif
    *dest = store_value;
  }
  
! template<size_t byte_size>
! struct Atomic::PlatformAdd
!   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
! {
!   template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest) const;
! };
! 
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(4 == sizeof(I));
!   STATIC_ASSERT(4 == sizeof(D));
! 
  #ifdef ARM
!   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
  #else
  #ifdef M68K
!   return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
  #else
    return __sync_add_and_fetch(dest, add_value);
  #endif // M68K
  #endif // ARM
  }
  
! template<>
! template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
!   STATIC_ASSERT(8 == sizeof(I));
!   STATIC_ASSERT(8 == sizeof(D));
! 
    return __sync_add_and_fetch(dest, add_value);
  }
  
! template<>
! struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
  
  inline void Atomic::inc(volatile jint* dest) {
    add(1, dest);
  }
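The new side replaces the jint/intptr_t overloads with a PlatformAdd template specialized by operand size; the shared Atomic::add front end then selects the 4- or 8-byte specialization from the size of the destination and expects add-and-fetch semantics (the updated value), which is what __sync_add_and_fetch returns. A compressed standalone sketch of that size dispatch, assuming the shared layer resolves the specialization roughly like this (the real path goes through Atomic::AddAndFetch and extra conversion layers, and the names here are illustrative only):

#include <stddef.h>

template<size_t byte_size>
struct PlatformAddSketch;

// 4-byte operands: GCC's __sync_add_and_fetch already returns the new value.
template<>
struct PlatformAddSketch<4> {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    return __sync_add_and_fetch(dest, add_value);
  }
};

// 8-byte operands: same builtin, wider type.
template<>
struct PlatformAddSketch<8> {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    return __sync_add_and_fetch(dest, add_value);
  }
};

// Front end: pick the specialization from the destination's size.
template<typename I, typename D>
inline D atomic_add_sketch(I add_value, D volatile* dest) {
  return PlatformAddSketch<sizeof(D)>()(add_value, dest);
}

// e.g. int counter = 0; atomic_add_sketch(1, &counter) yields 1,
// matching the old Atomic::add(jint, volatile jint*) contract.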