  }
}

/* Atomically writes VALUE into `*PTR' and returns the previous
   contents of `*PTR'.  */
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM
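
// Usage sketch (illustrative only, not part of this file): on ARM,
// Atomic::xchg on a 4-byte value reduces to the helper above, e.g.
//
//   volatile int flag = 0;
//   int old = arm_lock_test_and_set(1, &flag);  // old == 0, flag == 1
//
// The retry loop re-reads *ptr and calls __kernel_cmpxchg until no other
// thread has changed *ptr between the read and the compare-and-swap,
// which is what makes the exchange atomic even on cores without native
// atomic instructions.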

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange