57 /* Perform an atomic compare and swap: if the current value of `*PTR'
58 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
59 `*PTR' before the operation.*/
60 static inline int m68k_compare_and_swap(int newval,
61 volatile int *ptr,
62 int oldval) {
63 for (;;) {
64 int prev = *ptr;
65 if (prev != oldval)
66 return prev;
67
68 if (__m68k_cmpxchg (prev, newval, ptr) == newval)
69 // Success.
70 return prev;
71
72 // We failed even though prev == oldval. Try again.
73 }
74 }
75
76 /* Atomically add an int to memory. */
77 static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
78 for (;;) {
79 // Loop until success.
80
81 int prev = *ptr;
82
83 if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
84 return prev + add_value;
85 }
86 }
87
/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    // NOTE(review): this helper tests the result against `prev', while
    // m68k_compare_and_swap above tests against the stored (new) value.
    // Presumably __m68k_cmpxchg returns the old value on failure and the
    // new value on success -- confirm which convention it implements.
    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
118 /* Perform an atomic compare and swap: if the current value of `*PTR'
119 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
120 `*PTR' before the operation.*/
121 static inline int arm_compare_and_swap(int newval,
122 volatile int *ptr,
123 int oldval) {
124 for (;;) {
125 int prev = *ptr;
126 if (prev != oldval)
127 return prev;
128
129 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
130 // Success.
131 return prev;
132
133 // We failed even though prev == oldval. Try again.
134 }
135 }
136
137 /* Atomically add an int to memory. */
138 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
139 for (;;) {
140 // Loop until a __kernel_cmpxchg succeeds.
141
142 int prev = *ptr;
143
144 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
145 return prev + add_value;
146 }
147 }
148
149 /* Atomically write VALUE into `*PTR' and returns the previous
150 contents of `*PTR'. */
151 static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
152 for (;;) {
153 // Loop until a __kernel_cmpxchg succeeds.
154 int prev = *ptr;
155
156 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
157 return prev;
158 }
159 }
160 #endif // ARM
161
162 inline void Atomic::store(jint store_value, volatile jint* dest) {
163 #if !defined(ARM) && !defined(M68K)
164 __sync_synchronize();
165 #endif
166 *dest = store_value;
167 }
168
169 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
170 #if !defined(ARM) && !defined(M68K)
171 __sync_synchronize();
172 #endif
173 *dest = store_value;
174 }
175
176 inline jint Atomic::add(jint add_value, volatile jint* dest) {
177 #ifdef ARM
178 return arm_add_and_fetch(dest, add_value);
179 #else
180 #ifdef M68K
181 return m68k_add_and_fetch(dest, add_value);
182 #else
183 return __sync_add_and_fetch(dest, add_value);
184 #endif // M68K
185 #endif // ARM
186 }
187
188 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
189 #ifdef ARM
190 return arm_add_and_fetch(dest, add_value);
191 #else
192 #ifdef M68K
193 return m68k_add_and_fetch(dest, add_value);
194 #else
195 return __sync_add_and_fetch(dest, add_value);
196 #endif // M68K
197 #endif // ARM
198 }
199
200 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
201 return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
202 }
203
// Atomically increment a jint by one.
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

// Atomically increment a pointer-width integer by one.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

// void* convenience form of inc_ptr.
inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

// Atomically decrement a jint by one.
inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

// Atomically decrement a pointer-width integer by one.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}
|
57 /* Perform an atomic compare and swap: if the current value of `*PTR'
58 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
59 `*PTR' before the operation.*/
60 static inline int m68k_compare_and_swap(int newval,
61 volatile int *ptr,
62 int oldval) {
63 for (;;) {
64 int prev = *ptr;
65 if (prev != oldval)
66 return prev;
67
68 if (__m68k_cmpxchg (prev, newval, ptr) == newval)
69 // Success.
70 return prev;
71
72 // We failed even though prev == oldval. Try again.
73 }
74 }
75
76 /* Atomically add an int to memory. */
77 static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
78 for (;;) {
79 // Loop until success.
80
81 int prev = *ptr;
82
83 if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
84 return prev + add_value;
85 }
86 }
87
/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    // NOTE(review): success is tested against `prev' here, but against
    // the new value in m68k_compare_and_swap above -- verify the return
    // convention of __m68k_cmpxchg covers both uses.
    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
118 /* Perform an atomic compare and swap: if the current value of `*PTR'
119 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
120 `*PTR' before the operation.*/
121 static inline int arm_compare_and_swap(int newval,
122 volatile int *ptr,
123 int oldval) {
124 for (;;) {
125 int prev = *ptr;
126 if (prev != oldval)
127 return prev;
128
129 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
130 // Success.
131 return prev;
132
133 // We failed even though prev == oldval. Try again.
134 }
135 }
136
137 /* Atomically add an int to memory. */
138 static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
139 for (;;) {
140 // Loop until a __kernel_cmpxchg succeeds.
141
142 int prev = *ptr;
143
144 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
145 return prev + add_value;
146 }
147 }
148
149 /* Atomically write VALUE into `*PTR' and returns the previous
150 contents of `*PTR'. */
151 static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
152 for (;;) {
153 // Loop until a __kernel_cmpxchg succeeds.
154 int prev = *ptr;
155
156 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
157 return prev;
158 }
159 }
160 #endif // ARM
161
162 inline void Atomic::store(jint store_value, volatile jint* dest) {
163 #if !defined(ARM) && !defined(M68K)
164 __sync_synchronize();
165 #endif
166 *dest = store_value;
167 }
168
169 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
170 #if !defined(ARM) && !defined(M68K)
171 __sync_synchronize();
172 #endif
173 *dest = store_value;
174 }
175
// Per-size platform add implementation.  Passes itself to AddAndFetch
// (CRTP); the base presumably derives the fetch-and-add flavour from
// the add_and_fetch primitive declared below -- see shared atomic.hpp.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Atomically add add_value to *dest; returns the updated value.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
183
184 template<>
185 template<typename I, typename D>
186 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
187 STATIC_CAST(4 == sizeof(I));
188 STATIC_CAST(4 == sizeof(D));
189
190 #ifdef ARM
191 return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
192 #else
193 #ifdef M68K
194 return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
195 #else
196 return __sync_add_and_fetch(dest, add_value);
197 #endif // M68K
198 #endif // ARM
199 }
200
201 template<>
202 template<typename I, typename D>
203 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
204 STATIC_CAST(8 == sizeof(I));
205 STATIC_CAST(8 == sizeof(D));
206
207 return __sync_add_and_fetch(dest, add_value);
208 }
209
// 2-byte add has no native primitive here; AddShortUsingInt presumably
// emulates it on top of the 4-byte operation -- see shared atomic.hpp.
template<>
struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
212
// Atomically increment a jint by one.
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

// Atomically increment a pointer-width integer by one.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

// void* convenience form of inc_ptr.
inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

// Atomically decrement a jint by one.
inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

// Atomically decrement a pointer-width integer by one.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}
|