 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/isSigned.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value which doesn't interfere with the C++2011 memory_order
  // enumerators; we need to be more conservative than any of them.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

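  // For example (illustrative sketch only, not part of this interface;
  // SomeGlobal_lock is a hypothetical mutex), a caller needing a 64-bit
  // atomic update on a 32-bit platform might write:
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::cmpxchg(new_value, dest, compare_value);
  //   } else {
  //     MutexLockerEx ml(SomeGlobal_lock);
  //     if (*dest == compare_value) *dest = new_value;
  //   }
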
  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function.

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(const volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);

  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return add(add_value, dest);
  }

  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
    return add(add_value, reinterpret_cast<char* volatile*>(dest));
  }
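
  // Usage sketch (illustrative only; the fields named here are
  // hypothetical). Integral adds require matching signedness; pointer
  // adds scale by the element size:
  //
  //   volatile jint _counter;
  //   jint now = Atomic::add(2, &_counter);  // _counter += 2, returns new value
  //
  //   HeapWord* volatile _top;
  //   Atomic::add(2, &_top);                 // advances _top by 2 * sizeof(HeapWord)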

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  inline static void inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the
  // prior value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);

  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
                                     volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value,
                                  volatile void* dest,
                                  void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value,
                   reinterpret_cast<void* volatile*>(dest),
                   compare_value,
                   order);
  }

 private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer-size bytes is required. The
  // class is a function object that must be default constructible,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Helper base classes for defining PlatformAdd. To use, define
  // PlatformAdd or a specialization that derives from one of these,
  // and include in the PlatformAdd definition the support function
  // (described below) required by the base class.
  //
  // These classes implement the required function object protocol for
  // PlatformAdd, using a support function template provided by the
  // derived class. Let add_value (of type I) and dest (of type D) be
  // the arguments the object is called with. If D is a pointer type
  // P*, then let addend (of type I) be add_value * sizeof(P);
  // otherwise, addend is add_value.
  //
  // FetchAndAdd requires the derived class to provide
  //   fetch_and_add(addend, dest)
  // atomically adding addend to the value of dest, and returning the
  // old value.
  //
  // AddAndFetch requires the derived class to provide
  //   add_and_fetch(addend, dest)
  // atomically adding addend to the value of dest, and returning the
  // new value.
  //
  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
  // treat it as if it were a uintptr_t; they do not perform any
  // scaling of the addend, as that has already been done by the
  // caller.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  template<typename Derived> struct FetchAndAdd;
  template<typename Derived> struct AddAndFetch;
 private:

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename I, typename D>
  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
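
  // For example (illustrative sketch; the out-of-line routine named
  // here is hypothetical), a platform whose 32-bit add is provided by
  // a non-template assembly stub
  //   extern "C" jint _Atomic_add_stub(jint add_value, volatile jint* dest);
  // returning the new value could implement its AddAndFetch support
  // function as:
  //
  //   template<typename I, typename D>
  //   D add_and_fetch(I add_value, D volatile* dest) const {
  //     return add_using_helper<jint>(_Atomic_add_stub, add_value, dest);
  //   }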

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // For each required size, a platform must either provide a
  // definition of that function template, or specialize the class
  // template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
 private:
};


template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};

template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
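
// For example (illustrative sketch only, not part of this header): a
// platform file built on the GCC __sync builtins, which return the new
// value and already provide a full barrier, might define its
// PlatformAdd in terms of AddAndFetch like this:
//
//   template<size_t byte_size>
//   struct Atomic::PlatformAdd
//     : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
//   {
//     template<typename I, typename D>
//     D add_and_fetch(I add_value, D volatile* dest) const {
//       return __sync_add_and_fetch(dest, add_value);
//     }
//   };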

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of the operator
// template's specializations is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
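
// A platform might, for instance, specialize the 4-byte case directly
// (illustrative sketch; the order parameter is simply ignored here
// because the __sync builtin is already a full two-way barrier):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   cmpxchg_memory_order order) const {
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }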

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
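
// A platform without a byte-sized compare-and-swap can adopt it as its
// 1-byte implementation directly (sketch):
//
//   template<>
//   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};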

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest) {
  return AddImpl<I, D>()(add_value, dest);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D operator()(I add_value, D volatile* dest) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
  VALUE_OBJ_CLASS_SPEC
{
  P* operator()(I add_value, P* volatile* dest) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest);
  }
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
  jshort operator()(jshort add_value, jshort volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
    return (jshort)(new_value >> 16); // preserves sign
  }
};
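
// The layout that pairs the atomically-updated jshort with its filler
// looks like this at a use site (illustrative sketch; the field names
// are hypothetical, see macros.hpp for the macro itself):
//
//   ATOMIC_SHORT_PAIR(
//     volatile jshort _refcount,  // updated with Atomic::add
//     jshort _pad                 // fills out the aligned 32-bit word
//   );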

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}
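
// Typical use (illustrative; _claimed is a hypothetical field): claim
// a piece of work by swapping 0 -> 1 and checking whether we won:
//
//   volatile jint _claimed;
//   bool won = (Atomic::cmpxchg(1, &_claimed, 0) == 0);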

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one jbyte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void Atomic::inc(volatile jshort* dest) {
  (void)add(jshort(1), dest);
}

inline void Atomic::dec(volatile jshort* dest) {
  (void)add(jshort(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP