public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store store_value to *dest.
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms.
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  // Overloads for volatile-qualified destinations.
  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms.
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // Atomically load the jlong at *src.
  // See comment above about using jlong atomics on 32-bit platforms.
  inline static jlong load(const volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);
91
92 inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
93 return add(add_value, dest);
94 }
95
96 inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
97 return add(add_value, reinterpret_cast<char* volatile*>(dest));
98 }
99
100 // Atomically increment location. inc() provide:
101 // <fence> increment-dest <membar StoreLoad|StoreStore>
102 // The type D may be either a pointer type, or an integral
103 // type. If it is a pointer type, then the increment is
104 // scaled to the size of the type pointed to by the pointer.
157 return cmpxchg(exchange_value, dest, compare_value, order);
158 }
159
160 inline static void* cmpxchg_ptr(void* exchange_value,
161 volatile void* dest,
162 void* compare_value,
163 cmpxchg_memory_order order = memory_order_conservative) {
164 return cmpxchg(exchange_value,
165 reinterpret_cast<void* volatile*>(dest),
166 compare_value,
167 order);
168 }
169
private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes are required. The
  // class is a function object that must be default constructable,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  // Helper for platform code: performs an exchange by calling fn, a
  // platform-provided function of a (possibly different) Type, and
  // converting between T and Type as needed.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};
332
333 template<typename From, typename To>
334 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
335 // Determine whether From* is implicitly convertible to To*, using
336 // the "sizeof trick".
337 typedef char yes;
338 typedef char (&no)[2];
339
340 static yes test(To*);
341 static no test(...);
342 static From* test_value;
343
344 static const bool value = (sizeof(yes) == sizeof(test(test_value)));
345 };
346
347 // Define FetchAndAdd and AddAndFetch helper classes before including
348 // platform file, which may use these as base classes, requiring they
349 // be complete.
350
// CRTP helper for platforms whose primitive atomic-add returns the
// OLD value; adjusts it to the updated-value contract of Atomic::add.
template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
356
// CRTP helper for platforms whose primitive atomic-add already returns
// the NEW (updated) value.
template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
362
363 template<typename D>
364 inline void Atomic::inc(D volatile* dest) {
365 STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
366 typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
406 // of the operator template are provided, nor are there any generic
407 // specializations of the class. The platform file is responsible for
408 // providing those.
// Platform-specific exchange; no generic definitions of the operator
// template or specializations of the class are provided here — the
// platform file included below is responsible for those.
template<size_t byte_size>
struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest) const;
};
415
416 // platform specific in-line definitions - must come before shared definitions
417
418 #include OS_CPU_HEADER(atomic)
419
420 // shared in-line definitions
421
422 // size_t casts...
423 #if (SIZE_MAX != UINTPTR_MAX)
424 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
425 #endif
426
427 template<typename I, typename D>
428 inline D Atomic::add(I add_value, D volatile* dest) {
429 return AddImpl<I, D>()(add_value, dest);
430 }
431
// Handle add for integral types. The addend type I may be narrower
// than the destination type D, but must have the same signedness.
template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D operator()(I add_value, D volatile* dest) const {
    // Widen the addend to D first, so the platform layer selected by
    // sizeof(D) sees operands of matching size.
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
  }
};
|
public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store store_value to *dest.
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);
72
73 inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
74 Atomic::store(store_value, dest);
75 }
76
77 inline static void store_ptr(void* store_value, volatile void* dest) {
78 Atomic::store(store_value, reinterpret_cast<void* volatile*>(dest));
79 }
80
  // Atomically load the value at *dest.
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);
92
93 inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
94 return add(add_value, dest);
95 }
96
97 inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
98 return add(add_value, reinterpret_cast<char* volatile*>(dest));
99 }
100
101 // Atomically increment location. inc() provide:
102 // <fence> increment-dest <membar StoreLoad|StoreStore>
103 // The type D may be either a pointer type, or an integral
104 // type. If it is a pointer type, then the increment is
105 // scaled to the size of the type pointed to by the pointer.
158 return cmpxchg(exchange_value, dest, compare_value, order);
159 }
160
161 inline static void* cmpxchg_ptr(void* exchange_value,
162 volatile void* dest,
163 void* compare_value,
164 cmpxchg_memory_order order = memory_order_conservative) {
165 return cmpxchg(exchange_value,
166 reinterpret_cast<void* volatile*>(dest),
167 compare_value,
168 order);
169 }
170
private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename D, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes are required.
  // The class is a function object that must be default constructable,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using
  //   PrimitiveConversions.
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more for e.g. 64 bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes are required.
  // The class is a function object that must be default
  // constructable, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions.
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more for e.g. 64 bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes are required. The
  // class is a function object that must be default constructable,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  // Helper for platform code: performs an exchange by calling fn, a
  // platform-provided function of a (possibly different) Type, and
  // converting between T and Type as needed.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};
384
385 template<typename From, typename To>
386 struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
387 // Determine whether From* is implicitly convertible to To*, using
388 // the "sizeof trick".
389 typedef char yes;
390 typedef char (&no)[2];
391
392 static yes test(To*);
393 static no test(...);
394 static From* test_value;
395
396 static const bool value = (sizeof(yes) == sizeof(test(test_value)));
397 };
398
// Handle load for pointer, integral and enum types: no conversion is
// needed, so just forward to the platform handler for the size of T.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};
412
// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    // Decaying must not change the object size, since we reinterpret
    // the storage rather than copying it.
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};
435
// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    // Plain volatile read; assumed naturally atomic for word-size-or-
    // smaller objects on supported platforms.
    return *dest;
  }
};
450
// Handle store for integral and enum types.
//
// All the involved types must be identical, so no conversion is
// needed before dispatching on size.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  void operator()(T new_value, T volatile* dest) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(new_value, dest);
  }
};
466
// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename T, typename D, typename PlatformOp>
struct Atomic::StoreImpl<
  T*, D*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  void operator()(T* new_value, D* volatile* dest) const {
    // Allow derived to base conversion, and adding cv-qualifiers,
    // via an ordinary implicit conversion before the atomic store.
    D* value = new_value;
    PlatformOp()(value, dest);
  }
};
485
// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  void operator()(T new_value, T volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    // Decaying must not change the object size, since we reinterpret
    // the destination storage rather than copying it.
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(Translator::decay(new_value),
                 reinterpret_cast<Decayed volatile*>(dest));
  }
};
507
// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    // Plain volatile store. The result of the assignment is cast to a
    // non-volatile reference before being discarded — presumably to
    // avoid an extra volatile read of *dest that some compilers emit
    // when the result of a volatile assignment is used; verify against
    // the project's compiler set.
    (void)const_cast<T&>(*dest = new_value);
  }
};
523
524 // Define FetchAndAdd and AddAndFetch helper classes before including
525 // platform file, which may use these as base classes, requiring they
526 // be complete.
527
// CRTP helper for platforms whose primitive atomic-add returns the
// OLD value; adjusts it to the updated-value contract of Atomic::add.
template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
533
// CRTP helper for platforms whose primitive atomic-add already returns
// the NEW (updated) value.
template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};
539
540 template<typename D>
541 inline void Atomic::inc(D volatile* dest) {
542 STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
543 typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
583 // of the operator template are provided, nor are there any generic
584 // specializations of the class. The platform file is responsible for
585 // providing those.
// Platform-specific exchange; no generic definitions of the operator
// template or specializations of the class are provided here — the
// platform file included below is responsible for those.
template<size_t byte_size>
struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest) const;
};
592
593 // platform specific in-line definitions - must come before shared definitions
594
595 #include OS_CPU_HEADER(atomic)
596
597 // shared in-line definitions
598
599 // size_t casts...
600 #if (SIZE_MAX != UINTPTR_MAX)
601 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
602 #endif
603
604 template<typename T>
605 inline T Atomic::load(const volatile T* dest) {
606 return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
607 }
608
609 template<typename T, typename D>
610 inline void Atomic::store(T store_value, volatile D* dest) {
611 StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
612 }
613
614 template<typename I, typename D>
615 inline D Atomic::add(I add_value, D volatile* dest) {
616 return AddImpl<I, D>()(add_value, dest);
617 }
618
// Handle add for integral types. The addend type I may be narrower
// than the destination type D, but must have the same signedness.
template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D operator()(I add_value, D volatile* dest) const {
    // Widen the addend to D first, so the platform layer selected by
    // sizeof(D) sees operands of matching size.
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
  }
};
|