 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum atomic_memory_order {
  // Most of these follow the corresponding C++11 semantics.
  memory_order_relaxed,
  memory_order_consume,
  memory_order_acquire,
  memory_order_release,
  memory_order_acq_rel,
  // We need to be more conservative than seq_cst on PPC64.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location.
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);

  // Atomically load from a location.
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

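  // For example (illustrative only; _state is a hypothetical field):
  //   volatile intptr_t _state;
  //   Atomic::store((intptr_t)1, &_state);
  //   intptr_t s = Atomic::load(&_state);
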
  // Atomically add to a location. Returns the updated value. add() and
  // sub() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

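  // For example (illustrative only; _counter is a hypothetical field):
  //   volatile int _counter;
  //   int value = Atomic::add(2, &_counter);  // returns the updated value
  //   value = Atomic::sub(1, &_counter);      // likewise returns the result
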
  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

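  // For example (illustrative only; both fields are hypothetical):
  //   volatile int       _hits;
  //   HeapWord* volatile _top;
  //   Atomic::inc(&_hits);  // adds 1 to _hits
  //   Atomic::inc(&_top);   // advances _top by sizeof(HeapWord)
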
  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg() provides:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static D xchg(T exchange_value, volatile D* dest,
                       atomic_memory_order order = memory_order_conservative);

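  // For example (illustrative only; _owner is a hypothetical field):
  //   Thread* volatile _owner;
  //   Thread* prev_owner = Atomic::xchg(Thread::current(), &_owner);
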
  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the
  // prior value of *dest. cmpxchg() provides:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          atomic_memory_order order = memory_order_conservative);

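  // A typical retry loop looks like this (illustrative only; _bits is
  // a hypothetical field):
  //   volatile intptr_t _bits;
  //   intptr_t old = Atomic::load(&_bits);
  //   while (true) {
  //     intptr_t prev = Atomic::cmpxchg(old | 1, &_bits, old);
  //     if (prev == old) break;  // success
  //     old = prev;              // lost a race; retry with the new value
  //   }
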
  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     atomic_memory_order order = memory_order_conservative);

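  // For example (illustrative only; Foo and _cache are hypothetical):
  //   Foo* volatile _cache;
  //   Foo* foo = new Foo();
  //   if (!Atomic::replace_if_null(foo, &_cache)) {
  //     delete foo;  // another thread won the race to install a value
  //   }
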
private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename D, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes is required.
  // The class is a function object that must be default constructible,
  // with these requirements:

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper invoked
  // on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename I, typename D>
  static D add_using_helper(Fn fn, I add_value, D volatile* dest);

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 bytes is required. The class is a function object
  // that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixme.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename T, typename D, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4 and sizeof(intptr_t) bytes is required. The class is a
  // function object that must be default constructible, with these
  // requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - order is of type atomic_memory_order.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(exchange_value, dest, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// The default implementation of Atomic::PlatformStore is only used for
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest, order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest, order);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(-addend, dest, order);
}

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of the operator
// specializations is provided, nor are there any generic specializations
// of the class. The platform file is responsible for providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of the operator
// specializations is provided, nor are there any generic specializations
// of the class. The platform file is responsible for providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               atomic_memory_order order) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest,
                     atomic_memory_order order) {
  return AddImpl<I, D>()(add_value, dest, order);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest, order);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
  }
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<short, short> {
  short operator()(short add_value, short volatile* dest, atomic_memory_order order) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
#endif
    return (short)(new_value >> 16); // preserves sign
  }
};
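
// A sketch of the intended declaration pattern, assuming the macro's
// (atomic_decl, non_atomic_decl) argument order from macros.hpp; the
// field names here are hypothetical:
//
//   ATOMIC_SHORT_PAIR(
//     volatile short _counter,  // the short that is updated atomically
//     short          _padding   // pairs _counter into an aligned 32-bit word
//   );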

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
}

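// A platform file selects between the two styles by deriving its
// PlatformAdd from one of these helpers; a minimal sketch (assumed shape,
// not from this file):
//
//   template<size_t byte_size>
//   struct Atomic::PlatformAdd
//     : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
//   {
//     template<typename I, typename D>
//     D fetch_and_add(I add_value, D volatile* dest,
//                     atomic_memory_order order) const;
//   };
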
template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}
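
// For example (illustrative, with a hypothetical address): if dest is
// (T volatile*)0x1002, then aligned_dest is 0x1000 and offset is 2, so
// the byte of interest is cur_as_bytes[2] within the 32-bit word.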

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D>
struct Atomic::XchgImpl<
  T*, D*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(Translator::decay(exchange_value),
           reinterpret_cast<Decayed volatile*>(dest),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T exchange_value,
                                   T volatile* dest) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
  return XchgImpl<T, D>()(exchange_value, dest, order);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP