#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that doesn't collide with the C++11 std::memory_order
  // enumerators. We need to be more conservative than those orderings.
  memory_order_conservative = 8
};
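
// Illustrative usage (not part of the interface; the field name _state is
// hypothetical): the order argument selects between relaxed and conservative
// semantics on platforms that support weaker orderings, and callers that
// omit it get memory_order_conservative.
//
//   jint witness = Atomic::cmpxchg(new_value, &_state, expected_value,
//                                  memory_order_relaxed);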

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here, they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not; see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location.
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
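
  // Illustrative only; the store declarations this comment describes are
  // omitted from this excerpt, and the field names below are hypothetical.
  //
  //   Atomic::store((jint)0, &_counter);   // integral T equal to D
  //   Atomic::store(obj, &_cached_oop);    // pointer T convertible to D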
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest);
  }
};
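
// Illustrative only (hypothetical field): when the destination is a pointer,
// the addend is interpreted in units of the pointee type, so
//
//   HeapWord* new_top = Atomic::add((intptr_t)2, &_top);
//
// advances _top by 2 * sizeof(HeapWord) bytes. The scaling itself happens in
// FetchAndAdd/AddAndFetch below; this specialization only canonicalizes the
// addend's signedness and dispatches to PlatformAdd.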

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant 16 bits of this 32-bit word will never be affected,
// even in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
  jshort operator()(jshort add_value, jshort volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
    return (jshort)(new_value >> 16); // preserves sign
  }
};
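
// Illustrative declaration sketch (hypothetical field names): the macro
// orders the pair so the atomically updated jshort always lands in the most
// significant 16 bits of its aligned 32-bit word, matching the asserts above.
//
//   ATOMIC_SHORT_PAIR(
//     volatile jshort _flags,  // updated via Atomic::add
//     jshort          _pad     // completes the aligned 32-bit word
//   );
//
// On little-endian the macro emits the pad first; on big-endian it emits the
// atomic field first.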

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
  return old + add_value;
}
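
// FetchAndAdd (above) and AddAndFetch (below) are CRTP helpers: a platform
// derives its PlatformAdd from whichever one matches its primitive and
// supplies only the raw operation. A sketch in the style of the x86 port
// (the fetch_and_add signature here is assumed, not quoted):
//
//   template<size_t byte_size>
//   struct Atomic::PlatformAdd
//     : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
//   {
//     template<typename I, typename D>
//     D fetch_and_add(I add_value, D volatile* dest) const;
//   };
//
// operator() above then converts the fetched old value into the
// add-and-fetch result that Atomic::add returns.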

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // The current value may not be what we are looking for, so force it
  // to the compare value so the initial cmpxchg will fail if it is different.
  cur_as_bytes[offset] = canon_compare_value;

  // Always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure.
  do {
    // The value to swap in matches the current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update.
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // At least one byte in the word changed value, so update
    // our view of the current word.
    cur = res;
    // If our byte still equals the compare value, loop and try again.
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}
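
// A standalone sketch of the same byte-in-word technique using C++11
// atomics, for illustration only (this is not HotSpot code; offset is the
// byte's position within its aligned 32-bit word, e.g. 3 for a dest at
// address 0x1003 whose aligned_dest is 0x1000):
//
//   #include <atomic>
//   #include <cstdint>
//
//   inline uint8_t cas_byte(std::atomic<uint32_t>* word, size_t offset,
//                           uint8_t compare_value, uint8_t exchange_value) {
//     uint32_t cur = word->load();
//     // Force our byte to the compare value so the first CAS fails if the
//     // byte in memory differs.
//     reinterpret_cast<uint8_t*>(&cur)[offset] = compare_value;
//     do {
//       uint32_t new_value = cur;
//       reinterpret_cast<uint8_t*>(&new_value)[offset] = exchange_value;
//       // On failure, compare_exchange_strong refreshes cur from memory.
//       if (word->compare_exchange_strong(cur, new_value)) break;
//     } while (reinterpret_cast<uint8_t*>(&cur)[offset] == compare_value);
//     return reinterpret_cast<uint8_t*>(&cur)[offset];
//   }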

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,