src/share/vm/runtime/atomic.hpp
rev 13281 : imported patch Atomic_refactoring
rev 13283 : imported patch Atomic_polishing_v2
rev 13284 : [mq]: Atomic_aliasing_1
@@ -24,10 +24,12 @@
#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP
#include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@@ -39,10 +41,35 @@
memory_order_conservative = 8
};
class Atomic : AllStatic {
template<typename T> class Never: public FalseType {};
+ typedef char* CanonicalPointer;
+
+ // The canonical type: CanonicalPointer for pointer types, otherwise the canonical (signed) integer of the same size
+ template<typename T>
+ struct CanonicalType : AllStatic {
+ typedef typename Conditional<IsPointer<T>::value, CanonicalPointer, typename IntegerTypes::Signed<T>::type>::type type;
+ };
+
+ template<typename T>
+ static typename EnableIf<IsPointer<T>::value, CanonicalPointer>::type
+ cast_to_canonical(T value) { return reinterpret_cast<CanonicalPointer>(value); }
+
+ template<typename T>
+ static typename EnableIf<!IsPointer<T>::value, typename IntegerTypes::Signed<T>::type>::type
+ cast_to_canonical(T value) { return IntegerTypes::cast_to_signed(value); }
+
+ template<typename T, typename U>
+ static typename EnableIf<IsPointer<U>::value, T>::type cast_from_canonical(U value) {
+ return reinterpret_cast<T>(value);
+ }
+
+ template<typename T, typename U>
+ static typename EnableIf<!IsPointer<U>::value, T>::type cast_from_canonical(U value) {
+ return IntegerTypes::cast<T>(value);
+ }
template <typename T>
inline static void specialized_store(T store_value, volatile T* dest) {
STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
(void)const_cast<T&>(*dest = store_value);
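Taken together, CanonicalType, cast_to_canonical, and cast_from_canonical funnel every operand into one of a few raw representations: a pointer travels as CanonicalPointer (char*), anything else as the signed integer of matching size. A minimal standalone sketch of that round trip, written against the C++11 standard library rather than HotSpot's metaprogramming headers (the names and main() driver are illustrative only):

    #include <type_traits>

    typedef char* CanonicalPointer;

    // Pointer operands canonicalize to char*, preserving the bit pattern.
    template<typename T>
    typename std::enable_if<std::is_pointer<T>::value, CanonicalPointer>::type
    to_canonical(T v) { return reinterpret_cast<CanonicalPointer>(v); }

    // Integral operands canonicalize to the signed integer of the same size.
    template<typename T>
    typename std::enable_if<std::is_integral<T>::value,
                            typename std::make_signed<T>::type>::type
    to_canonical(T v) {
      return static_cast<typename std::make_signed<T>::type>(v);
    }

    int main() {
      int* p = 0;
      unsigned u = 42u;
      CanonicalPointer cp = to_canonical(p);  // pointer -> char*
      int i = to_canonical(u);                // unsigned -> signed, same size
      return (cp == 0 && i == 42) ? 0 : 1;
    }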
@@ -68,12 +95,21 @@
template <typename T>
inline static void specialized_dec(volatile T* dest) {
add(-1, dest);
}
+ // If the platform does not offer a specialization for pointers,
+ // fall back to the specialization for the signed integer of the same size
+ template <typename T>
+ inline static typename EnableIf<IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
+ typedef typename IntegerTypes::Signed<T>::type Raw;
+ Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value), reinterpret_cast<volatile Raw*>(dest));
+ return IntegerTypes::cast<T>(result);
+ }
+
template <typename T>
- inline static T specialized_xchg(T exchange_value, volatile T* dest) {
+ inline static typename EnableIf<!IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
STATIC_ASSERT(Never<T>::value);
return exchange_value;
}
template <typename T>
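The first specialized_xchg overload above is a generic fallback: a platform that only specializes the integer case still gets pointer exchange, rewritten as an exchange of the same-sized signed integer. A standalone sketch of that delegation pattern, with a plain non-atomic swap standing in for the platform primitive (illustration only, not the patch's code):

    #include <stdint.h>
    #include <type_traits>

    // Integer case: stands in for a platform-specialized primitive.
    // (Real code would issue an atomic exchange instruction here.)
    template<typename T>
    typename std::enable_if<!std::is_pointer<T>::value, T>::type
    xchg_impl(T exchange_value, volatile T* dest) {
      T old = *dest;
      *dest = exchange_value;  // NOT atomic; illustration only
      return old;
    }

    // Pointer case: reinterpret the destination as a signed integer of
    // pointer width and delegate to the integer overload above.
    template<typename T>
    typename std::enable_if<std::is_pointer<T>::value, T>::type
    xchg_impl(T exchange_value, volatile T* dest) {
      intptr_t raw = xchg_impl(reinterpret_cast<intptr_t>(exchange_value),
                               reinterpret_cast<volatile intptr_t*>(dest));
      return reinterpret_cast<T>(raw);
    }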
@@ -284,15 +320,15 @@
}
}
template <typename T, typename U>
inline U Atomic::xchg(T exchange_value, volatile U* dest) {
- typedef typename IntegerTypes::Signed<U>::type Raw;
+ typedef typename CanonicalType<U>::type Raw;
U exchange_value_cast = exchange_value;
- Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast),
+ Raw result = specialized_xchg(cast_to_canonical(exchange_value_cast),
reinterpret_cast<volatile Raw*>(dest));
- return IntegerTypes::cast<U>(result);
+ return cast_from_canonical<U>(result);
}
template <typename T, typename U, typename V>
inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
typedef typename IntegerTypes::Signed<U>::type Raw;
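Call sites are unchanged by the new dispatch: pointer and integer destinations resolve through the same public entry point. A hypothetical use, assuming this header is included (Node and _head are invented for illustration):

    struct Node;
    static Node* volatile _head = NULL;

    inline Node* swap_head_example(Node* n) {
      // U is deduced as Node*, so xchg canonicalizes the operand through
      // cast_to_canonical (to char*) and converts the raw result back
      // via cast_from_canonical<Node*>.
      return Atomic::xchg(n, &_head);
    }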