
src/hotspot/share/oops/accessBackend.inline.hpp

*** 83,121 ****
    return oop_load<T>(field_addr(base, offset));
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
    Encoded encoded_new = encode(new_value);
    Encoded encoded_compare = encode(compare_value);
!   Encoded encoded_result = atomic_cmpxchg(encoded_new,
!                                           reinterpret_cast<Encoded*>(addr),
!                                           encoded_compare);
    return decode<T>(encoded_result);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
!   return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
    typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
    Encoded encoded_new = encode(new_value);
!   Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
    return decode<T>(encoded_result);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
!   return oop_atomic_xchg(new_value, field_addr(base, offset));
  }
  
  template <DecoratorSet decorators>
  template <typename T>
  inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
--- 83,121 ----
    return oop_load<T>(field_addr(base, offset));
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
    Encoded encoded_new = encode(new_value);
    Encoded encoded_compare = encode(compare_value);
!   Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
!                                           encoded_compare,
!                                           encoded_new);
    return decode<T>(encoded_result);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
!   return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
    typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
    Encoded encoded_new = encode(new_value);
!   Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
    return decode<T>(encoded_result);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
! inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
!   return oop_atomic_xchg(field_addr(base, offset), new_value);
  }
  
  template <DecoratorSet decorators>
  template <typename T>
  inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
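
The hunk above only reorders parameters: every oop compare-and-exchange and exchange entry point now takes the destination address (or base/offset pair) first, followed by the compare value and then the new value, and still returns the value that was stored at the destination before the operation. A minimal standalone sketch of that (addr, compare_value, new_value) contract, written against std::atomic rather than HotSpot's Atomic class (the helper name cmpxchg_sketch is illustrative only):

    #include <atomic>
    #include <cassert>

    // Illustrative stand-in for the (addr, compare_value, new_value) contract:
    // install new_value only if *addr currently holds compare_value, and return
    // the value that was stored at *addr before the operation.
    template <typename T>
    T cmpxchg_sketch(std::atomic<T>* addr, T compare_value, T new_value) {
      T expected = compare_value;
      addr->compare_exchange_strong(expected, new_value);
      return expected;  // old value, whether or not the exchange happened
    }

    int main() {
      std::atomic<int> slot{5};
      assert(cmpxchg_sketch(&slot, 5, 7) == 5);  // matched: slot is now 7
      assert(cmpxchg_sketch(&slot, 5, 9) == 7);  // mismatched: slot stays 7
      return 0;
    }
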
*** 172,226 ****
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
!   Atomic::store(value, reinterpret_cast<volatile T*>(addr));
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
! RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
!   return Atomic::cmpxchg(new_value,
!                          reinterpret_cast<volatile T*>(addr),
                           compare_value,
                           memory_order_relaxed);
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
! RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
!   return Atomic::cmpxchg(new_value,
!                          reinterpret_cast<volatile T*>(addr),
                           compare_value,
                           memory_order_conservative);
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
! RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
!   return Atomic::xchg(new_value,
!                       reinterpret_cast<volatile T*>(addr));
  }
  
  // For platforms that do not have native support for wide atomics,
  // we can emulate the atomicity using a lock. So here we check
  // whether that is necessary or not.
  
  template <DecoratorSet ds>
  template <DecoratorSet decorators, typename T>
  inline typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
! RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
    if (!AccessInternal::wide_atomic_needs_locking()) {
!     return atomic_xchg_internal<ds>(new_value, addr);
    } else {
      AccessInternal::AccessLocker access_lock;
      volatile T* p = reinterpret_cast<volatile T*>(addr);
      T old_val = RawAccess<>::load(p);
      RawAccess<>::store(p, new_value);
--- 172,226 ----
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
!   Atomic::store(reinterpret_cast<volatile T*>(addr), value);
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
! RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
!   return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                           compare_value,
+                          new_value,
                           memory_order_relaxed);
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
! RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
!   return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                           compare_value,
+                          new_value,
                           memory_order_conservative);
  }
  
  template <DecoratorSet decorators>
  template <DecoratorSet ds, typename T>
  inline typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
! RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
!   return Atomic::xchg(reinterpret_cast<volatile T*>(addr),
!                       new_value);
  }
  
  // For platforms that do not have native support for wide atomics,
  // we can emulate the atomicity using a lock. So here we check
  // whether that is necessary or not.
  
  template <DecoratorSet ds>
  template <DecoratorSet decorators, typename T>
  inline typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
! RawAccessBarrier<ds>::atomic_xchg_maybe_locked(void* addr, T new_value) {
    if (!AccessInternal::wide_atomic_needs_locking()) {
!     return atomic_xchg_internal<ds>(addr, new_value);
    } else {
      AccessInternal::AccessLocker access_lock;
      volatile T* p = reinterpret_cast<volatile T*>(addr);
      T old_val = RawAccess<>::load(p);
      RawAccess<>::store(p, new_value);
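
The *_internal overloads in this hunk are selected at compile time: EnableIf plus HasDecorator discards the overload whose memory-ordering decorator (MO_RELAXED or MO_SEQ_CST) is absent from the decorator set, so the relaxed variant ends up calling Atomic::cmpxchg with memory_order_relaxed and the sequentially consistent variant with memory_order_conservative. A rough standalone sketch of that selection mechanism, using simplified stand-ins for DecoratorSet and HasDecorator (these definitions are illustrative, not HotSpot's):

    #include <type_traits>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the decorator machinery; the real HotSpot
    // definitions differ.
    typedef uint64_t DecoratorSet;
    const DecoratorSet MO_RELAXED = 1u << 0;
    const DecoratorSet MO_SEQ_CST = 1u << 1;

    template <DecoratorSet decorators, DecoratorSet decorator>
    struct HasDecorator : std::integral_constant<bool, (decorators & decorator) != 0> {};

    // Two overloads, only one of which survives substitution for a given
    // decorator set -- the same EnableIf/HasDecorator trick used by
    // atomic_cmpxchg_internal to pick the memory ordering.
    template <DecoratorSet ds>
    typename std::enable_if<HasDecorator<ds, MO_RELAXED>::value, const char*>::type
    ordering_name() { return "memory_order_relaxed"; }

    template <DecoratorSet ds>
    typename std::enable_if<HasDecorator<ds, MO_SEQ_CST>::value, const char*>::type
    ordering_name() { return "memory_order_conservative"; }

    int main() {
      std::printf("%s\n", ordering_name<MO_RELAXED>());  // memory_order_relaxed
      std::printf("%s\n", ordering_name<MO_SEQ_CST>());  // memory_order_conservative
      return 0;
    }
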
*** 230,242 ****
  
  template <DecoratorSet ds>
  template <DecoratorSet decorators, typename T>
  inline typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
! RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    if (!AccessInternal::wide_atomic_needs_locking()) {
!     return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
    } else {
      AccessInternal::AccessLocker access_lock;
      volatile T* p = reinterpret_cast<volatile T*>(addr);
      T old_val = RawAccess<>::load(p);
      if (old_val == compare_value) {
--- 230,242 ----
  
  template <DecoratorSet ds>
  template <DecoratorSet decorators, typename T>
  inline typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
! RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
    if (!AccessInternal::wide_atomic_needs_locking()) {
!     return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
    } else {
      AccessInternal::AccessLocker access_lock;
      volatile T* p = reinterpret_cast<volatile T*>(addr);
      T old_val = RawAccess<>::load(p);
      if (old_val == compare_value) {
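
As with atomic_xchg_maybe_locked in the previous hunk, the locked branch of atomic_cmpxchg_maybe_locked serializes the load/compare/store sequence behind AccessInternal::AccessLocker when the platform cannot perform the wide access natively. A standalone sketch of the same idea, assuming a plain std::mutex stands in for the access lock (names are illustrative):

    #include <mutex>

    // Illustrative stand-in for AccessInternal::AccessLocker: one global lock
    // that serializes emulated wide accesses.
    static std::mutex wide_access_lock;

    // Sketch of the locked fallback: load, compare, conditionally store, all
    // under the lock, so the sequence behaves atomically with respect to every
    // other access that also takes the lock.
    template <typename T>
    T locked_cmpxchg_sketch(volatile T* addr, T compare_value, T new_value) {
      std::lock_guard<std::mutex> guard(wide_access_lock);
      T old_val = *addr;
      if (old_val == compare_value) {
        *addr = new_value;
      }
      return old_val;
    }

    int main() {
      volatile long long wide_slot = 42;
      long long prev = locked_cmpxchg_sketch<long long>(&wide_slot, 42LL, 7LL);
      return (prev == 42 && wide_slot == 7) ? 0 : 1;
    }
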