--- old/src/hotspot/share/oops/accessBackend.hpp	2019-11-21 11:54:17.984069946 +0100
+++ new/src/hotspot/share/oops/accessBackend.hpp	2019-11-21 11:54:17.724065548 +0100
@@ -103,12 +103,12 @@
   typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
   typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
   typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
-  typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);
+  typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

   typedef T (*load_func_t)(void* addr);
   typedef void (*store_func_t)(void* addr, T value);
   typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
-  typedef T (*atomic_xchg_func_t)(T new_value, void* addr);
+  typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

   typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
@@ -303,7 +303,7 @@
   template
   static typename EnableIf<
     HasDecorator::value, T>::type
-  atomic_xchg_internal(T new_value, void* addr);
+  atomic_xchg_internal(void* addr, T new_value);

   // The following *_locked mechanisms serve the purpose of handling atomic operations
   // that are larger than a machine can handle, and then possibly opt for using
@@ -324,14 +324,14 @@
   template
   static inline typename EnableIf<
     !AccessInternal::PossiblyLockedAccess::value, T>::type
-  atomic_xchg_maybe_locked(T new_value, void* addr) {
-    return atomic_xchg_internal(new_value, addr);
+  atomic_xchg_maybe_locked(void* addr, T new_value) {
+    return atomic_xchg_internal(addr, new_value);
   }

   template
   static typename EnableIf<
     AccessInternal::PossiblyLockedAccess::value, T>::type
-  atomic_xchg_maybe_locked(T new_value, void* addr);
+  atomic_xchg_maybe_locked(void* addr, T new_value);

 public:
   template
@@ -350,8 +350,8 @@
   }

   template
-  static inline T atomic_xchg(T new_value, void* addr) {
-    return atomic_xchg_maybe_locked(new_value, addr);
+  static inline T atomic_xchg(void* addr, T new_value) {
+    return atomic_xchg_maybe_locked(addr, new_value);
   }

   template
@@ -375,9 +375,9 @@
   static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

   template
-  static T oop_atomic_xchg(T new_value, void* addr);
+  static T oop_atomic_xchg(void* addr, T new_value);
   template
-  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
+  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

   template
   static void store_at(oop base, ptrdiff_t offset, T value) {
@@ -395,8 +395,8 @@
   }

   template
-  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    return atomic_xchg(new_value, field_addr(base, offset));
+  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+    return atomic_xchg(field_addr(base, offset), new_value);
   }

   template
@@ -539,10 +539,10 @@
   typedef typename AccessFunction::type func_t;
   static func_t _atomic_xchg_func;

-  static T atomic_xchg_init(T new_value, void* addr);
+  static T atomic_xchg_init(void* addr, T new_value);

-  static inline T atomic_xchg(T new_value, void* addr) {
-    return _atomic_xchg_func(new_value, addr);
+  static inline T atomic_xchg(void* addr, T new_value) {
+    return _atomic_xchg_func(addr, new_value);
   }
 };

@@ -551,10 +551,10 @@
   typedef typename AccessFunction::type func_t;
   static func_t _atomic_xchg_at_func;

-  static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+  static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

-  static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    return _atomic_xchg_at_func(new_value, base, offset);
+  static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+    return _atomic_xchg_at_func(base, offset, new_value);
   }
 };

@@ -838,56 +838,56 @@
   template
   inline static typename EnableIf<
     HasDecorator::value && CanHardwireRaw::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
     typedef RawAccessBarrier Raw;
     if (HasDecorator::value) {
-      return Raw::oop_atomic_xchg(new_value, addr);
+      return Raw::oop_atomic_xchg(addr, new_value);
     } else {
-      return Raw::atomic_xchg(new_value, addr);
+      return Raw::atomic_xchg(addr, new_value);
     }
   }

   template
   inline static typename EnableIf<
     HasDecorator::value && !CanHardwireRaw::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
     if (UseCompressedOops) {
       const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-      return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg(addr, new_value);
     } else {
       const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-      return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg(addr, new_value);
     }
   }

   template
   inline static typename EnableIf<
     !HasDecorator::value, T>::type
-  atomic_xchg(T new_value, void* addr) {
+  atomic_xchg(void* addr, T new_value) {
     if (is_hardwired_primitive()) {
       const DecoratorSet expanded_decorators = decorators | AS_RAW;
-      return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+      return PreRuntimeDispatch::atomic_xchg(addr, new_value);
     } else {
-      return RuntimeDispatch::atomic_xchg(new_value, addr);
+      return RuntimeDispatch::atomic_xchg(addr, new_value);
     }
   }

   template
   inline static typename EnableIf<
     HasDecorator::value, T>::type
-  atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    return atomic_xchg(new_value, field_addr(base, offset));
+  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+    return atomic_xchg(field_addr(base, offset), new_value);
   }

   template
   inline static typename EnableIf<
     !HasDecorator::value, T>::type
-  atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
     if (is_hardwired_primitive()) {
       const DecoratorSet expanded_decorators = decorators | AS_RAW;
-      return PreRuntimeDispatch::atomic_xchg(new_value, base, offset);
+      return PreRuntimeDispatch::atomic_xchg(base, offset, new_value);
     } else {
-      return RuntimeDispatch::atomic_xchg_at(new_value, base, offset);
+      return RuntimeDispatch::atomic_xchg_at(base, offset, new_value);
     }
   }

@@ -1045,29 +1045,29 @@
   }

   template
-  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+  inline T atomic_xchg_reduce_types(T* addr, T new_value) {
     const DecoratorSet expanded_decorators = decorators;
-    return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg(addr, new_value);
   }

   template
-  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+  inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg(addr, new_value);
   }

   template
-  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+  inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg(addr, new_value);
   }

   template
-  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+  inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_xchg(new_value, addr);
+    return PreRuntimeDispatch::atomic_xchg(addr, new_value);
   }

   template
@@ -1224,19 +1224,19 @@
   }

   template
-  inline T atomic_xchg(T new_value, P* addr) {
+  inline T atomic_xchg(P* addr, T new_value) {
     verify_types();
     typedef typename Decay::type DecayedP;
     typedef typename Decay::type DecayedT;
     DecayedT new_decayed_value = new_value;
     // atomic_xchg is only available in SEQ_CST flavour.
     const DecoratorSet expanded_decorators = DecoratorFixup::value;
-    return atomic_xchg_reduce_types(new_decayed_value,
-                                    const_cast(addr));
+    return atomic_xchg_reduce_types(const_cast(addr),
+                                    new_decayed_value);
   }

   template
-  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
     verify_types();
     typedef typename Decay::type DecayedT;
     DecayedT new_decayed_value = new_value;
@@ -1244,7 +1244,7 @@
     const DecoratorSet expanded_decorators = DecoratorFixup::value ? INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
-    return PreRuntimeDispatch::atomic_xchg_at(new_decayed_value, base, offset);
+    return PreRuntimeDispatch::atomic_xchg_at(base, offset, new_decayed_value);
   }

   template
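
For reference, a minimal standalone sketch of the calling convention this patch converges on: the destination address comes first and the new value second, matching the reordered atomic_xchg/atomic_xchg_at signatures above. This is not part of the patch and does not use HotSpot's Access API; it is an illustrative stand-in built on std::atomic, with the function and variable names chosen here for the example only.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in only: mirrors the reordered shape
    // T atomic_xchg(void* addr, T new_value) used in the patch.
    template <typename T>
    T atomic_xchg(std::atomic<T>* addr, T new_value) {
      // Matches the "only available in SEQ_CST flavour" comment above.
      return addr->exchange(new_value, std::memory_order_seq_cst);
    }

    int main() {
      std::atomic<int64_t> field{5};
      int64_t old = atomic_xchg(&field, int64_t{7}); // addr first, then new_value
      assert(old == 5 && field.load() == 7);
      return 0;
    }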