/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/valueKlass.hpp"

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(encoded_new,
                                          reinterpret_cast<Encoded*>(addr),
                                          encoded_compare);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded_result);
}
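// Note (illustrative, not part of the upstream source): the oop_* accessors in
// this file exist because, with compressed oops, a reference field's in-heap
// representation is a narrowOop rather than an oop; encode()/decode() convert
// between the two around the raw memory access selected by the MO_* decorators.
// A hypothetical call site that bottoms out here might look like:
//
//   // raw (no GC barrier) load of the reference field at 'offset' in 'holder'
//   oop value = RawAccess<>::oop_load_at(holder, offset);
//
// which resolves to oop_load_at() -> oop_load(), decoding the stored narrowOop
// when compressed oops are in use.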
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_conservative);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
  return Atomic::xchg(new_value,
                      reinterpret_cast<volatile T*>(addr));
}
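// Note (illustrative, not part of the upstream source): the load/store/cmpxchg/xchg
// *_internal overloads above are chosen purely at compile time: EnableIf plus
// HasDecorator admit exactly one overload for a given memory-ordering decorator,
// so there is no runtime dispatch on the ordering. Assuming the usual Access API
// entry points, hypothetical call sites would resolve roughly as:
//
//   RawAccess<MO_ACQUIRE>::load(addr);     // MO_ACQUIRE overload -> OrderAccess::load_acquire
//   RawAccess<MO_RELAXED>::load(addr);     // MO_RELAXED overload -> Atomic::load
//   RawAccess<MO_SEQ_CST>::store(addr, v); // MO_SEQ_CST overload -> OrderAccess::release_store_fence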
// For platforms that do not have native support for wide atomics,
// we can emulate the atomicity using a lock. So here we check
// whether that is necessary or not.

template <DecoratorSet ds>
template <typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_xchg_internal<ds>(new_value, addr);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    RawAccess<>::store(p, new_value);
    return old_val;
  }
}

template <DecoratorSet ds>
template <typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    if (old_val == compare_value) {
      RawAccess<>::store(p, new_value);
    }
    return old_val;
  }
}

class RawAccessBarrierArrayCopy: public AllStatic {
  template<typename T> struct IsHeapWordSized: public IntegralConstant<bool, sizeof(T) == HeapWordSize> { };
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_arrayof_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // There is only a disjoint optimization for word granularity copying
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_disjoint_words_atomic(src_raw, dst_raw, length);
    } else {
      AccessInternal::arraycopy_disjoint_words(src_raw, dst_raw, length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    !HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, true> { };
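// Note (illustrative, not part of the upstream source): exactly one of the
// arraycopy overloads above is enabled for a given decorator set and element
// type T:
//   - INTERNAL_VALUE_IS_OOP                      -> oop copy (atomic per element)
//   - ARRAYCOPY_ARRAYOF                          -> arrayof conjoint copy
//   - ARRAYCOPY_DISJOINT and T heap-word sized   -> disjoint word copy (atomic variant if requested)
//   - ARRAYCOPY_ATOMIC (none of the above apply) -> conjoint atomic copy
//   - otherwise                                  -> plain conjoint copy
// For example, a hypothetical copy of jbyte elements tagged ARRAYCOPY_ATOMIC
// (not word sized, not ARRAYOF) would be routed to arraycopy_conjoint_atomic.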
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark_raw();
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, ValueKlass* md) {
  assert(is_aligned(src, md->get_alignment()) && is_aligned(dst, md->get_alignment()), "Unaligned value_copy");
  AccessInternal::arraycopy_conjoint_atomic(src, dst, static_cast<size_t>(md->get_exact_size_in_bytes()));
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP