
src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp

8198949_arraycopy

rename things

*** 23,38 ****
   */
  #ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  #define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  
! #include "gc/shared/barrierSet.inline.hpp"
  #include "gc/shared/modRefBarrierSet.hpp"
  #include "oops/klass.inline.hpp"
  #include "oops/objArrayOop.hpp"
  #include "oops/oop.hpp"
  
  template <DecoratorSet decorators, typename BarrierSetT>
  template <typename T>
  inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
  oop_store_in_heap(T* addr, oop value) {
    BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
--- 23,60 ----
   */
  #ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  #define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  
! #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/modRefBarrierSet.hpp"
  #include "oops/klass.inline.hpp"
  #include "oops/objArrayOop.hpp"
  #include "oops/oop.hpp"
  
+ // count is number of array elements being written
+ void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
+   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+   // In the case of compressed oops, start and end may potentially be misaligned;
+   // so we need to conservatively align the first downward (this is not
+   // strictly necessary for current uses, but a case of good hygiene and,
+   // if you will, aesthetics) and the second upward (this is essential for
+   // current uses) to a HeapWord boundary, so we mark all cards overlapping
+   // this write. If this evolves in the future to calling a
+   // logging barrier of narrow oop granularity, like the pre-barrier for G1
+   // (mentioned here merely by way of example), we will need to change this
+   // interface, so it is "exactly precise" (if i may be allowed the adverbial
+   // redundancy for emphasis) and does not include narrow oop slots not
+   // included in the original write interval.
+   HeapWord* aligned_start = align_down(start, HeapWordSize);
+   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
+   // If compressed oops were not being used, these should already be aligned
+   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+          "Expected heap word alignment of start and end");
+   write_ref_array_work(MemRegion(aligned_start, aligned_end));
+ }
+ 
  template <DecoratorSet decorators, typename BarrierSetT>
  template <typename T>
  inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
  oop_store_in_heap(T* addr, oop value) {
    BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
***************
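The alignment logic in the new write_ref_array above can be made concrete with a small worked example. The following standalone sketch is not HotSpot code: kHeapWordSize, kHeapOopSize, and the local align helpers are stand-ins for HeapWordSize, heapOopSize, and align_down/align_up from utilities/align.hpp. It assumes a 64-bit VM with compressed oops, where a narrow oop slot is 4 bytes but the write region handed to write_ref_array_work must cover whole HeapWords:

    // Standalone sketch of the card-mark alignment above -- not HotSpot code.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kHeapWordSize = 8; // 64-bit VM
    static const uintptr_t kHeapOopSize  = 4; // narrow oop, UseCompressedOops

    // Power-of-two alignment, mirroring align_down/align_up.
    static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }
    static uintptr_t align_up(uintptr_t p, uintptr_t a)   { return (p + a - 1) & ~(a - 1); }

    int main() {
      // A write of 3 narrow oops starting mid-HeapWord: covers [0x1004, 0x1010).
      uintptr_t start = 0x1004;
      uintptr_t end   = start + 3 * kHeapOopSize;               // 0x1010
      // Widen conservatively so every card overlapping the write is marked.
      uintptr_t aligned_start = align_down(start, kHeapWordSize); // 0x1000
      uintptr_t aligned_end   = align_up(end, kHeapWordSize);     // 0x1010
      assert(aligned_start <= start && end <= aligned_end);
      printf("dirty cards for [%#lx, %#lx)\n",
             (unsigned long)aligned_start, (unsigned long)aligned_end);
      return 0;
    }

Without compressed oops, start and end are already HeapWord-aligned and the two align calls are no-ops, which is exactly what the assert in the real function checks.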
*** 71,81 ****
  oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  
    if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
      // Optimized covariant case
!     bs->write_ref_array_pre(dst, (int)length,
                              HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
      Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
      bs->write_ref_array((HeapWord*)dst, length);
    } else {
      Klass* bound = objArrayOop(dst_obj)->element_klass();
--- 93,103 ----
  oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  
    if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
      // Optimized covariant case
!     bs->write_ref_array_pre(dst, length,
                              HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
      Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
      bs->write_ref_array((HeapWord*)dst, length);
    } else {
      Klass* bound = objArrayOop(dst_obj)->element_klass();
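The covariant fast path above always runs a fixed pre-barrier / raw copy / post-barrier sequence; the only change in this hunk is that write_ref_array_pre now takes the length as size_t, so the (int) cast is dropped. A minimal sketch of that sequence follows; SketchBarrierSet and its methods are hypothetical stand-ins, not the HotSpot classes (in the real code the pre-barrier is, for example, G1's SATB enqueue, and the post-barrier dirties card-table entries):

    // Sketch of the pre/copy/post barrier sequence -- not HotSpot code.
    #include <cstddef>
    #include <cstring>

    struct SketchBarrierSet {
      // Stand-in for write_ref_array_pre; length is size_t, matching the change.
      void write_ref_array_pre(void* dst, size_t length, bool dest_uninitialized) {
        // e.g. a SATB collector records the overwritten values here;
        // a plain card-table barrier set does nothing before the copy.
        (void)dst; (void)length; (void)dest_uninitialized;
      }
      // Stand-in for write_ref_array; dirties cards covering the written range.
      void write_ref_array(void* start, size_t count) {
        (void)start; (void)count;
      }
    };

    template <typename T>
    void covariant_oop_arraycopy(SketchBarrierSet* bs, T* src, T* dst,
                                 size_t length, bool dest_uninitialized) {
      bs->write_ref_array_pre(dst, length, dest_uninitialized); // before any store
      std::memmove(dst, src, length * sizeof(T));               // the raw element copy
      bs->write_ref_array(dst, length);                         // after all stores
    }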