/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP

#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/oop.hpp"

// count is the number of array elements being written
inline void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(count <= (size_t)max_intx, "count too large");
  HeapWord* end = (HeapWord*)((char*)start + (count * heapOopSize));
  // With compressed oops, start and end may be misaligned, so we
  // conservatively align start downward (not strictly necessary for current
  // uses, but good hygiene) and end upward (essential for current uses) to a
  // HeapWord boundary, and mark all cards overlapping this write. If this
  // ever evolves into calling a logging barrier of narrow-oop granularity
  // (such as the G1 pre-barrier, mentioned merely by way of example), this
  // interface will need to change so that it is exactly precise and does not
  // include narrow-oop slots outside the original write interval.
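  // A worked example (hypothetical addresses, assuming 8-byte HeapWords and
  // 4-byte narrow oops): a write of count = 3 narrow oops starting at 0x1004
  // spans [0x1004, 0x1010). Aligning start down and end up yields
  // [0x1000, 0x1010), so the card covering the unrelated narrow-oop slot at
  // 0x1000 may also be marked; for a card-marking post-barrier this is
  // conservative but safe.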
  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  HeapWord* aligned_end   = (HeapWord*)align_size_up((uintptr_t)end, HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
  write_ref_array_region(MemRegion(aligned_start, aligned_end));
}

template <DecoratorSet decorators, typename BarrierSetT>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_store(void* addr, oop value) {
  if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP ||
      DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
    Basic::oop_store(addr, value);
  } else {
    BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
    bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
    Basic::oop_store(addr, value);
    bs->BarrierSetT::template write_ref_field_post<decorators>(addr, value);
  }
}

template <DecoratorSet decorators, typename BarrierSetT>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_store_at(Klass* base, ptrdiff_t offset, oop value) {
  BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
  bs->BarrierSetT::klass_update_barrier_set_pre(base, (oop*)Basic::field_addr(base, offset));
  Basic::oop_store(Basic::field_addr(base, offset), value);
  bs->BarrierSetT::klass_update_barrier_set(base, (oop*)Basic::field_addr(base, offset), value);
}

template <DecoratorSet decorators, typename BarrierSetT>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_cas(oop new_value, void* addr, oop compare_value) {
  if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP ||
      DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
    return Basic::oop_cas(new_value, addr, compare_value);
  } else {
    BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
    bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
    oop result = Basic::oop_cas(new_value, addr, compare_value);
    if (result == compare_value) {
      bs->BarrierSetT::template write_ref_field_post<decorators>(addr, new_value);
    }
    return result;
  }
}

template <DecoratorSet decorators, typename BarrierSetT>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_swap(oop new_value, void* addr) {
  if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP ||
      DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
    return Basic::oop_swap(new_value, addr);
  } else {
    BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
    bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
    oop result = Basic::oop_swap(new_value, addr);
    bs->BarrierSetT::template write_ref_field_post<decorators>(addr, new_value);
    return result;
  }
}

template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_copy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
  if (DecoratorTest<decorators>::HAS_DEST_COVARIANT) {
    // Optimized case
    bs->BarrierSetT::write_ref_array_pre(dst, (int)length,
                                         DecoratorTest<decorators>::HAS_DEST_NOT_INITIALIZED);
    Basic::template oop_copy<T>(src_obj, dst_obj, src, dst, length);
    bs->BarrierSetT::write_ref_array((HeapWord*)dst, length);
  } else {
    Klass* bound = bound_for_array((oop)dst_obj);
    T* from = src;
    T* end = from + length;
    for (T* p = dst; from < end; from++, p++) {
      // XXX this is going to be slow.
      T element = *from;
      // even slower now
      bool element_is_null = oopDesc::is_null(element);
      oop new_val = element_is_null ? oop(NULL)
                                    : oopDesc::decode_heap_oop_not_null(element);
      if (element_is_null || is_bounded_by(new_val, bound)) {
        bs->BarrierSetT::template write_ref_field_pre<decorators>((void*)p);
        *p = element;
      } else {
        // We must do a barrier to cover the partial copy.
        const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
        // pointer delta is scaled to number of elements (length field in
        // objArrayOop) which we assume is 32 bit.
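        // The assert below checks that assumption explicitly: pd, the number
        // of elements already copied, must survive a round-trip through a
        // 32-bit int unchanged.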
        assert(pd == (size_t)(int)pd, "length field overflow");
        bs->BarrierSetT::write_ref_array((HeapWord*)dst, pd);
        return false;
      }
    }
    bs->BarrierSetT::write_ref_array((HeapWord*)dst, length);
  }
  return true;
}

template <DecoratorSet decorators, typename BarrierSetT>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
clone(oop src, oop dst, size_t size) {
  Basic::clone(src, dst, size);
  BarrierSetT* bs = barrier_set_cast<BarrierSetT>(barrier_set());
  bs->BarrierSetT::write_region(MemRegion((HeapWord*)(void*)dst, size));
}

#endif // SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP