1 /*
   2  * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  26 #define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
  27 
  28 #include "gc/shared/modRefBarrierSet.hpp"
  29 #include "oops/oop.hpp"
  30 
  31 // count is number of array elements being written
  32 void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  33   assert(count <= (size_t)max_intx, "count too large");
  34   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  35   // In the case of compressed oops, start and end may potentially be misaligned;
  36   // so we need to conservatively align the first downward (this is not
  37   // strictly necessary for current uses, but a case of good hygiene and,
  38   // if you will, aesthetics) and the second upward (this is essential for
  39   // current uses) to a HeapWord boundary, so we mark all cards overlapping
  40   // this write. If this evolves in the future to calling a
  41   // logging barrier of narrow oop granularity, like the pre-barrier for G1
  42   // (mentioned here merely by way of example), we will need to change this
  43   // interface, so it is "exactly precise" (if i may be allowed the adverbial
  44   // redundancy for emphasis) and does not include narrow oop slots not
  45   // included in the original write interval.
  46   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  47   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
  48   // If compressed oops were not being used, these should already be aligned
  49   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
  50          "Expected heap word alignment of start and end");
  51   write_ref_array_region(MemRegion(aligned_start, aligned_end));
  52 }
  53 
  54 template <DecoratorSet decorators, typename BarrierSetT>
  55 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
  56 oop_store(void* addr, oop value) {
  57   if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP || DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
  58     Basic::oop_store(addr, value);
  59   } else {
  60     BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  61     bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
  62     Basic::oop_store(addr, value);
  63     bs->BarrierSetT::template write_ref_field_post<decorators>(addr, value);
  64   }
  65 }
  66 
  67 template <DecoratorSet decorators, typename BarrierSetT>
  68 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
  69 oop_store_at(Klass* base, ptrdiff_t offset, oop value) {
  70   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  71   bs->BarrierSetT::klass_update_barrier_set_pre(base, (oop*)Basic::field_addr(base, offset));
  72   Basic::oop_store(Basic::field_addr(base, offset), value);
  73   bs->BarrierSetT::klass_update_barrier_set(base, (oop*)Basic::field_addr(base, offset), value);
  74 }
  75 
  76 template <DecoratorSet decorators, typename BarrierSetT>
  77 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
  78 oop_cas(oop new_value, void* addr, oop compare_value) {
  79   if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP || DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
  80     return Basic::oop_cas(new_value, addr, compare_value);
  81   } else {
  82     BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  83     bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
  84     oop result = Basic::oop_cas(new_value, addr, compare_value);
  85     if (result == compare_value) {
  86       bs->BarrierSetT::template write_ref_field_post<decorators>(addr, new_value);
  87     }
  88     return result;
  89   }
  90 }
  91 
  92 template <DecoratorSet decorators, typename BarrierSetT>
  93 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_swap(oop new_value, void* addr) {
  94   if (!DecoratorTest<decorators>::HAS_ACCESS_ON_HEAP || DecoratorTest<decorators>::HAS_ACCESS_WEAK) {
  95     return Basic::oop_swap(new_value, addr);
  96   } else {
  97     BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  98     bs->BarrierSetT::template write_ref_field_pre<decorators>(addr);
  99     oop result = Basic::oop_swap(new_value, addr);
 100     bs->BarrierSetT::template write_ref_field_post<decorators>(addr, new_value);
 101     return result;
 102   }
 103 }
 104 
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_copy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  // Copies "length" oop elements from src to dst with mod-ref barriers
  // applied around the stores.  Returns true if every element was copied;
  // returns false if the copy was cut short by an element that failed the
  // destination's element-type bound check (slow path only).  On the false
  // return, the elements copied so far have already been post-barriered.
  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());

  if (DecoratorTest<decorators>::HAS_DEST_COVARIANT) {
    // Optimized case
    // No per-element type check is needed here, so the whole range can be
    // pre-barriered, bulk-copied, and post-barriered in three steps.
    bs->BarrierSetT::write_ref_array_pre(dst, (int)length,
                                         DecoratorTest<decorators>::HAS_DEST_NOT_INITIALIZED);
    Basic::template oop_copy<T>(src_obj, dst_obj, src, dst, length);
    bs->BarrierSetT::write_ref_array((HeapWord*)dst, length);
  } else {
    // Slow path: each source element must be checked against the
    // destination array's element type bound before it may be stored.
    Klass* bound = bound_for_array((oop)dst_obj);
    T* from = src;
    T* end = from + length;
    for (T* p = dst; from < end; from++, p++) {
      // XXX this is going to be slow.
      T element = *from;
      // even slower now
      // Decode (possibly narrow) element to an oop so it can be checked
      // against the bound; null always passes.
      bool element_is_null = oopDesc::is_null(element);
      oop new_val = element_is_null ? oop(NULL)
                                    : oopDesc::decode_heap_oop_not_null(element);
      if (element_is_null || is_bounded_by(new_val, bound)) {
        // Element is storable: pre-barrier this slot, then store.
        bs->BarrierSetT::template write_ref_field_pre<decorators>((void*)p);
        *p = element;
      } else {
        // We must do a barrier to cover the partial copy.
        // pd = number of elements successfully copied before the failure.
        const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
        // pointer delta is scaled to number of elements (length field in
        // objArrayOop) which we assume is 32 bit.
        assert(pd == (size_t)(int)pd, "length field overflow");
        bs->BarrierSetT::write_ref_array((HeapWord*)dst, pd);
        return false;
      }
    }
    // All elements stored; post-barrier the full destination range.
    bs->BarrierSetT::write_ref_array((HeapWord*)dst, length);
  }
  return true;
}
 145 
 146 template <DecoratorSet decorators, typename BarrierSetT>
 147 inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
 148 clone(oop src, oop dst, size_t size) {
 149   Basic::clone(src, dst, size);
 150   BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
 151   bs->BarrierSetT::write_region(MemRegion((HeapWord*)(void*)dst, size));
 152 }
 153 
 154 #endif // SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP