/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1OopClosures.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"

/*
 * This really ought to be an inline function, but apparently the C++
 * compiler sometimes sees fit to ignore inline declarations.  Sigh.
 */

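// Filter closure: applies the wrapped closure (_oc) only to references
// that point into the collection set; all other references are ignored.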
template <class T>
inline void FilterIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop) &&
      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
    _oc->do_oop(p);
  }
}

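// Filter closure: applies the wrapped closure (_oc) only to references
// that point outside the region bounded by [_r_bottom, _r_end).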
template <class T>
inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
    if (obj_hw < _r_bottom || obj_hw >= _r_end) {
      _oc->do_oop(p);
    }
  }
}

// This closure is applied to the fields of the objects that have just been copied.
template <class T>
inline void G1ParScanClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (_g1->in_cset_fast_test(obj)) {
      // We're not going to even bother checking whether the object is
      // already forwarded or not, as this usually causes an immediate
      // stall. We'll try to prefetch the object (for write, given that
      // we might need to install the forwarding reference) and we'll
      // get back to it when we pop it from the queue.
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // slightly paranoid test; I'm trying to catch potential
      // problems before we go into push_on_queue to know where the
      // problem is coming from
      assert((obj == oopDesc::load_decode_heap_oop(p)) ||
             (obj->is_forwarded() &&
                 obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
             "p should still be pointing to obj or to its forwardee");

      _par_scan_state->push_on_queue(p);
    } else {
      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
    }
  }
}

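// Pushes references that point into the collection set onto the
// G1ParScanThreadState queue for this worker (after prefetching the
// referenced object's mark word); references outside the collection
// set are ignored.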
template <class T>
inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (_g1->in_cset_fast_test(obj)) {
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // Place on the references queue
      _par_scan_state->push_on_queue(p);
    }
  }
}

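// Concurrent marking closure: hands each reference to the marking task
// via deal_with_reference(), optionally logging the location first when
// verbose marking output is enabled.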
template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
  assert(!_g1h->is_on_master_free_list(
                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");

  oop obj = oopDesc::load_decode_heap_oop(p);
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're looking at location "
                           "*"PTR_FORMAT" = "PTR_FORMAT,
                           _task->worker_id(), p, (void*) obj);
  }
  _task->deal_with_reference(obj);
}

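// Root region scanning closure: each non-null reference whose target lies
// in a heap region is passed to _cm->grayRoot() so that concurrent marking
// will visit the referenced object.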
template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
    if (hr != NULL) {
      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
    }
  }
}

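// The next three closures are small building blocks intended to be composed:
// G1Mux2Closure applies two closures to the same field, G1TriggerClosure
// records that it was applied at all, and G1InvokeIfNotTriggeredClosure
// applies a closure only as long as an associated trigger has not fired.
//
// One possible composition (names and constructor arguments here are
// illustrative only; see g1OopClosures.hpp for the actual declarations):
//
//   G1TriggerClosure trigger;                          // fires when applied
//   G1InvokeIfNotTriggeredClosure guarded(&trigger, &payload_cl);
//   G1Mux2Closure mux(&trigger_wrapper_cl, &guarded);  // run both per field
//
// Here 'trigger_wrapper_cl' stands for any closure that applies 'trigger'
// only to references of interest, so that 'payload_cl' stops being applied
// once such a reference has been seen.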
template <class T>
inline void G1Mux2Closure::do_oop_nv(T* p) {
  // Apply first closure; then apply the second.
  _c1->do_oop(p);
  _c2->do_oop(p);
}

template <class T>
inline void G1TriggerClosure::do_oop_nv(T* p) {
  // Record that this closure was actually applied (triggered).
  _triggered = true;
}

template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
  if (!_trigger_cl->triggered()) {
    _oop_cl->do_oop(p);
  }
}

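// Remembered set update closure: depending on _record_refs_into_cset, a
// reference is either pushed to the worker's scan queue (if it points into
// the collection set) or added directly to the remembered set of the region
// containing the referenced object. See the comments in the body for details.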
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");
  assert(_from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (to != NULL && _from != to) {
    // The _record_refs_into_cset flag is true during the RSet
    // updating part of an evacuation pause. It is false at all
    // other times:
    //  * rebuilding the remembered sets after a full GC
    //  * during concurrent refinement
    //  * updating the remembered sets of regions in the collection
    //    set in the event of an evacuation failure (when deferred
    //    updates are enabled)

    if (_record_refs_into_cset && to->in_collection_set()) {
      // We are recording references that point into the collection
      // set and this particular reference does exactly that.
      // If the referenced object has already been forwarded to
      // itself, we are handling an evacuation failure: we have
      // already visited/tried to copy this object, so there is
      // no need to retry.
      if (!self_forwarded(obj)) {
        assert(_push_ref_cl != NULL, "should not be null");
        // Push the reference onto the refs queue of the G1ParScanThreadState
        // instance for this worker thread.
        _push_ref_cl->do_oop(p);
      }

      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
      return;
    }

    // We either don't care about pushing references that point into the
    // collection set (i.e. we're not in an evacuation pause) _or_
    // the reference doesn't point into the collection set. Either way
    // we add the reference directly to the RSet of the region containing
    // the referenced object.
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, _worker_i);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP