/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"

/*
 * This really ought to be an inline function, but apparently the C++
 * compiler sometimes sees fit to ignore inline declarations. Sigh.
 */

template <class T>
inline void FilterIntoCSClosure::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop) &&
      _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
    _oc->do_oop(p);
  }
}

template <class T>
inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
    if (obj_hw < _r_bottom || obj_hw >= _r_end) {
      _oc->do_oop(p);
    }
  }
}

// This closure is applied to the fields of the objects that have just been copied.
template <class T>
inline void G1ParScanClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    const InCSetState state = _g1->in_cset_state(obj);
    if (state.is_in_cset()) {
      // We're not going to even bother checking whether the object is
      // already forwarded or not, as this usually causes an immediate
      // stall.
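      // (Note: the forwarding pointer, once installed, lives in the object's
      // mark word, which another worker may be updating concurrently; see how
      // G1ParCopyClosure::do_oop_work below reads mark() and decode_pointer().)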
      // We'll try to prefetch the object (for write, given that
      // we might need to install the forwarding reference) and we'll
      // get back to it when we pop it from the queue.
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // Slightly paranoid test; I'm trying to catch potential
      // problems before we go into push_on_queue to know where the
      // problem is coming from.
      assert((obj == oopDesc::load_decode_heap_oop(p)) ||
             (obj->is_forwarded() &&
              obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
             "p should still be pointing to obj or to its forwardee");

      _par_scan_state->push_on_queue(p);
    } else {
      if (state.is_humongous()) {
        _g1->set_humongous_is_live(obj);
      } else if (state.is_ext()) {
        _par_scan_state->do_oop_ext(p);
      }
      _par_scan_state->update_rs(_from, p, obj);
    }
  }
}

template <class T>
inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    const InCSetState state = _g1->in_cset_state(obj);
    if (state.is_in_cset_or_humongous()) {
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // Place on the references queue.
      _par_scan_state->push_on_queue(p);
    } else if (state.is_ext()) {
      _par_scan_state->do_oop_ext(p);
    } else {
      assert(!_g1->is_in_cset(obj), "checking");
    }
  }
}

template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  _task->deal_with_reference(obj);
}

template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
    _cm->grayRoot(obj, hr);
  }
}

template <class T>
inline void G1Mux2Closure::do_oop_work(T* p) {
  // Apply the first closure, then the second.
  _c1->do_oop(p);
  _c2->do_oop(p);
}
void G1Mux2Closure::do_oop(oop* p)       { do_oop_work(p); }
void G1Mux2Closure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1TriggerClosure::do_oop_work(T* p) {
  // Record that this closure was actually applied (triggered).
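  // Only the flag is updated; the reference at p is left untouched.
  // G1InvokeIfNotTriggeredClosure below reads this flag to suppress further
  // applications of its wrapped closure once the trigger has fired.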
  _triggered = true;
}
void G1TriggerClosure::do_oop(oop* p)       { do_oop_work(p); }
void G1TriggerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
  if (!_trigger_cl->triggered()) {
    _oop_cl->do_oop(p);
  }
}
void G1InvokeIfNotTriggeredClosure::do_oop(oop* p)       { do_oop_work(p); }
void G1InvokeIfNotTriggeredClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj == NULL) {
    return;
  }

#ifdef ASSERT
  // Can't do this check because of races:
  // assert(obj == NULL || obj->is_oop(), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");
  assert(_from->is_in_reserved(p) ||
         (_from->is_humongous() &&
          _g1->heap_region_containing(p)->is_humongous() &&
          _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index());

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (_from == to) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values, things may have changed.
    // This check also lets references from a continues humongous region to its
    // humongous start region slip through: they are in different regions, so a
    // remembered set entry gets added. This is benign (apart from memory usage),
    // as we never try to either evacuate or eagerly reclaim these kinds of regions.
    return;
  }

  // The _record_refs_into_cset flag is true during the RSet
  // updating part of an evacuation pause. It is false at all
  // other times:
  //  * rebuilding the remembered sets after a full GC
  //  * during concurrent refinement
  //  * updating the remembered sets of regions in the collection
  //    set in the event of an evacuation failure (when deferred
  //    updates are enabled)

  if (_record_refs_into_cset && to->in_collection_set()) {
    // We are recording references that point into the collection
    // set and this particular reference does exactly that...
    // If the referenced object has already been forwarded
    // to itself, we are handling an evacuation failure and
    // we have already visited/tried to copy this object, so
    // there is no need to retry.
    if (!self_forwarded(obj)) {
      assert(_push_ref_cl != NULL, "should not be null");
      // Push the reference onto the refs queue of the G1ParScanThreadState
      // instance for this worker thread.
      _push_ref_cl->do_oop(p);
    }

    // Deferred updates to the CSet are either discarded (in the normal case),
    // or processed (if an evacuation failure occurs) at the end
    // of the collection.
    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
  } else {
    // We either don't care about pushing references that point into the
    // collection set (i.e. we're not during an evacuation pause) _or_
    // the reference doesn't point into the collection set.
    // Either way we add the reference directly to the RSet of the region
    // containing the referenced object.
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, _worker_i);
  }
}
void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p)       { do_oop_work(p); }
void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  if (_g1->heap_region_containing(new_obj)->is_young()) {
    _scanned_klass->record_modified_oops();
  }
}

void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving, so it's safe to read its size.
  _cm->grayRoot(obj);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker, so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image, which we know should not be changing.
  _cm->grayRoot(to_obj);
}

template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (oopDesc::is_null(heap_oop)) {
    return;
  }

  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it; the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierKlass) {
      do_klass_barrier(p, forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1->set_humongous_is_live(obj);
    }

    if (use_ext && state.is_ext()) {
      _par_scan_state->do_oop_ext(p);
    }
    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }
}

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
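
// A schematic sketch, not actual G1 code (the constructor arguments and the
// name `g1h` are approximate), of how the small plumbing closures above are
// meant to compose when a caller wants to know whether a scanned card holds
// any reference into the collection set:
//
//   G1TriggerClosure trigger;                    // remembers that it was applied
//   FilterIntoCSClosure filter(g1h, &trigger);   // forwards only CSet references
//   G1InvokeIfNotTriggeredClosure guarded(&trigger, &filter);
//
//   // Apply `guarded` to each reference location on the card. The first
//   // reference into the collection set fires the trigger, every later
//   // application becomes a no-op, and afterwards:
//   bool card_has_refs_into_cset = trigger.triggered();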