/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/prefetch.inline.hpp"

template <class T>
inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
  // We're not going to even bother checking whether the object is
  // already forwarded or not, as this usually causes an immediate stall.
  // We'll try to prefetch the object (for write, given that we might need
  // to install the forwarding reference) and we'll get back to it when we
  // pop it from the queue.
  Prefetch::write(obj->mark_addr(), 0);
  Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

  // Slightly paranoid test; I'm trying to catch potential
  // problems before we go into push_on_queue to know where the
  // problem is coming from.
  assert((obj == RawAccess<>::oop_load(p)) ||
         (obj->is_forwarded() &&
          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

  _par_scan_state->push_on_queue(p);
}

template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  if (state.is_humongous()) {
    _g1->set_humongous_is_live(obj);
  }
}

template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    _par_scan_state->update_rs(_from, p, obj);
  }
}

template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  oop obj = RawAccess<MO_VOLATILE>::oop_load(p);
  _task->deal_with_reference(obj);
}

template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  _cm->mark_in_next_bitmap(obj);
}

template <class T>
inline static void check_obj_during_refinement(T* p, oop const obj) {
#ifdef ASSERT
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  // Can't do this check because of races:
  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(g1->is_in_reserved(obj), "must be in heap");

  HeapRegion* from = g1->heap_region_containing(p);

  assert(from != NULL, "from region must be non-NULL");
  assert(from->is_in_reserved(p) ||
         (from->is_humongous() &&
          g1->heap_region_containing(p)->is_humongous() &&
          from->humongous_start_region() == g1->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
#endif // ASSERT
}

template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
  T o = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values, things may have changed.
    // Also this check lets slip through references from a continues humongous
    // region to its humongous start region, as they are in different regions,
    // and adds a remembered set entry.
    // This is benign (apart from memory usage), as we never try to either
    // evacuate or eagerly reclaim humongous arrays of j.l.O.
    return;
  }

  HeapRegion* to = _g1->heap_region_containing(obj);

  assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
  to->rem_set()->add_reference(p, _worker_i);
}

template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1->is_in_cset((HeapWord*)p),
         "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
         p2i(p), _g1->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    // Since the source is always from outside the collection set, here we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);
  } else {
    HeapRegion* to = _g1->heap_region_containing(obj);
    if (_from == to) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    to->rem_set()->add_reference(p, _worker_i);
  }
}

template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
  }
}

void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  if (_g1->heap_region_containing(new_obj)->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}

void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  _cm->mark_in_next_bitmap(obj);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  _cm->mark_in_next_bitmap(to_obj);
}

template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    RawAccess<>::oop_store(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it, the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierCLD) {
      do_cld_barrier(forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1->set_humongous_is_live(obj);
    }

    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause, then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }
}

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP