/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/ticks.inline.hpp"

template <class T>
inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
  // We're not going to even bother checking whether the object is
  // already forwarded or not, as this usually causes an immediate
  // stall. We'll try to prefetch the object (for write, given that
  // we might need to install the forwarding reference) and we'll
  // get back to it when we pop it from the queue.
  Prefetch::write(obj->mark_addr_raw(), 0);
  Prefetch::read(obj->mark_addr_raw(), (HeapWordSize*2));

  // Slightly paranoid test; we're trying to catch potential
  // problems before we go into push_on_queue, so that we know where
  // any problem is coming from.
  assert((obj == RawAccess<>::oop_load(p)) ||
         (obj->is_forwarded() &&
          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

  _par_scan_state->push_on_queue(p);
}
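
// Editorial sketch (not part of the original sources): a typical caller of
// prefetch_and_push() is a scan closure that has just found a reference into
// the collection set and defers the actual copy until the entry is popped
// from the queue. The closure name below is hypothetical; the load/decode
// pattern mirrors the real closures in this file.
//
//   template <class T>
//   inline void ExampleScanClosure::do_oop_nv(T* p) {
//     T heap_oop = RawAccess<>::oop_load(p);
//     if (CompressedOops::is_null(heap_oop)) {
//       return;
//     }
//     oop obj = CompressedOops::decode_not_null(heap_oop);
//     if (_g1h->in_cset_state(obj).is_in_cset()) {
//       prefetch_and_push(p, obj);  // copy happens later, when popped
//     }
//   }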

template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  if (state.is_humongous()) {
    _g1h->set_humongous_is_live(obj);
  }
}

inline void G1ScanClosureBase::trim_queue_partially() {
  _trim_ticks += _par_scan_state->trim_queue_partially();
}

inline Tickspan G1ScanClosureBase::trim_ticks_and_reset() {
  Tickspan result = _trim_ticks;
  _trim_ticks = Tickspan();
  return result;
}

template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    _par_scan_state->update_rs(_from, p, obj);
  }
}

template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  _task->deal_with_reference(p);
}

template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  _cm->mark_in_next_bitmap(_worker_id, obj);
}

template <class T>
inline static void check_obj_during_refinement(T* p, oop const obj) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We can't check that obj is a well-formed oop here because of races
  // with mutator threads.
  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(g1h->is_in_reserved(obj), "must be in heap");

  HeapRegion* from = g1h->heap_region_containing(p);

  assert(from != NULL, "from region must be non-NULL");
  assert(from->is_in_reserved(p) ||
         (from->is_humongous() &&
          g1h->heap_region_containing(p)->is_humongous() &&
          from->humongous_start_region() == g1h->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
#endif // ASSERT
}
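
// Editorial note (added for clarity): a humongous object larger than one heap
// region spans a "starts humongous" region followed by one or more "continues
// humongous" regions; for example, with 1M regions a 2.5M array occupies three
// consecutive regions. A field at address p can therefore lie in a different
// HeapRegion than the one containing the object's start, which is why the
// assert above accepts p anywhere within the same humongous object.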

template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
  T o = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values, things may have changed.
    // This check also lets references from a "continues humongous" region to its
    // "starts humongous" region slip through; since they are in different regions,
    // a remembered set entry is added for them. This is benign (apart from memory
    // usage), as we never try to either evacuate or eagerly reclaim humongous
    // arrays of java.lang.Object.
    return;
  }

  HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();

  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
  if (to_rem_set->is_tracked()) {
    to_rem_set->add_reference(p, _worker_i);
  }
}

template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1h->is_in_cset((HeapWord*)p),
         "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
         p2i(p), _g1h->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    // Since the source is always outside the collection set, we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);
  } else {
    HeapRegion* to = _g1h->heap_region_containing(obj);
    if (_from == to) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    to->rem_set()->add_reference(p, _worker_i);
  }
}

template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);

  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
  }
}

void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  if (_g1h->heap_region_containing(new_obj)->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}

inline Tickspan G1ParCopyHelper::trim_ticks_and_reset() {
  Tickspan result = _trim_ticks;
  _trim_ticks = Tickspan();
  return result;
}

void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving, so it's safe to read its size.
  _cm->mark_in_next_bitmap(_worker_id, obj);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker, so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image, which we know should not be changing.
  _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
}
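
// Editorial sketch (not part of the original sources): trim_queue_partially()
// below folds the time spent trimming into _trim_ticks; a caller is expected
// to harvest that time periodically via trim_ticks_and_reset(). The recording
// step below is a hypothetical illustration, not an actual call site:
//
//   closure.trim_queue_partially();
//   Tickspan trim_time = closure.trim_ticks_and_reset();
//   // ... attribute trim_time to the current worker's phase times ...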

void G1ParCopyHelper::trim_queue_partially() {
  _trim_ticks += _par_scan_state->trim_queue_partially();
}

template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark_raw();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it; the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierCLD) {
      do_cld_barrier(forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1h->set_humongous_is_live(obj);
    }

    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause, then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }
  trim_queue_partially();
}

template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
  if (obj == NULL) {
    return;
  }

  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }

  HeapRegion* to = _g1h->heap_region_containing(obj);
  HeapRegionRemSet* rem_set = to->rem_set();
  rem_set->add_reference(p, _worker_id);
}

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP