/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"

inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
  return !_g1h->is_obj_ill(obj);
}

inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  // Re-check whether the passed object is null. With ReferentBasedDiscovery the
  // mutator may have changed the referent's value (i.e. cleared it) between the
  // time the referent was determined to be potentially alive and calling this
  // method.
  if (obj == NULL) {
    return false;
  }
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
  return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(worker_id, hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
  assert(hr != NULL, "just checking");
  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

  if (hr->obj_allocated_since_next_marking(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above nTAMS after humongous reclaim.
  // Can't assert that this is a valid object at this point, since it might be
  // in the process of being copied by another thread.
  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));

  if (hr->is_open_archive()) {
    log_debug(gc)("mark OA obj " PTR_FORMAT, p2i(obj));
  }
  bool success = _next_mark_bitmap->par_mark(obj);
  if (success) {
    add_to_liveness(worker_id, obj, obj->size());
  }
  return success;
}
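// A minimal illustrative call site for the overloads above (a sketch only;
// the names cm, worker_id and obj are assumed to be in scope at the caller):
//
//   if (cm->mark_in_next_bitmap(worker_id, obj)) {
//     // This thread was the first to mark obj; its size has been added to
//     // the per-region liveness statistics via add_to_liveness().
//   } else {
//     // Either obj was allocated since nTAMS (and is implicitly live), or
//     // another worker won the race to set the bit; nothing was recorded.
//   }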
#ifndef PRODUCT
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint_on_vm_thread();

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != NULL) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif

// Scans the given object and visits its children.
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }

inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.obj())), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant"); // FIXME!!!
  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}
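// A sketch of the address intervals the check above distinguishes, for a
// task that currently owns a region (addresses increase to the right):
//
//   ... _finger ... _region_limit ... global_finger ...
//
//   objAddr <  _finger                        -> true  (local scan already passed it; push)
//   _finger <= objAddr < _region_limit        -> false (local scan will still reach it)
//   _region_limit <= objAddr < global_finger  -> true  (global scan already passed it; push)
//   global_finger <= objAddr                  -> false (global scan will still reach it)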
template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
  assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_array_slice()) {
      _words_scanned += _objArray_processor.process_slice(task_entry.slice());
    } else {
      oop obj = task_entry.obj();
      if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
        _words_scanned += _objArray_processor.process_obj(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}

inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}

inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const {
  assert(region < _g1h->max_reserved_regions(), "Tried to access TARS for region %u out of bounds", region);
  return _top_at_rebuild_starts[region];
}

inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
  uint const region = r->hrm_index();
  assert(region < _g1h->max_reserved_regions(), "Tried to access TARS for region %u out of bounds", region);
  assert(_top_at_rebuild_starts[region] == NULL,
         "TARS for region %u has already been set to " PTR_FORMAT " should be NULL",
         region, p2i(_top_at_rebuild_starts[region]));
  G1RemSetTrackingPolicy* tracker = _g1h->policy()->remset_tracker();
  if (tracker->needs_scan_for_rebuild(r)) {
    _top_at_rebuild_starts[region] = r->top();
  } else {
    // Leave TARS at NULL.
  }
}

inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
  _mark_stats_cache.add_live_words(_g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), obj_size);
}

inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
  task(worker_id)->update_liveness(obj, size);
}

inline void G1CMTask::abort_marking_if_regular_check_fail() {
  if (!regular_clock_call()) {
    set_has_aborted();
  }
}

inline bool G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
    return false;
  }

  // No OrderAccess::storeload() is needed. It is implicit in the
  // CAS done in the G1CMBitMap::par_mark() call in the routine above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined. Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack. This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects. The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry);
    } else {
      push(entry);
    }
  }
  return true;
}
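// An illustrative summary (a sketch, not an exhaustive trace) of how a single
// field visit flows through the marking routines in this file, starting from
// deal_with_reference() below:
//
//   deal_with_reference(p)
//     obj = RawAccess<MO_RELAXED>::oop_load(p)       // NULL -> nothing to do
//     make_reference_grey(obj)
//       mark_in_next_bitmap(_worker_id, obj)         // false: already marked or implicitly live
//       is_below_finger(obj, global_finger)          // push only if the scan already passed obj
//         obj->is_typeArray()
//           ? process_grey_task_entry<false>(entry)  // no references: bookkeeping only
//           : push(entry)                            // queue for a later scan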
template <class T>
inline bool G1CMTask::deal_with_reference(T* p) {
  increment_refs_reached();
  oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
  if (obj == NULL) {
    return false;
  }
  return make_reference_grey(obj);
}

inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
  assert(!_prev_mark_bitmap->is_marked(p), "sanity");
  _prev_mark_bitmap->mark(p);
}

bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _prev_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
}

bool G1ConcurrentMark::is_marked_in_next_bitmap(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
}

inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP