/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP

#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"

// Utility routine to set an exclusive range of cards on the given
// card liveness bitmap. The bits in [start_idx, end_idx) are set
// either one at a time (small ranges) or via the BitMap range
// primitives; is_par selects the atomic ('par') variants for bitmaps
// that may be updated concurrently by several workers.
inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
                                                  BitMap::idx_t start_idx,
                                                  BitMap::idx_t end_idx,
                                                  bool is_par) {

  // Set the exclusive bit range [start_idx, end_idx).
  assert((end_idx - start_idx) > 0, "at least one card");
  assert(end_idx <= card_bm->size(), "sanity");

  // Silently clip the end index to the bitmap size in product builds
  // (debug builds assert above that no clipping should be needed).
  end_idx = MIN2(end_idx, card_bm->size());

  // For small ranges use a simple loop; otherwise use set_range or
  // use par_at_put_range (if parallel). The range is made up of the
  // cards that are spanned by an object/mem region so 8 cards will
  // allow up to object sizes up to 4K to be handled using the loop.
  if ((end_idx - start_idx) <= 8) {
    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      if (is_par) {
        card_bm->par_set_bit(i);
      } else {
        card_bm->set_bit(i);
      }
    }
  } else {
    // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
    if (is_par) {
      card_bm->par_at_put_range(start_idx, end_idx, true);
    } else {
      card_bm->set_range(start_idx, end_idx);
    }
  }
}

// Returns the index in the liveness accounting card bitmap
// for the given address.
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
  // Below, the term "card num" means the result of shifting an address
  // by the card shift -- address 0 corresponds to card number 0. One
  // must subtract the card num of the bottom of the heap to obtain a
  // card table index.
  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  return card_num - heap_bottom_card_num();
}

// Counts the given memory region in the given task/worker
// counting data structures: adds the region's byte size to the
// task-local marked-bytes entry for its heap region, and sets the
// bits for all cards the region spans in the task-local card bitmap.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();

  HeapWord* start = mr.start();
  HeapWord* end = mr.end();
  size_t region_size_bytes = mr.byte_size();
  uint index = hr->hrm_index();

  // mr must lie entirely within a single, non-continuation region.
  assert(!hr->is_continues_humongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");

  // Add to the task local marked bytes for this region.
  marked_bytes_array[index] += region_size_bytes;

  BitMap::idx_t start_idx = card_bitmap_index_for(start);
  BitMap::idx_t end_idx = card_bitmap_index_for(end);

  // Note: if we're looking at the last region in heap - end
  // could be actually just beyond the end of the heap; end_idx
  // will then correspond to a (non-existent) card that is also
  // just beyond the heap.
  if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
    // end of region is not card aligned - increment to cover
    // all the cards spanned by the region.
    end_idx += 1;
  }
  // The card bitmap is task/worker specific => no need to use
  // the 'par' BitMap routines.
  // Set bits in the exclusive bit range [start_idx, end_idx).
  set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}

// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr,
                                         HeapRegion* hr,
                                         uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the given task/worker counting data structures,
// by counting the memory region the object's header/size describes.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  MemRegion mr((HeapWord*)obj, obj->size());
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               size_t* marked_bytes_array,
                                               BitMap* task_card_bm) {
  HeapWord* addr = (HeapWord*)obj;
  // parMark() is a CAS-based set; only the winning thread gets 'true'
  // and thus performs the counting, so the object is counted once.
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, marked_bytes_array, task_card_bm);
    return true;
  }
  return false;
}

// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // The caller supplies word_size explicitly (rather than reading
    // obj->size()) since obj may not be in a consistent state yet.
    MemRegion mr(addr, word_size);
    count_region(mr, hr, worker_id);
    return true;
  }
  return false;
}

// Applies cl->do_bit() to the offset of every set bit in the
// intersection of this bitmap's coverage and mr. Iteration advances
// object by object (via nextObject) and aborts early, returning
// false, as soon as do_bit() returns false; returns true otherwise.
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      // Skip to the word past the current object (clamped to the end)
      // before searching for the next set bit.
      HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}

// Convenience overload: iterates over the bitmap's entire coverage.
inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
  MemRegion mr(startWord(), sizeInWords());
  return iterate(cl, mr);
}

// Sanity checks that addr lies inside the space underlying this bitmap
// and inside the committed part of the heap before any bit is accessed.
#define check_mark(addr)                                                       \
  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
         "outside underlying space?");                                         \
  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
         "Trying to access not available bitmap " PTR_FORMAT                   \
         " corresponding to " PTR_FORMAT " (%u)",                              \
         p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr));

// Sets the bit for addr (non-atomic; single-writer use).
inline void CMBitMap::mark(HeapWord* addr) {
  check_mark(addr);
  _bm.set_bit(heapWordToOffset(addr));
}

// Clears the bit for addr (non-atomic; single-writer use).
inline void CMBitMap::clear(HeapWord* addr) {
  check_mark(addr);
  _bm.clear_bit(heapWordToOffset(addr));
}

// Atomically sets the bit for addr; returns true iff this call
// changed the bit (i.e. this thread won the race to mark).
inline bool CMBitMap::parMark(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_set_bit(heapWordToOffset(addr));
}

// Atomically clears the bit for addr; returns true iff this call
// changed the bit.
inline bool CMBitMap::parClear(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_clear_bit(heapWordToOffset(addr));
}

#undef check_mark

// Applies fn to every entry currently on the mark stack, bottom-up.
// Asserts that the stack has not changed since the index was saved.
template<typename Fn>
inline void CMMarkStack::iterate(Fn fn) {
  assert(_saved_index == _index, "saved index: %d index: %d", _saved_index, _index);
  for (int i = 0; i < _index; ++i) {
    fn(_base[i]);
  }
}

// It scans an object and visits its children.
inline void CMTask::scan_object(oop obj) { process_grey_object<true>(obj); }

// Pushes obj onto the task-local queue; if the local queue is full,
// spills some entries to the global mark stack first and retries.
inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }
}

// Returns true if a newly-marked obj would be missed by the ongoing
// bitmap scan and must therefore be pushed on the mark stack.
inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}

// Accounts obj's size towards this task's scanned-words total and,
// when the 'scan' template parameter is true, visits obj's oop fields
// with the CM oop closure. Finishes by checking the task's limits.
// scan == false is only legal for typeArrays, which hold no references.
template<bool scan>
inline void CMTask::process_grey_object(oop obj) {
  assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

  size_t obj_size = obj->size();
  _words_scanned += obj_size;

  if (scan) {
    obj->oop_iterate(_cm_oop_closure);
  }
  check_limits();
}



// Marks and counts obj; if this task's mark won the race and the
// bitmap scan has already passed obj's address, arranges for obj to
// be processed (pushed, or handled immediately for typeArrays).
inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
  if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
    // No OrderAccess:store_load() is needed. It is implicit in the
    // CAS done in CMBitMap::parMark() call in the routine above.
    HeapWord* global_finger = _cm->finger();

    // We only need to push a newly grey object on the mark
    // stack if it is in a section of memory the mark bitmap
    // scan has already examined. Mark bitmap scanning
    // maintains progress "fingers" for determining that.
    //
    // Notice that the global finger might be moving forward
    // concurrently. This is not a problem. In the worst case, we
    // mark the object while it is above the global finger and, by
    // the time we read the global finger, it has moved forward
    // past this object. In this case, the object will probably
    // be visited when a task is scanning the region and will also
    // be pushed on the stack. So, some duplicate work, but no
    // correctness problems.
    if (is_below_finger(obj, global_finger)) {
      if (obj->is_typeArray()) {
        // Immediately process arrays of primitive types, rather
        // than pushing on the mark stack. This keeps us from
        // adding humongous objects to the mark stack that might
        // be reclaimed before the entry is processed - see
        // selection of candidates for eager reclaim of humongous
        // objects. The cost of the additional type test is
        // mitigated by avoiding a trip through the mark stack,
        // by only doing a bookkeeping update and avoiding the
        // actual scan of the object - a typeArray contains no
        // references, and the metadata is built-in.
        process_grey_object<false>(obj);
      } else {
        push(obj);
      }
    }
  }
}

// Handles a reference discovered during marking: greys it if it is an
// in-heap object that is not yet marked and was allocated before the
// current marking cycle started (objects allocated since NTAMS are
// implicitly live and need no marking).
inline void CMTask::deal_with_reference(oop obj) {
  increment_refs_reached();

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        make_reference_grey(obj, hr);
      }
    }
  }
}

// Marks p on the previous bitmap (non-atomically).
inline void ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}

// Marks and counts the given root object on the next bitmap, but only
// if it lies below its region's next-top-at-mark-start (objects at or
// above NTAMS are implicitly live for this cycle). hr may be NULL, in
// which case the containing region is looked up.
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
                                     uint worker_id, HeapRegion* hr) {
  assert(obj != NULL, "pre-condition");
  HeapWord* addr = (HeapWord*) obj;
  if (hr == NULL) {
    hr = _g1h->heap_region_containing_raw(addr);
  } else {
    assert(hr->is_in(addr), "pre-condition");
  }
  assert(hr != NULL, "sanity");
  // Given that we're looking for a region that contains an object
  // header it's impossible to get back a HC region.
  assert(!hr->is_continues_humongous(), "sanity");

  // We cannot assert that word_size == obj->size() given that obj
  // might not be in a consistent state (another thread might be in
  // the process of copying it). So the best thing we can do is to
  // assert that word_size is under an upper bound which is its
  // containing region's capacity.
  assert(word_size * HeapWordSize <= hr->capacity(),
         "size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
         word_size * HeapWordSize, hr->capacity(),
         HR_FORMAT_PARAMS(hr));

  if (addr < hr->next_top_at_mark_start()) {
    if (!_nextMarkBitMap->isMarked(addr)) {
      par_mark_and_count(obj, word_size, hr, worker_id);
    }
  }
}

#endif // SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP