/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
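// It tracks, per region, how far remembered set scanning has progressed
// (Unclaimed -> Claimed -> Complete), records the set of "dirty" regions whose
// card table ranges must be cleared after the pause, and snapshots each
// region's top() to bound scanning.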
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each of the transitions the thread that does the transition needs to perform
  // some special action once. This is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.

  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions we used to store remembered set scan duplicate
  // information. These are also called "dirty". Valid entries are from [0.._cur_dirty_region).
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region whether it is in the _dirty_region_buffer already
  // to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
  private:
    HeapWord** _scan_top;
  public:
    G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (!r->in_collection_set() && r->is_old_or_humongous_or_archive() && !r->is_empty()) {
        _scan_top[hrm_index] = r->top();
      } else {
        _scan_top[hrm_index] = NULL;
      }
      return false;
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
    if (_scan_top != NULL) {
      FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
      _scan_top[i] = NULL;
    }

    G1ResetScanTopClosure cl(_scan_top);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);

    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
    return (res == Unclaimed);
  }

  // Try to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }

  // Claim the next block of cards within the remembered set of the region with
  // the given step size.
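  // For example, with step == G1RSetScanBlockSize a thread that is returned the
  // value N has claimed cards [N, N + step) of this region's remembered set.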
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }

  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(),
  _g1h(g1h),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1h->policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}

uint G1RemSet::num_par_rem_sets() {
  return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   G1GCPhaseTimes::GCParPhases phase,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _phase(phase),
  _worker_i(worker_i),
  _cards_scanned(0),
  _cards_claimed(0),
  _cards_skipped(0),
  _rem_set_root_scan_time(),
  _rem_set_trim_partially_time(),
  _strong_code_root_scan_time(),
  _strong_code_trim_partially_time() {
}

void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card) {
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  assert(!card_region->is_young(), "Should not scan card in young region %u", region_idx_for_card);
  card_region->oops_on_card_seq_iterate_careful<true>(mr,
                                                      _scan_objs_on_card_cl);
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}

void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  uint const region_idx = r->hrm_index();

  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently therefore we won't need to
    // add regions of the collection set to the dirty cards region.
    _scan_state->add_dirty_region(region_idx);
  }

  if (r->rem_set()->cardset_is_empty()) {
    return;
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    HeapWord* const card_start = _g1h->bot()->address_for_index_raw(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

#ifdef ASSERT
    HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card);
    assert(hr == NULL || hr->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
#endif
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  // We pass a weak code blobs closure to the remembered set scanning because we do
  // not want the nmethods we visit to act as roots for concurrent marking.
  // We only want to make sure that the oops in the nmethods are adjusted with regard to the
  // objects copied by the current evacuation.
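  // Only nmethods recorded in this region's strong code roots list, i.e. those
  // with embedded oops pointing into the region, are visited here.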
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}

bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(),
         "Should only be called on elements of the collection set but region %u is not.",
         r->hrm_index());
  uint const region_idx = r->hrm_index();

  // Do an early out if we know we are complete.
  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }

  {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_rem_set_roots(r);
  }

  if (_scan_state->set_iter_complete(region_idx)) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());

  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public G1CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
    _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
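    // Hence the safepoint assert below: during an evacuation pause all
    // refinement through this closure happens inside the safepoint.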
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

    bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);

    if (card_scanned) {
      _update_rs_cl->trim_queue_partially();
      _cards_scanned++;
    } else {
      _cards_skipped++;
    }
    return true;
  }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t cards_skipped() const { return _cards_skipped; }
};

void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1GCPhaseTimes* p = _g1p->phase_times();

  // Apply closure to log entries in the HCC.
  if (G1HotCardCache::default_use_cache()) {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);

    G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
    _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
  }

  // Now apply the closure to all remaining log entries.
  {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);

    G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
    _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);

    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
  }
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
  update_rem_set(pss, worker_i);
  scan_rem_set(pss, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1h->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                        uint worker_i) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young.
  // Cards for young regions are distinctly marked (set to g1_young_gen), so the
  // post-barrier will filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are only allocated in the old-gen during
  // GC, so if region is old then top is stable. Humongous object
  // allocation sets top last; if top has not yet been set, this is
  // a stale card and we'll end up with an empty intersection. If
  // this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

  // This fence serves two purposes. First, the card must be cleaned
  // before processing the contents. Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation.
  // It's okay that reading top and reading the region type were racy with
  // respect to each other. We need both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card. Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card. Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != G1CardTable::dirty_card_val()) {
      *card_ptr = G1CardTable::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      G1DirtyCardQueue* sdcq =
        G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
  }
}

bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit == NULL) {
    // This is a card into an uncommitted region. We need to bail out early as we
    // should not access the corresponding card table entry.
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  _scan_state->add_dirty_region(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
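  // (With the default 512 byte cards and 8 byte heap words,
  // card_size_in_words is 64; these defaults are an assumption here, the
  // code only relies on the constant itself.)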
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  assert(!card_region->is_young(), "Should not scan card in young region %u", card_region_idx);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current(this);
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

class G1RebuildRemSetTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the number of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) if either
    // a) it is marked on the bitmap as such, or
    // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
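    // Objects below TAMS are treated as live only if marked in the given bitmap;
    // objects at or above TAMS are implicitly live, having been allocated during
    // marking.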
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use the
          // bitmap immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes between bottom and TAMS.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
          // two areas will be zero sized. I.e. TAMS is either
          // the same as bottom or top(_at_rebuild_start).
          // There is no way TAMS has a different value: this would mean that TAMS
          // points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We cannot just use the region type because there
      // might already have been new allocations into these regions.
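      // Re-read the TARS for the check below; it may have become NULL if the
      // region was eagerly reclaimed at the last yield point.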
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _hr_claimer(n_workers),
    _cm(cm),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}