/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
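// It tracks the per-region claim state and scan progress, and records which regions had
// cards marked on their card table during the scan ("dirty" regions) so that only those
// parts of the card table need to be cleared afterwards.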
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each of the transitions the thread that does the transition needs to perform
  // some special action once. This is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.

  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions that got cards marked during remembered set
  // scanning and update, i.e. the regions whose card table needs to be cleared after the
  // collection. These regions are also called "dirty". Valid entries are from
  // [0.._cur_dirty_region).
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region indicating whether it is already in the
  // _dirty_region_buffer, to avoid duplicate entries. Uses jbyte since there are no atomic
  // instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
  private:
    HeapWord** _scan_top;
  public:
    G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) {
        _scan_top[hrm_index] = r->top();
      } else {
        _scan_top[hrm_index] = r->bottom();
      }
      return false;
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
    if (_scan_top != NULL) {
      FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
    }

    G1ResetScanTopClosure cl(_scan_top);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);

    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
    return (res == Unclaimed);
  }

  // Try to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }

  // Claim the next block of cards within the remembered set of the region with the
  // given step size.
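  // Returns the first card index of the newly claimed block.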
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }

  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(),
  _g1h(g1h),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1h->g1_policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}

uint G1RemSet::num_par_rem_sets() {
  return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _worker_i(worker_i),
  _cards_scanned(0),
  _cards_claimed(0),
  _cards_skipped(0),
  _rem_set_root_scan_time(),
  _rem_set_trim_partially_time(),
  _strong_code_root_scan_time(),
  _strong_code_trim_partially_time() {
}

void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card) {
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  _scan_objs_on_card_cl->set_region(card_region);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}

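// Scan the remembered set of the given collection set region. Claims blocks of cards and
// scans the heap areas they refer to for references into the collection set.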
void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  uint const region_idx = r->hrm_index();

  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also clear the card
    // table concurrently; then we would not need to add regions of the collection set
    // to the list of dirty regions.
    _scan_state->add_dirty_region(region_idx);
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    HeapWord* const card_start = _g1h->bot()->address_for_index(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

    assert(_g1h->region_at(region_idx_for_card)->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanRS));
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}

bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(),
         "Should only be called on elements of the collection set but region %u is not.",
         r->hrm_index());
  uint const region_idx = r->hrm_index();

  // Do an early out if we know we are complete.
  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }

  {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_rem_set_roots(r);
  }

  if (_scan_state->set_iter_complete(region_idx)) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());

  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
    _g1rs(g1h->g1_rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

    bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);

    if (card_scanned) {
      _update_rs_cl->trim_queue_partially();
      _cards_scanned++;
    } else {
      _cards_skipped++;
    }
    return true;
  }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t cards_skipped() const { return _cards_skipped; }
};

void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1GCPhaseTimes* p = _g1p->phase_times();

  // Apply closure to log entries in the HCC.
  if (G1HotCardCache::default_use_cache()) {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);

    G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss, worker_i);
    G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
    _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
  }

  // Now apply the closure to all remaining log entries.
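  // These are the buffered dirty cards that concurrent refinement has not processed yet.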
  {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);

    G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss, worker_i);
    G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
    _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);

    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
  }
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
  update_rem_set(pss, worker_i);
  scan_rem_set(pss, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                        uint worker_i) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing(start);

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are only allocated in the old-gen during
  // GC, so if region is old then top is stable. Humongous object
  // allocation sets top last; if top has not yet been set, this is
  // a stale card and we'll end up with an empty intersection. If
  // this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

  // This fence serves two purposes. First, the card must be cleaned
  // before processing the contents. Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation. It's okay that
  // reading top and reading type were racy wrt each other. We need
  // both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card. Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card. Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != G1CardTable::dirty_card_val()) {
      *card_ptr = G1CardTable::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
  }
}

bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  _scan_state->add_dirty_region(card_region_idx);
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current(this);
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

class G1RebuildRemSetTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the amount of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) if either
    // a) it is marked on the bitmap as such, or
    // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use the bitmap
          // immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes between bottom and TAMS.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
          // two areas will be zero sized. I.e. TAMS is either
          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
          // value: this would mean that TAMS points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
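        // A NULL TARS means that remembered set rebuilding is not (or no longer) needed
        // for this region.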
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We can not just use region type because there
      // might have already been new allocations into these regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _hr_claimer(n_workers),
    _cm(cm),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}