/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      G1CardTable* ct = _g1h->g1_card_table();

      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            ct->clear(MemRegion(r->bottom(), r->end()));
          }
        }
      }
    }
  };

  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each transition the thread performing it needs to carry out a particular
  // action exactly once; this is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.
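  // Illustrative lifecycle (a sketch, not additional synchronization): the first
  // thread whose claim_iter() cmpxchg succeeds moves the state Unclaimed -> Claimed
  // and, as its one-time action, registers the region as dirty; any thread may then
  // help scan the remembered set in blocks; the thread whose set_iter_complete()
  // cmpxchg succeeds moves Claimed -> Complete and, as its one-time action, scans
  // the region's strong code roots.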
  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions in whose card tables we stored duplicate-scan
  // information during remembered set scanning. These are also called "dirty".
  // Valid entries are in [0.._cur_dirty_region).
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region indicating whether it is already in the
  // _dirty_region_buffer, to avoid duplicates. Uses jbyte since there are no
  // atomic instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
    }
    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
    return (res == Unclaimed);
  }

  // Try to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }

  // Claim the next block of cards within the remembered set of the region with
  // the given step size.
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }
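  // Example for iter_claimed_next (illustrative): with step == 64, concurrent
  // claimers receive disjoint blocks starting at 0, 64, 128, ... Atomic::add
  // returns the post-increment value, so subtracting step again yields the start
  // index of the block just claimed.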
  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_size_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
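    // Worked example (a sketch; exact numbers depend on flags): assuming 1M heap
    // regions and the default 512-byte cards, CardsPerRegion == 2048, so each chunk
    // of M == 1048576 cards covers chunk_length == 512 regions. 1000 dirty regions
    // then amount to 2048000 cards, i.e. num_chunks == 2, and at most two workers
    // will be used.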
    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    // Need to synchronize with concurrent cleanup since it needs to
    // finish its card table clearing before we can verify.
    G1CollectedHeap::heap()->wait_while_free_regions_coming();
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _g1(g1),
  _scan_state(new G1RemSetScanState()),
  _conc_refine_cards(0),
  _ct(ct),
  _g1p(_g1->g1_policy()),
  _hot_card_cache(hot_card_cache),
  _prev_period_summary(),
  _into_cset_dirty_card_queue_set(false)
{
  if (log_is_enabled(Trace, gc, remset)) {
    _prev_period_summary.initialize(this);
  }
  // Initialize the card queue set used to hold cards containing
  // references into the collection set.
  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
                                             DirtyCardQ_CBL_mon,
                                             DirtyCardQ_FL_lock,
                                             -1, // never trigger processing
                                             -1, // no limit on length
                                             Shared_DirtyCardQ_lock,
                                             &G1BarrierSet::dirty_card_queue_set());
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}
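// A sketch of the reasoning behind the MAX2 below: concurrently, remembered sets
// can be touched by mutator threads (via the dirty card queue par ids) and by the
// concurrent refinement threads; during a pause, at most ParallelGCThreads workers
// update them. The per-thread state (e.g. G1FromCardCache) must be sized for the
// larger of the two.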
uint G1RemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
  {
    GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
    _card_live_data.initialize(capacity, max_regions);
  }
  if (G1PretouchAuxiliaryMemory) {
    GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data");
    _card_live_data.pretouch();
  }
}

G1ScanRSClosure::G1ScanRSClosure(G1RemSetScanState* scan_state,
                                 G1ParPushHeapRSClosure* push_heap_cl,
                                 CodeBlobClosure* code_root_cl,
                                 uint worker_i) :
  _scan_state(scan_state),
  _push_heap_cl(push_heap_cl),
  _code_root_cl(code_root_cl),
  _strong_code_root_scan_time_sec(0.0),
  _cards(0),
  _cards_done(0),
  _worker_i(worker_i) {
  _g1h = G1CollectedHeap::heap();
  _bot = _g1h->bot();
  _ct = _g1h->g1_card_table();
  _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}

void G1ScanRSClosure::scan_card(size_t index, HeapRegion *r) {
  // Stack allocate the DirtyCardToOopClosure instance
  HeapRegionDCTOC cl(_g1h, r, _push_heap_cl, G1CardTable::Precise);

  // Set the "from" region in the closure.
  _push_heap_cl->set_region(r);
  MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
  MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
  MemRegion mr = pre_gc_allocated.intersection(card_region);
  if (!mr.is_empty() && !_ct->is_card_claimed(index)) {
    // We mark the card as "claimed" lazily (so races are possible
    // but they're benign), which reduces the number of duplicate
    // scans (the rsets of the regions in the cset can intersect).
    _ct->set_card_claimed(index);
    _cards_done++;
    cl.do_MemRegion(mr);
  }
}

void G1ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
  double scan_start = os::elapsedTime();
  r->strong_code_roots_do(_code_root_cl);
  _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
}

bool G1ScanRSClosure::doHeapRegion(HeapRegion* r) {
  assert(r->in_collection_set(), "should only be called on elements of CS.");
  uint region_idx = r->hrm_index();

  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }
  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently, and then we would not need to
    // add regions of the collection set to the dirty cards region list.
    _scan_state->add_dirty_region(region_idx);
  }

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  // We claim cards in blocks to reduce contention. The block size is
  // determined by the G1RSetScanBlockSize parameter.
  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, _block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + _block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, _block_size);
    }
    if (current_card < claimed_card_block) {
      continue;
    }
    HeapWord* card_start = _g1h->bot()->address_for_index(card_index);

    HeapRegion* card_region = _g1h->heap_region_containing(card_start);
    _cards++;

    _scan_state->add_dirty_region(card_region->hrm_index());

    // If the card is dirty, then it will be scanned during Update RS.
    if (!card_region->in_collection_set() &&
        !_ct->is_card_dirty(card_index)) {
      scan_card(card_index, card_region);
    }
  }
  if (_scan_state->set_iter_complete(region_idx)) {
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}

size_t G1RemSet::scan_rem_set(G1ParPushHeapRSClosure* oops_in_heap_closure,
                              CodeBlobClosure* heap_region_codeblobs,
                              uint worker_i) {
  double rs_time_start = os::elapsedTime();

  G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
  _g1->collection_set_iterate_from(&cl, worker_i);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                            cl.strong_code_root_scan_time_sec();

  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());

  return cl.cards_done();
}
// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
  G1ParPushHeapRSClosure* _cl;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq,
                                              G1ParPushHeapRSClosure* cl) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq), _cl(cl)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < ParallelGCThreads, "should be a GC worker");

    if (_g1rs->refine_card(card_ptr, worker_i, _cl)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (_into_cset_dirty_card_queue_set)
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void G1RemSet::update_rem_set(DirtyCardQueue* into_cset_dcq,
                              G1ParPushHeapRSClosure* oops_in_heap_closure,
                              uint worker_i) {
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq, oops_in_heap_closure);

  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  if (G1HotCardCache::default_use_cache()) {
    // Apply the closure to the entries of the hot card cache.
    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
    _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
  }
  // Apply the closure to all remaining log entries.
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i);
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* cl,
                                             CodeBlobClosure* heap_region_codeblobs,
                                             uint worker_i) {
  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are passed to the
  // DirtyCardQueueSet that is used to manage RSet updates.
  DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);

  update_rem_set(&into_cset_dcq, cl, worker_i);
  return scan_rem_set(cl, heap_region_codeblobs, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}
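// A sketch of the per-pause sequence (for orientation; the calls are made by the
// collector, not from here): prepare_for_oops_into_collection_set_do() runs once at
// the start of the pause, each GC worker then calls oops_into_collection_set_do(),
// and cleanup_after_oops_into_collection_set_do() runs once at the end of the pause.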
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();
  // Cleanup after copy
  _g1->set_refine_cte_cl_concurrency(true);

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);

  DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;

  if (_g1->evacuation_failed()) {
    double restore_remembered_set_start = os::elapsedTime();

    // Restore remembered sets for the regions pointing into the collection set.
    // We just need to transfer the completed buffers from the DirtyCardQueueSet
    // used to hold cards that contain references that point into the collection set
    // to the DCQS used to hold the deferred RS updates.
    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    phase_times->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _into_cset_dirty_card_queue_set.clear();
  assert(_into_cset_dirty_card_queue_set.completed_buffers_num() == 0,
         "all buffers should be freed");
  _into_cset_dirty_card_queue_set.clear_n_completed_buffers();
}

class G1ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1CardLiveData* _live_data;
public:
  G1ScrubRSClosure(G1CardLiveData* live_data) :
    _g1h(G1CollectedHeap::heap()),
    _live_data(live_data) { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->rem_set()->scrub(_live_data);
    }
    return false;
  }
};

void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
  G1ScrubRSClosure scrub_cl(&_card_live_data);
  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
}

G1UpdateRSOrPushRefOopClosure::G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                                             G1ParPushHeapRSClosure* push_ref_cl,
                                                             bool record_refs_into_cset,
                                                             uint worker_i) :
  _g1(g1h),
  _from(NULL),
  _record_refs_into_cset(record_refs_into_cset),
  _has_refs_into_cset(false),
  _push_ref_cl(push_ref_cl),
  _worker_i(worker_i) { }
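// A sketch of the filtering pipeline in refine_card() below (for orientation):
// bail out early for cards that are no longer dirty, for regions that are young
// or free (possibly stale cards), and for collection set regions; during
// concurrent refinement the card may be parked in the hot card cache instead;
// a surviving card is trimmed to the allocated part of its region, cleaned, and
// its references iterated.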
// Returns true if the given card contains references that point
// into the collection set, if we're checking for such references;
// false otherwise.

bool G1RemSet::refine_card(jbyte* card_ptr,
                           uint worker_i,
                           G1ParPushHeapRSClosure* oops_in_heap_closure) {
  assert(_g1->is_in_exact(_ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         _ct->index_for(_ct->addr_for(card_ptr)),
         p2i(_ct->addr_for(card_ptr)),
         _g1->addr_to_region(_ct->addr_for(card_ptr)));

  bool check_for_refs_into_cset = oops_in_heap_closure != NULL;

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
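  // Put differently (a sketch): insert(c) == c means "process c now";
  // insert(c) == NULL means "c is parked in the cache, process it later";
  // insert(c) == e (some other card) means "c is parked, but e was evicted
  // to make room, so process e now instead".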
  if (_hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache. The cset is
      // not relevant here, since we're in concurrent phase.
      if (!r->is_old_or_humongous()) {
        return false;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.
  HeapWord* scan_limit;
  if (_g1->is_gc_active()) {
    // If we're in a STW GC, then a card might be in a GC alloc region
    // and extend onto a GC LAB, which may not be parsable. Stop such
    // at the "scan_top" of the region.
    scan_limit = r->scan_top();
  } else {
    // Non-humongous objects are only allocated in the old-gen during
    // GC, so if region is old then top is stable. Humongous object
    // allocation sets top last; if top has not yet been set, this is
    // a stale card and we'll end up with an empty intersection. If
    // this is not a stale card, the synchronization between the
    // enqueuing of the card and processing it here will have ensured
    // we see the up-to-date top here.
    scan_limit = r->top();
  }
  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return false;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

  // This fence serves two purposes. First, the card must be cleaned
  // before processing the contents. Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation. It's okay that
  // reading top and reading type were racy with respect to each other.
  // We need both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful(dirty_region,
                                        &update_rs_oop_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card. Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card. Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    assert(!_g1->is_gc_active(), "Unparsable heap during GC");
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != G1CardTable::dirty_card_val()) {
      *card_ptr = G1CardTable::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }
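  // Note that _conc_refine_cards only counts cards whose contents were actually
  // iterated here; a card that was redirtied and re-enqueued above will be
  // counted when it is eventually reprocessed.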
  // This gets set to true if the card being refined has references that point
  // into the collection set.
  bool has_refs_into_cset = update_rs_oop_cl.has_refs_into_cset();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    if (!_prev_period_summary.initialized()) {
      _prev_period_summary.initialize(this);
    }

    G1RemSetSummary current;
    current.initialize(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    _prev_period_summary.print_on(log.trace_stream());

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current;
    current.initialize(this);
    ResourceMark rm;
    current.print_on(log.trace_stream());
  }
}

void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC)
      && (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }

    bool use_hot_card_cache = _hot_card_cache->use_cache();
    _hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
    update_rem_set(&into_cset_dcq, NULL, 0);
    _into_cset_dirty_card_queue_set.clear();

    _hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(G1BarrierSet::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}

void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.create(workers, mark_bitmap);
}

void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.finalize(workers, mark_bitmap);
}

void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) {
  _card_live_data.verify(workers, bitmap);
}

void G1RemSet::clear_card_live_data(WorkGang* workers) {
  _card_live_data.clear(workers);
}

#ifdef ASSERT
void G1RemSet::verify_card_live_data_is_clear() {
  _card_live_data.verify_is_clear();
}
#endif