/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/concurrentG1Refine.hpp" 27 #include "gc/g1/concurrentG1RefineThread.hpp" 28 #include "gc/g1/dirtyCardQueue.hpp" 29 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 30 #include "gc/g1/g1CollectedHeap.inline.hpp" 31 #include "gc/g1/g1CollectorPolicy.hpp" 32 #include "gc/g1/g1FromCardCache.hpp" 33 #include "gc/g1/g1GCPhaseTimes.hpp" 34 #include "gc/g1/g1HotCardCache.hpp" 35 #include "gc/g1/g1OopClosures.inline.hpp" 36 #include "gc/g1/g1RemSet.inline.hpp" 37 #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" 38 #include "gc/g1/heapRegion.inline.hpp" 39 #include "gc/g1/heapRegionManager.inline.hpp" 40 #include "gc/g1/heapRegionRemSet.hpp" 41 #include "gc/shared/gcTraceTime.inline.hpp" 42 #include "memory/iterator.hpp" 43 #include "memory/resourceArea.hpp" 44 #include "oops/oop.inline.hpp" 45 #include "utilities/globalDefinitions.hpp" 46 #include "utilities/intHisto.hpp" 47 #include "utilities/stack.inline.hpp" 48 49 // Collects information about the remembered set scan progress during an evacuation. 50 class G1RemSetScanState : public CHeapObj<mtGC> { 51 private: 52 // Scan progress for the remembered set of a single region. Transitions from 53 // Unclaimed -> Claimed -> Complete. 54 // At each of the transitions the thread that does the transition needs to perform 55 // some special action once. This is the reason for the extra "Claimed" state. 56 typedef jint G1RemsetIterState; 57 58 static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet. 59 static const G1RemsetIterState Claimed = 1; // The remembered set is currently being scanned. 60 static const G1RemsetIterState Complete = 2; // The remembered set has been completely scanned. 61 62 G1RemsetIterState* _iter_state; 63 // The current location where the next thread should continue scanning in a region's 64 // remembered set. 
65 size_t* _iter_claimed; 66 67 public: 68 G1RemSetScanState() : 69 _iter_state(NULL), 70 _iter_claimed(NULL) { 71 72 } 73 74 ~G1RemSetScanState() { 75 if (_iter_state != NULL) { 76 FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_state); 77 } 78 if (_iter_claimed != NULL) { 79 FREE_C_HEAP_ARRAY(size_t, _iter_claimed); 80 } 81 } 82 83 void initialize(uint max_regions) { 84 _iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC); 85 _iter_claimed = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC); 86 } 87 88 void reset(uint max_regions) { 89 for (uint i = 0; i < max_regions; i++) { 90 _iter_state[i] = Unclaimed; 91 } 92 memset(_iter_claimed, 0, max_regions * sizeof(size_t)); 93 } 94 // Attempt to claim the remembered set of the region for iteration. Returns true 95 // if this call caused the transition from Unclaimed to Claimed. 96 inline bool claim_iter(uint region) { 97 if (_iter_state[region] != Unclaimed) { 98 return false; 99 } 100 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state[region]), Unclaimed); 101 return (res == Unclaimed); 102 } 103 // Try to atomically sets the iteration state to "complete". Returns true for the 104 // thread that caused the transition. 105 inline bool set_iter_complete(uint region) { 106 if (iter_is_complete(region)) { 107 return false; 108 } 109 jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_state[region]), Claimed); 110 return (res == Claimed); 111 } 112 // Returns true if the region's iteration is complete. 113 inline bool iter_is_complete(uint region) { 114 return _iter_state[region] == Complete; 115 } 116 // The current position within the remembered set of the given region. 117 inline size_t iter_claimed(uint region) const { 118 return _iter_claimed[region]; 119 } 120 // Claim the next block of cards within the remembered set of the region with 121 // step size. 
122 inline size_t iter_claimed_next(uint region, size_t step) { 123 return Atomic::add(step, &_iter_claimed[region]) - step; 124 } 125 }; 126 127 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) : 128 _g1(g1), 129 _scan_state(new G1RemSetScanState()), 130 _conc_refine_cards(0), 131 _ct_bs(ct_bs), 132 _g1p(_g1->g1_policy()), 133 _cg1r(g1->concurrent_g1_refine()), 134 _prev_period_summary(), 135 _into_cset_dirty_card_queue_set(false) 136 { 137 if (log_is_enabled(Trace, gc, remset)) { 138 _prev_period_summary.initialize(this); 139 } 140 // Initialize the card queue set used to hold cards containing 141 // references into the collection set. 142 _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code 143 DirtyCardQ_CBL_mon, 144 DirtyCardQ_FL_lock, 145 -1, // never trigger processing 146 -1, // no limit on length 147 Shared_DirtyCardQ_lock, 148 &JavaThread::dirty_card_queue_set()); 149 } 150 151 G1RemSet::~G1RemSet() { 152 if (_scan_state != NULL) { 153 delete _scan_state; 154 } 155 } 156 157 uint G1RemSet::num_par_rem_sets() { 158 return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads); 159 } 160 161 void G1RemSet::initialize(size_t capacity, uint max_regions) { 162 G1FromCardCache::initialize(num_par_rem_sets(), max_regions); 163 _scan_state->initialize(max_regions); 164 { 165 GCTraceTime(Debug, gc, marking)("Initialize Card Live Data"); 166 _card_live_data.initialize(capacity, max_regions); 167 } 168 if (G1PretouchAuxiliaryMemory) { 169 GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data"); 170 _card_live_data.pretouch(); 171 } 172 } 173 174 G1ScanRSClosure::G1ScanRSClosure(G1RemSetScanState* scan_state, 175 G1ParPushHeapRSClosure* push_heap_cl, 176 CodeBlobClosure* code_root_cl, 177 uint worker_i) : 178 _scan_state(scan_state), 179 _push_heap_cl(push_heap_cl), 180 _code_root_cl(code_root_cl), 181 _strong_code_root_scan_time_sec(0.0), 182 _cards(0), 183 
_cards_done(0), 184 _worker_i(worker_i) { 185 _g1h = G1CollectedHeap::heap(); 186 _bot = _g1h->bot(); 187 _ct_bs = _g1h->g1_barrier_set(); 188 _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1); 189 } 190 191 void G1ScanRSClosure::scan_card(size_t index, HeapRegion *r) { 192 // Stack allocate the DirtyCardToOopClosure instance 193 HeapRegionDCTOC cl(_g1h, r, _push_heap_cl, CardTableModRefBS::Precise); 194 195 // Set the "from" region in the closure. 196 _push_heap_cl->set_region(r); 197 MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words); 198 MemRegion pre_gc_allocated(r->bottom(), r->scan_top()); 199 MemRegion mr = pre_gc_allocated.intersection(card_region); 200 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) { 201 // We make the card as "claimed" lazily (so races are possible 202 // but they're benign), which reduces the number of duplicate 203 // scans (the rsets of the regions in the cset can intersect). 204 _ct_bs->set_card_claimed(index); 205 _cards_done++; 206 cl.do_MemRegion(mr); 207 } 208 } 209 210 void G1ScanRSClosure::scan_strong_code_roots(HeapRegion* r) { 211 double scan_start = os::elapsedTime(); 212 r->strong_code_roots_do(_code_root_cl); 213 _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start); 214 } 215 216 bool G1ScanRSClosure::doHeapRegion(HeapRegion* r) { 217 assert(r->in_collection_set(), "should only be called on elements of CS."); 218 uint region_idx = r->hrm_index(); 219 220 if (_scan_state->iter_is_complete(region_idx)) { 221 return false; 222 } 223 if (_scan_state->claim_iter(region_idx)) { 224 // If we ever free the collection set concurrently, we should also 225 // clear the card table concurrently therefore we won't need to 226 // add regions of the collection set to the dirty cards region. 227 _g1h->push_dirty_cards_region(r); 228 } 229 230 HeapRegionRemSetIterator iter(r->rem_set()); 231 size_t card_index; 232 233 // We claim cards in block so as to reduce the contention. 
The block size is determined by 234 // the G1RSetScanBlockSize parameter. 235 size_t jump_to_card = _scan_state->iter_claimed_next(region_idx, _block_size); 236 for (size_t current_card = 0; iter.has_next(card_index); current_card++) { 237 if (current_card >= jump_to_card + _block_size) { 238 jump_to_card = _scan_state->iter_claimed_next(region_idx, _block_size); 239 } 240 if (current_card < jump_to_card) continue; 241 HeapWord* card_start = _g1h->bot()->address_for_index(card_index); 242 243 HeapRegion* card_region = _g1h->heap_region_containing(card_start); 244 _cards++; 245 246 if (!card_region->is_on_dirty_cards_region_list()) { 247 _g1h->push_dirty_cards_region(card_region); 248 } 249 250 // If the card is dirty, then we will scan it during updateRS. 251 if (!card_region->in_collection_set() && 252 !_ct_bs->is_card_dirty(card_index)) { 253 scan_card(card_index, card_region); 254 } 255 } 256 if (_scan_state->set_iter_complete(region_idx)) { 257 // Scan the strong code root list attached to the current region 258 scan_strong_code_roots(r); 259 } 260 return false; 261 } 262 263 size_t G1RemSet::scan_rem_set(G1ParPushHeapRSClosure* oops_in_heap_closure, 264 CodeBlobClosure* heap_region_codeblobs, 265 uint worker_i) { 266 double rs_time_start = os::elapsedTime(); 267 268 HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i); 269 270 G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i); 271 _g1->collection_set_iterate_from(startRegion, &cl); 272 273 double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) - 274 cl.strong_code_root_scan_time_sec(); 275 276 _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec); 277 _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec()); 278 279 return cl.cards_done(); 280 } 281 282 // Closure used for updating RSets and recording references that 283 // point into the collection set. 
Only called during an 284 // evacuation pause. 285 286 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure { 287 G1RemSet* _g1rs; 288 DirtyCardQueue* _into_cset_dcq; 289 G1ParPushHeapRSClosure* _cl; 290 public: 291 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h, 292 DirtyCardQueue* into_cset_dcq, 293 G1ParPushHeapRSClosure* cl) : 294 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq), _cl(cl) 295 {} 296 297 bool do_card_ptr(jbyte* card_ptr, uint worker_i) { 298 // The only time we care about recording cards that 299 // contain references that point into the collection set 300 // is during RSet updating within an evacuation pause. 301 // In this case worker_i should be the id of a GC worker thread. 302 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); 303 assert(worker_i < ParallelGCThreads, "should be a GC worker"); 304 305 if (_g1rs->refine_card(card_ptr, worker_i, _cl)) { 306 // 'card_ptr' contains references that point into the collection 307 // set. We need to record the card in the DCQS 308 // (_into_cset_dirty_card_queue_set) 309 // that's used for that purpose. 310 // 311 // Enqueue the card 312 _into_cset_dcq->enqueue(card_ptr); 313 } 314 return true; 315 } 316 }; 317 318 void G1RemSet::update_rem_set(DirtyCardQueue* into_cset_dcq, 319 G1ParPushHeapRSClosure* oops_in_heap_closure, 320 uint worker_i) { 321 RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq, oops_in_heap_closure); 322 323 G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i); 324 if (ConcurrentG1Refine::hot_card_cache_enabled()) { 325 // Apply the closure to the entries of the hot card cache. 326 G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i); 327 _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i); 328 } 329 // Apply the closure to all remaining log entries. 
330 _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i); 331 } 332 333 void G1RemSet::cleanupHRRS() { 334 HeapRegionRemSet::cleanup(); 335 } 336 337 size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* cl, 338 CodeBlobClosure* heap_region_codeblobs, 339 uint worker_i) { 340 // A DirtyCardQueue that is used to hold cards containing references 341 // that point into the collection set. This DCQ is associated with a 342 // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal 343 // circumstances (i.e. the pause successfully completes), these cards 344 // are just discarded (there's no need to update the RSets of regions 345 // that were in the collection set - after the pause these regions 346 // are wholly 'free' of live objects. In the event of an evacuation 347 // failure the cards/buffers in this queue set are passed to the 348 // DirtyCardQueueSet that is used to manage RSet updates 349 DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set); 350 351 update_rem_set(&into_cset_dcq, cl, worker_i); 352 return scan_rem_set(cl, heap_region_codeblobs, worker_i);; 353 } 354 355 void G1RemSet::prepare_for_oops_into_collection_set_do() { 356 _g1->set_refine_cte_cl_concurrency(false); 357 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 358 dcqs.concatenate_logs(); 359 360 _scan_state->reset(_g1->max_regions()); 361 } 362 363 void G1RemSet::cleanup_after_oops_into_collection_set_do() { 364 // Cleanup after copy 365 _g1->set_refine_cte_cl_concurrency(true); 366 // Set all cards back to clean. 367 _g1->cleanUpCardTable(); 368 369 DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set; 370 371 if (_g1->evacuation_failed()) { 372 double restore_remembered_set_start = os::elapsedTime(); 373 374 // Restore remembered sets for the regions pointing into the collection set. 
375 // We just need to transfer the completed buffers from the DirtyCardQueueSet 376 // used to hold cards that contain references that point into the collection set 377 // to the DCQS used to hold the deferred RS updates. 378 _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs); 379 _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0); 380 } 381 382 // Free any completed buffers in the DirtyCardQueueSet used to hold cards 383 // which contain references that point into the collection. 384 _into_cset_dirty_card_queue_set.clear(); 385 assert(_into_cset_dirty_card_queue_set.completed_buffers_num() == 0, 386 "all buffers should be freed"); 387 _into_cset_dirty_card_queue_set.clear_n_completed_buffers(); 388 } 389 390 class G1ScrubRSClosure: public HeapRegionClosure { 391 G1CollectedHeap* _g1h; 392 G1CardLiveData* _live_data; 393 public: 394 G1ScrubRSClosure(G1CardLiveData* live_data) : 395 _g1h(G1CollectedHeap::heap()), 396 _live_data(live_data) { } 397 398 bool doHeapRegion(HeapRegion* r) { 399 if (!r->is_continues_humongous()) { 400 r->rem_set()->scrub(_live_data); 401 } 402 return false; 403 } 404 }; 405 406 void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) { 407 G1ScrubRSClosure scrub_cl(&_card_live_data); 408 _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer); 409 } 410 411 G1TriggerClosure::G1TriggerClosure() : 412 _triggered(false) { } 413 414 G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl, 415 OopClosure* oop_cl) : 416 _trigger_cl(t_cl), _oop_cl(oop_cl) { } 417 418 G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) : 419 _c1(c1), _c2(c2) { } 420 421 G1UpdateRSOrPushRefOopClosure:: 422 G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h, 423 G1RemSet* rs, 424 G1ParPushHeapRSClosure* push_ref_cl, 425 bool record_refs_into_cset, 426 uint worker_i) : 427 _g1(g1h), _g1_rem_set(rs), _from(NULL), 428 
_record_refs_into_cset(record_refs_into_cset), 429 _push_ref_cl(push_ref_cl), _worker_i(worker_i) { } 430 431 // Returns true if the given card contains references that point 432 // into the collection set, if we're checking for such references; 433 // false otherwise. 434 435 bool G1RemSet::refine_card(jbyte* card_ptr, 436 uint worker_i, 437 G1ParPushHeapRSClosure* oops_in_heap_closure) { 438 assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)), 439 "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap", 440 p2i(card_ptr), 441 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)), 442 p2i(_ct_bs->addr_for(card_ptr)), 443 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))); 444 445 bool check_for_refs_into_cset = oops_in_heap_closure != NULL; 446 447 // If the card is no longer dirty, nothing to do. 448 if (*card_ptr != CardTableModRefBS::dirty_card_val()) { 449 // No need to return that this card contains refs that point 450 // into the collection set. 451 return false; 452 } 453 454 // Construct the region representing the card. 455 HeapWord* start = _ct_bs->addr_for(card_ptr); 456 // And find the region containing it. 457 HeapRegion* r = _g1->heap_region_containing(start); 458 459 // Why do we have to check here whether a card is on a young region, 460 // given that we dirty young regions and, as a result, the 461 // post-barrier is supposed to filter them out and never to enqueue 462 // them? When we allocate a new region as the "allocation region" we 463 // actually dirty its cards after we release the lock, since card 464 // dirtying while holding the lock was a performance bottleneck. So, 465 // as a result, it is possible for other threads to actually 466 // allocate objects in the region (after the acquire the lock) 467 // before all the cards on the region are dirtied. This is unlikely, 468 // and it doesn't happen often, but it can happen. So, the extra 469 // check below filters out those cards. 
470 if (r->is_young()) { 471 return false; 472 } 473 474 // While we are processing RSet buffers during the collection, we 475 // actually don't want to scan any cards on the collection set, 476 // since we don't want to update remembered sets with entries that 477 // point into the collection set, given that live objects from the 478 // collection set are about to move and such entries will be stale 479 // very soon. This change also deals with a reliability issue which 480 // involves scanning a card in the collection set and coming across 481 // an array that was being chunked and looking malformed. Note, 482 // however, that if evacuation fails, we have to scan any objects 483 // that were not moved and create any missing entries. 484 if (r->in_collection_set()) { 485 return false; 486 } 487 488 // The result from the hot card cache insert call is either: 489 // * pointer to the current card 490 // (implying that the current card is not 'hot'), 491 // * null 492 // (meaning we had inserted the card ptr into the "hot" card cache, 493 // which had some headroom), 494 // * a pointer to a "hot" card that was evicted from the "hot" cache. 495 // 496 497 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); 498 if (hot_card_cache->use_cache()) { 499 assert(!check_for_refs_into_cset, "sanity"); 500 assert(!SafepointSynchronize::is_at_safepoint(), "sanity"); 501 502 card_ptr = hot_card_cache->insert(card_ptr); 503 if (card_ptr == NULL) { 504 // There was no eviction. Nothing to do. 505 return false; 506 } 507 508 start = _ct_bs->addr_for(card_ptr); 509 r = _g1->heap_region_containing(start); 510 511 // Checking whether the region we got back from the cache 512 // is young here is inappropriate. The region could have been 513 // freed, reallocated and tagged as young while in the cache. 514 // Hence we could see its young type change at any time. 515 } 516 517 // Don't use addr_for(card_ptr + 1) which can ask for 518 // a card beyond the heap. 
This is not safe without a perm 519 // gen at the upper end of the heap. 520 HeapWord* end = start + CardTableModRefBS::card_size_in_words; 521 MemRegion dirtyRegion(start, end); 522 523 G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1, 524 _g1->g1_rem_set(), 525 oops_in_heap_closure, 526 check_for_refs_into_cset, 527 worker_i); 528 update_rs_oop_cl.set_from(r); 529 530 G1TriggerClosure trigger_cl; 531 FilterIntoCSClosure into_cs_cl(_g1, &trigger_cl); 532 G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl); 533 G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl); 534 535 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, 536 (check_for_refs_into_cset ? 537 (OopClosure*)&mux : 538 (OopClosure*)&update_rs_oop_cl)); 539 540 // The region for the current card may be a young region. The 541 // current card may have been a card that was evicted from the 542 // card cache. When the card was inserted into the cache, we had 543 // determined that its region was non-young. While in the cache, 544 // the region may have been freed during a cleanup pause, reallocated 545 // and tagged as young. 546 // 547 // We wish to filter out cards for such a region but the current 548 // thread, if we're running concurrently, may "see" the young type 549 // change at any time (so an earlier "is_young" check may pass or 550 // fail arbitrarily). We tell the iteration code to perform this 551 // filtering when it has been determined that there has been an actual 552 // allocation in this region and making it safe to check the young type. 553 bool filter_young = true; 554 555 HeapWord* stop_point = 556 r->oops_on_card_seq_iterate_careful(dirtyRegion, 557 &filter_then_update_rs_oop_cl, 558 filter_young, 559 card_ptr); 560 561 // If stop_point is non-null, then we encountered an unallocated region 562 // (perhaps the unfilled portion of a TLAB.) 
For now, we'll dirty the 563 // card and re-enqueue: if we put off the card until a GC pause, then the 564 // unallocated portion will be filled in. Alternatively, we might try 565 // the full complexity of the technique used in "regular" precleaning. 566 if (stop_point != NULL) { 567 // The card might have gotten re-dirtied and re-enqueued while we 568 // worked. (In fact, it's pretty likely.) 569 if (*card_ptr != CardTableModRefBS::dirty_card_val()) { 570 *card_ptr = CardTableModRefBS::dirty_card_val(); 571 MutexLockerEx x(Shared_DirtyCardQ_lock, 572 Mutex::_no_safepoint_check_flag); 573 DirtyCardQueue* sdcq = 574 JavaThread::dirty_card_queue_set().shared_dirty_card_queue(); 575 sdcq->enqueue(card_ptr); 576 } 577 } else { 578 _conc_refine_cards++; 579 } 580 581 // This gets set to true if the card being refined has 582 // references that point into the collection set. 583 bool has_refs_into_cset = trigger_cl.triggered(); 584 585 // We should only be detecting that the card contains references 586 // that point into the collection set if the current thread is 587 // a GC worker thread. 
588 assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(), 589 "invalid result at non safepoint"); 590 591 return has_refs_into_cset; 592 } 593 594 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) { 595 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) && 596 (period_count % G1SummarizeRSetStatsPeriod == 0)) { 597 598 if (!_prev_period_summary.initialized()) { 599 _prev_period_summary.initialize(this); 600 } 601 602 G1RemSetSummary current; 603 current.initialize(this); 604 _prev_period_summary.subtract_from(¤t); 605 606 Log(gc, remset) log; 607 log.trace("%s", header); 608 ResourceMark rm; 609 _prev_period_summary.print_on(log.trace_stream()); 610 611 _prev_period_summary.set(¤t); 612 } 613 } 614 615 void G1RemSet::print_summary_info() { 616 Log(gc, remset, exit) log; 617 if (log.is_trace()) { 618 log.trace(" Cumulative RS summary"); 619 G1RemSetSummary current; 620 current.initialize(this); 621 ResourceMark rm; 622 current.print_on(log.trace_stream()); 623 } 624 } 625 626 void G1RemSet::prepare_for_verify() { 627 if (G1HRRSFlushLogBuffersOnVerify && 628 (VerifyBeforeGC || VerifyAfterGC) 629 && (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) { 630 cleanupHRRS(); 631 _g1->set_refine_cte_cl_concurrency(false); 632 if (SafepointSynchronize::is_at_safepoint()) { 633 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 634 dcqs.concatenate_logs(); 635 } 636 637 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); 638 bool use_hot_card_cache = hot_card_cache->use_cache(); 639 hot_card_cache->set_use_cache(false); 640 641 DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set); 642 update_rem_set(&into_cset_dcq, NULL, 0); 643 _into_cset_dirty_card_queue_set.clear(); 644 645 hot_card_cache->set_use_cache(use_hot_card_cache); 646 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 647 } 648 } 649 650 void 
G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) { 651 _card_live_data.create(workers, mark_bitmap); 652 } 653 654 void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) { 655 _card_live_data.finalize(workers, mark_bitmap); 656 } 657 658 void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) { 659 _card_live_data.verify(workers, bitmap); 660 } 661 662 void G1RemSet::clear_card_live_data(WorkGang* workers) { 663 _card_live_data.clear(workers); 664 } 665 666 #ifdef ASSERT 667 void G1RemSet::verify_card_live_data_is_clear() { 668 _card_live_data.verify_is_clear(); 669 } 670 #endif