/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) :
  _g1(g1),
  _conc_refine_cards(0),
  _ct_bs(ct_bs),
  _g1p(_g1->g1_policy()),
  _cg1r(g1->concurrent_g1_refine()),
  _cset_rs_update_cl(NULL),
  _prev_period_summary(),
  _into_cset_dirty_card_queue_set(false)
{
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
  if (log_is_enabled(Trace, gc, remset)) {
    _prev_period_summary.initialize(this);
  }
  // Initialize the card queue set used to hold cards containing
  // references into the collection set.
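  // This set is never processed by mutator threads (NULL closure), never
  // triggers concurrent processing (-1 threshold) and has no length limit
  // (-1); it shares its buffer free list with the global JavaThread queue
  // set. Its buffers are simply discarded after a successful pause, or
  // merged into the regular DCQS if evacuation fails (see
  // cleanup_after_oops_into_collection_set_do() below).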
  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
                                             DirtyCardQ_CBL_mon,
                                             DirtyCardQ_FL_lock,
                                             -1, // never trigger processing
                                             -1, // no limit on length
                                             Shared_DirtyCardQ_lock,
                                             &JavaThread::dirty_card_queue_set());
}

G1RemSet::~G1RemSet() {
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
}

uint G1RemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  {
    GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
    _card_live_data.initialize(capacity, max_regions);
  }
  if (G1PretouchAuxiliaryMemory) {
    GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data");
    _card_live_data.pretouch();
  }
}

ScanRSClosure::ScanRSClosure(G1ParPushHeapRSClosure* oc,
                             CodeBlobClosure* code_root_cl,
                             uint worker_i) :
  _oc(oc),
  _code_root_cl(code_root_cl),
  _strong_code_root_scan_time_sec(0.0),
  _cards(0),
  _cards_done(0),
  _worker_i(worker_i),
  _try_claimed(false) {
  _g1h = G1CollectedHeap::heap();
  _bot = _g1h->bot();
  _ct_bs = _g1h->g1_barrier_set();
  _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}

void ScanRSClosure::scanCard(size_t index, HeapRegion* r) {
  // Stack allocate the DirtyCardToOopClosure instance
  HeapRegionDCTOC cl(_g1h, r, _oc, CardTableModRefBS::Precise);

  // Set the "from" region in the closure.
  _oc->set_region(r);
  MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
  MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
  MemRegion mr = pre_gc_allocated.intersection(card_region);
  if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
    // We mark the card as "claimed" lazily (so races are possible
    // but they're benign), which reduces the number of duplicate
    // scans (the rsets of the regions in the cset can intersect).
    _ct_bs->set_card_claimed(index);
    _cards_done++;
    cl.do_MemRegion(mr);
  }
}

void ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
  double scan_start = os::elapsedTime();
  r->strong_code_roots_do(_code_root_cl);
  _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
}

bool ScanRSClosure::doHeapRegion(HeapRegion* r) {
  assert(r->in_collection_set(), "should only be called on elements of CS.");
  HeapRegionRemSet* hrrs = r->rem_set();
  if (hrrs->iter_is_complete()) return false; // All done.
  if (!_try_claimed && !hrrs->claim_iter()) return false;
  // If we ever free the collection set concurrently, we should also
  // clear the card table concurrently, and then we won't need to
  // add regions of the collection set to the dirty cards region list.
  _g1h->push_dirty_cards_region(r);
  // If we didn't return above, then
  //   _try_claimed || r->claim_iter()
  // is true: either we're supposed to work on claimed-but-not-complete
  // regions, or we successfully claimed the region.

  HeapRegionRemSetIterator iter(hrrs);
  size_t card_index;

  // We claim cards in blocks so as to reduce contention. The block size
  // is determined by the G1RSetScanBlockSize parameter.
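  // Each call to iter_claimed_next(_block_size) advances a claim counter
  // shared by all workers iterating over this remembered set and returns
  // the start of the block this worker now owns; cards outside the owned
  // range [jump_to_card, jump_to_card + _block_size) are skipped below.
  // E.g. with a block size of 64, one worker may own cards [0, 64) while
  // another concurrently owns [64, 128).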
  size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= jump_to_card + _block_size) {
      jump_to_card = hrrs->iter_claimed_next(_block_size);
    }
    if (current_card < jump_to_card) continue;
    HeapWord* card_start = _g1h->bot()->address_for_index(card_index);

    HeapRegion* card_region = _g1h->heap_region_containing(card_start);
    _cards++;

    if (!card_region->is_on_dirty_cards_region_list()) {
      _g1h->push_dirty_cards_region(card_region);
    }

    // If the card is dirty, then we will scan it during updateRS.
    if (!card_region->in_collection_set() &&
        !_ct_bs->is_card_dirty(card_index)) {
      scanCard(card_index, card_region);
    }
  }
  if (!_try_claimed) {
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);

    hrrs->set_iter_complete();
  }
  return false;
}

size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
                        CodeBlobClosure* heap_region_codeblobs,
                        uint worker_i) {
  double rs_time_start = os::elapsedTime();

  HeapRegion* startRegion = _g1->start_cset_region_for_worker(worker_i);

  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);

  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
                          - scanRScl.strong_code_root_scan_time_sec();

  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());

  return scanRScl.cards_done();
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}

  bool do_card_ptr(volatile jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < ParallelGCThreads, "should be a GC worker");

    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (_into_cset_dirty_card_queue_set)
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
    // Apply the closure to the entries of the hot card cache.
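    // The hot card cache holds cards that were dirtied repeatedly and
    // whose refinement was therefore deferred; they must be drained here
    // so that no pending update is lost. This drain is timed as ScanHCC,
    // separately from the regular UpdateRS buffer processing.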
    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
    _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
  }
  // Apply the closure to all remaining log entries.
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i);
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
                                             CodeBlobClosure* heap_region_codeblobs,
                                             uint worker_i) {
  // We cache the 'oc' closure in the appropriate slot of
  // _cset_rs_update_cl for this worker.
  assert(worker_i < n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are passed to the
  // DirtyCardQueueSet that is used to manage RSet updates.
  DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);

  updateRS(&into_cset_dcq, worker_i);
  size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);

  // We now clear the cached value of _cset_rs_update_cl for this worker.
  _cset_rs_update_cl[worker_i] = NULL;
  return cards_scanned;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  // Cleanup after copy
  _g1->set_refine_cte_cl_concurrency(true);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;

  if (_g1->evacuation_failed()) {
    double restore_remembered_set_start = os::elapsedTime();

    // Restore remembered sets for the regions pointing into the collection set.
    // We just need to transfer the completed buffers from the DirtyCardQueueSet
    // used to hold cards that contain references that point into the collection set
    // to the DCQS used to hold the deferred RS updates.
    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
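  // On normal completion the buffers still hold the recorded cards and are
  // simply discarded here; after an evacuation failure they have already
  // been moved to the deferred-update DCQS by merge_bufferlists() above.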
  _into_cset_dirty_card_queue_set.clear();
  assert(_into_cset_dirty_card_queue_set.completed_buffers_num() == 0,
         "all buffers should be freed");
  _into_cset_dirty_card_queue_set.clear_n_completed_buffers();
}

class G1ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1CardLiveData* _live_data;
public:
  G1ScrubRSClosure(G1CardLiveData* live_data) :
    _g1h(G1CollectedHeap::heap()),
    _live_data(live_data) { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->rem_set()->scrub(_live_data);
    }
    return false;
  }
};

void G1RemSet::scrub(uint worker_num, HeapRegionClaimer* hrclaimer) {
  G1ScrubRSClosure scrub_cl(&_card_live_data);
  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
}

G1TriggerClosure::G1TriggerClosure() :
  _triggered(false) { }

G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
                                                             OopClosure* oop_cl) :
  _trigger_cl(t_cl), _oop_cl(oop_cl) { }

G1Mux2Closure::G1Mux2Closure(OopClosure* c1, OopClosure* c2) :
  _c1(c1), _c2(c2) { }

G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                              G1RemSet* rs,
                              G1ParPushHeapRSClosure* push_ref_cl,
                              bool record_refs_into_cset,
                              uint worker_i) :
  _g1(g1h), _g1_rem_set(rs), _from(NULL),
  _record_refs_into_cset(record_refs_into_cset),
  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }

// Returns true if the given card contains references that point
// into the collection set, if we're checking for such references;
// false otherwise.

bool G1RemSet::refine_card(volatile jbyte* card_ptr, uint worker_i,
                           bool check_for_refs_into_cset) {
  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i((jbyte*)card_ptr),
         _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
         p2i(_ct_bs->addr_for(card_ptr)),
         _g1->addr_to_region(_ct_bs->addr_for(card_ptr)));

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to allocate objects
  // in the region (after they acquire the lock) before all the cards
  // on the region are dirtied. This is unlikely, but it can happen.
  // So, the extra check below filters out those cards.
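  // Young regions are always scanned in full during the evacuation pause,
  // so a dirty card in one carries no information that refinement needs
  // to preserve.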
  if (r->is_young()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards in the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.

  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  if (hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    card_ptr = hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    }

    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);

    // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
    // freed, reallocated and tagged as young while in the cache.
    // Hence we could see its young type change at any time.
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap. This is not safe without a perm
  // gen at the upper end of the heap.
  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirtyRegion(start, end);

  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
  if (check_for_refs_into_cset) {
    // ConcurrentG1RefineThreads have worker numbers larger than what
    // _cset_rs_update_cl[] is set up to handle. But those threads should
    // only be active outside of a collection, which means that when they
    // reach here they should have check_for_refs_into_cset == false.
    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
  }
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 _g1->g1_rem_set(),
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(_g1, &trigger_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                                                        (check_for_refs_into_cset ?
                                                         (OopClosure*)&mux :
                                                         (OopClosure*)&update_rs_oop_cl));

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young.
  // While in the cache, the region may have been freed during a cleanup
  // pause, reallocated and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering only once it has been determined that there has been an
  // actual allocation in this region, making it safe to check the
  // young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }

  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
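  // (Concurrent refinement threads call refine_card() with
  // check_for_refs_into_cset == false, so the trigger closure is only
  // wired up, and can only fire, during a safepoint pause.)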
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    if (!_prev_period_summary.initialized()) {
      _prev_period_summary.initialize(this);
    }

    G1RemSetSummary current;
    current.initialize(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    _prev_period_summary.print_on(log.trace_stream());

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current;
    current.initialize(this);
    ResourceMark rm;
    current.print_on(log.trace_stream());
  }
}

void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC) &&
      (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }

    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    bool use_hot_card_cache = hot_card_cache->use_cache();
    hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
    updateRS(&into_cset_dcq, 0);
    _into_cset_dirty_card_queue_set.clear();

    hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}

void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.create(workers, mark_bitmap);
}

void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.finalize(workers, mark_bitmap);
}

void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) {
  _card_live_data.verify(workers, bitmap);
}

void G1RemSet::clear_card_live_data(WorkGang* workers) {
  _card_live_data.clear(workers);
}

#ifdef ASSERT
void G1RemSet::verify_card_live_data_is_clear() {
  _card_live_data.verify_is_clear();
}
#endif