/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }

}
#endif


class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};

class IntoCSRegionClosure: public HeapRegionClosure {
  IntoCSOopClosure _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(g1, blk) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set()) {
      _blk.set_region(r);
      if (r->isHumongous()) {
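        // A humongous object starts in its "starts humongous" region, so
        // iterating that single object covers the whole humongous range;
        // the "continues humongous" tail regions hold no separate objects
        // and are skipped here.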
        if (r->startsHumongous()) {
          oop obj = oop(r->bottom());
          obj->oop_iterate(&_blk);
        }
      } else {
        r->oop_before_save_marks_iterate(&_blk);
      }
    }
    return false;
  }
};

class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : _g1(g1), _conc_refine_cards(0),
    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _traversal_in_progress(false),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
}

G1RemSet::~G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}

class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS* _ct_bs;
  int _worker_i;
  int _block_size;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion* r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space* sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent. We shouldn't look at objects
      // allocated during the GC.
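      // used_region_at_save_marks() bounds the scan to the portion of the
      // space below the saved mark, so objects allocated during the pause
      // are not visited.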
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }

  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card %p: "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }

  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    _g1h->push_dirty_cards_region(r);
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;

    // We claim cards in blocks so as to reduce contention. The block size
    // is determined by the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
      if (current_card < jump_to_card) continue;
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->is_on_dirty_cards_region_list()) {
        _g1h->push_dirty_cards_region(card_region);
      }

      // If the card is dirty, then we will scan it during updateRS.
      if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
        // We mark the card as "claimed" lazily (so races are possible but they're benign),
        // which reduces the number of duplicate scans (the rsets of the regions in the cset
        // can intersect).
        if (!_ct_bs->is_card_claimed(card_index)) {
          _ct_bs->set_card_claimed(card_index);
          scanCard(card_index, card_region);
        }
      }
    }
    if (!_try_claimed) {
      hrrs->set_iter_complete();
    }
    return false;
  }
  // Set all cards back to clean.
  void cleanup() { _g1h->cleanUpCardTable(); }
  size_t cards_done() { return _cards_done; }
  size_t cards_looked_up() { return _cards; }
};

// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//          n collection set regions
//          p threads
// Then thread t will start at region t * floor (n/p)

HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind      = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}

void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion* startRegion = calculateStartRegion(worker_i);

  ScanRSClosure scanRScl(oc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

  assert( _cards_scanned != NULL, "invariant" );
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");

    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
  double start = os::elapsedTime();
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);

  // Now there should be no dirty cards.
  if (G1RSLogCheckCardTable) {
    CountNonCleanMemRegionClosure cl(_g1);
    _ct_bs->mod_card_iterate(&cl);
    // XXX This isn't true any more: keeping cards of young regions
    // marked dirty broke it. Need some reasonable fix.
    guarantee(cl.n() == 0, "Card table should be clean.");
  }

  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}

#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() { return _count; }
};
#endif

class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    // Find the highest non-empty bin; stop at 0 so we never read _histo[-1].
    int mx = N;
    while (mx > 0) {
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr(" %8d - %8d %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
  }
};

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                           int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  // We cache the 'oc' closure in the appropriate slot of
  // _cset_rs_update_cl for this worker.
  assert(worker_i < (int)n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure, the cards/buffers in this queue set are:
  // * passed to the DirtyCardQueueSet that is used to manage deferred
  //   RSet updates, or
  // * scanned for references that point into the collection set
  //   and the RSet of the corresponding region in the collection set
  //   is updated immediately.
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert back to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
    _g1p->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, worker_i);
  } else {
    _g1p->record_scan_rs_time(worker_i, 0.0);
  }

  // We now clear the cached value of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->collection_set_iterate(&cl);
#endif
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  assert(!_traversal_in_progress, "Invariant between iterations.");
  set_traversal(true);
  if (ParallelGCThreads > 0) {
    _seq_task->set_n_threads((int)n_workers());
  }
  guarantee( _cards_scanned == NULL, "invariant" );
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}


class cleanUpIteratorsClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    hrrs->init_for_par_iteration();
    return false;
  }
};

// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
// fields in each oop; when it finds an oop that points into the collection
// set, the RSet for the region containing the referenced object is updated.
// Note: _par_traversal_in_progress in the G1RemSet must be FALSE; otherwise
// the UpdateRSetImmediate closure will cause cards to be enqueued onto
// the DCQS that we're iterating over, causing an infinite loop.
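// This path is only exercised after an evacuation failure, and only when
// G1DeferredRSUpdate is disabled (see
// G1RemSet::cleanup_after_oops_into_collection_set_do below).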
class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
public:
  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
                                          CardTableModRefBS* bs):
    _g1(g1), _ct_bs(bs)
  { }

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
    assert(r != NULL, "unexpected null");

    // Scan oops in the card looking for references into the collection set
    HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
    MemRegion scanRegion(start, end);

    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);

    // We can pass false as the "filter_young" parameter here as:
    // * we should be in a STW pause,
    // * the DCQS to which this closure is applied is used to hold
    //   references that point into the collection set from the prior
    //   RSet updating,
    // * the post-write barrier shouldn't be logging updates to young
    //   regions (but there is a situation where this can happen - see
    //   the comment in G1RemSet::concurrentRefineOneCard below -
    //   that should not be applicable here), and
    // * during actual RSet updating, the filtering of cards in young
    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
    //   employed.
    // As a result, when this closure is applied to "refs into cset"
    // DCQS, we shouldn't see any cards in young regions.
    update_rs_cl.set_region(r);
    HeapWord* stop_point =
      r->oops_on_card_seq_iterate_careful(scanRegion,
                                          &filter_then_update_rs_cset_oop_cl,
                                          false /* filter_young */);

    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point
    assert(stop_point == NULL, "saw an unallocated region");
    return true;
  }
};

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  set_traversal(false);

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.
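    // Two strategies are possible here: with G1DeferredRSUpdate the
    // "refs into cset" buffers are handed over to the regular deferred
    // update queue set and processed later; otherwise each buffered card
    // is scanned now and the RSets are updated immediately.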
    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {

      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();

  assert(!_traversal_in_progress, "Invariant between iterations.");
}

class UpdateRSObjectClosure: public ObjectClosure {
  UpdateRSOopClosure* _update_rs_oop_cl;
public:
  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    _update_rs_oop_cl(update_rs_oop_cl) {}
  void do_object(oop obj) {
    obj->oop_iterate(_update_rs_oop_cl);
  }

};

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                         int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}


static IntHistogram out_of_histo(50, 50);

class TriggerClosure : public OopClosure {
  bool _trigger;
public:
  TriggerClosure() : _trigger(false) { }
  bool value() const { return _trigger; }
  template <class T> void do_oop_nv(T* p) { _trigger = true; }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class InvokeIfNotTriggeredClosure: public OopClosure {
  TriggerClosure* _t;
  OopClosure* _oc;
public:
  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
    _t(t), _oc(oc) { }
  template <class T> void do_oop_nv(T* p) {
    if (!_t->value()) _oc->do_oop(p);
  }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  Mux2Closure(OopClosure* c1, OopClosure* c2) : _c1(c1), _c2(c2) { }
  template <class T> void do_oop_nv(T* p) {
    _c1->do_oop(p); _c2->do_oop(p);
  }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);

  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region, making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
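    // Only re-dirty and re-enqueue the card ourselves if nobody else has
    // already done so; otherwise we would add a duplicate entry.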
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}

bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    // Again no need to return that this card contains refs that
    // point into the collection set.
    return false;  // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // but it can happen. So, the extra check below filters out those cards.
  if (r->is_young()) {
    return false;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards in the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false). To avoid
  // having a data structure (updates to which would need a lock)
  // to hold these unprocessed dirty cards, we need to immediately
  // process card_ptr. The actions to be taken on return
  // from cache_insert are summarized in the following table:
  //
  //   res      defer   action
  //   --------------------------------------------------------------
  //   null     false   card evicted from _card_counts & replaced with
  //                    card_ptr; evicted ptr added to hot cache.
  //                    No need to process res; immediately process card_ptr
  //
  //   null     true    card not evicted from _card_counts; card_ptr added
  //                    to hot cache.
  //                    Nothing to do.
  //
  //   non-null false   card evicted from _card_counts & replaced with
  //                    card_ptr; evicted ptr is currently "cold" or
  //                    caused an eviction from the hot cache.
  //                    Immediately process res; process card_ptr.
  //
  //   non-null true    card not evicted from _card_counts; card_ptr is
  //                    currently cold, or caused an eviction from hot
  //                    cache.
  //                    Immediately process res; no need to process card_ptr.


  jbyte* res = card_ptr;
  bool defer = false;

  // This gets set to true if the card being refined has references
  // that point into the collection set.
  bool oops_into_cset = false;

  if (_cg1r->use_cache()) {
    jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
    if (res != NULL && (res != card_ptr || defer)) {
      start = _ct_bs->addr_for(res);
      r = _g1->heap_region_containing(start);
      if (r == NULL) {
        assert(_g1->is_in_permanent(start), "Or else where?");
      } else {
        // Checking whether the region we got back from the cache
        // is young here is inappropriate. The region could have been
        // freed, reallocated and tagged as young while in the cache.
        // Hence we could see its young type change at any time.
        //
        // Process card pointer we get back from the hot card cache. This
        // will check whether the region containing the card is young
        // _after_ checking that the region has been allocated from.
        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
                                                      false /* check_for_refs_into_cset */);
        // The above call to concurrentRefineOneCard_impl is only
        // performed if the hot card cache is enabled. This cache is
        // disabled during an evacuation pause - which is the only
        // time when we need to know if the card contains references
        // that point into the collection set. Also when the hot card
        // cache is enabled, this code is executed by the concurrent
        // refine threads - rather than the GC worker threads - and
        // concurrentRefineOneCard_impl will return false.
        assert(!oops_into_cset, "should not see true here");
      }
    }
  }

  if (!defer) {
    oops_into_cset =
      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
    // We should only be detecting that the card contains references
    // that point into the collection set if the current thread is
    // a GC worker thread.
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;
}

class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};

class PrintRSThreadVTimeClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* t) {
    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
    gclog_or_tty->print(" %5.2f", crt->vtime_accum());
  }
};

void G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
                         _conc_refine_cards);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" Conc RS threads times(s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print(" ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  HRRSStatsIter blk;
  g1->heap_region_iterate(&blk);
  gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
1038 " Max = " SIZE_FORMAT "K.", 1039 blk.total_mem_sz()/K, blk.max_mem_sz()/K); 1040 gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K," 1041 " free_lists = " SIZE_FORMAT "K.", 1042 HeapRegionRemSet::static_mem_size()/K, 1043 HeapRegionRemSet::fl_mem_size()/K); 1044 gclog_or_tty->print_cr(" %d occupied cards represented.", 1045 blk.occupied()); 1046 gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )" 1047 ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.", 1048 blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(), 1049 (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K, 1050 (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K); 1051 gclog_or_tty->print_cr(" Did %d coarsenings.", HeapRegionRemSet::n_coarsenings()); 1052 } 1053 1054 void G1RemSet::prepare_for_verify() { 1055 if (G1HRRSFlushLogBuffersOnVerify && 1056 (VerifyBeforeGC || VerifyAfterGC) 1057 && !_g1->full_collection()) { 1058 cleanupHRRS(); 1059 _g1->set_refine_cte_cl_concurrency(false); 1060 if (SafepointSynchronize::is_at_safepoint()) { 1061 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1062 dcqs.concatenate_logs(); 1063 } 1064 bool cg1r_use_cache = _cg1r->use_cache(); 1065 _cg1r->set_use_cache(false); 1066 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); 1067 updateRS(&into_cset_dcq, 0); 1068 _g1->into_cset_dirty_card_queue_set().clear(); 1069 _cg1r->set_use_cache(cg1r_use_cache); 1070 1071 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 1072 } 1073 }