/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif


class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};

class IntoCSRegionClosure: public HeapRegionClosure {
  IntoCSOopClosure _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(g1, blk) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set()) {
      _blk.set_region(r);
      if (r->isHumongous()) {
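        // A humongous object spans one or more whole regions, and only
        // the "starts humongous" region contains the object header, so
        // the object is iterated exactly once, from that region.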
        if (r->startsHumongous()) {
          oop obj = oop(r->bottom());
          obj->oop_iterate(&_blk);
        }
      } else {
        r->oop_before_save_marks_iterate(&_blk);
      }
    }
    return false;
  }
};

void
StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            int worker_i) {
  IntoCSRegionClosure rc(_g1, oc);
  _g1->heap_region_iterate(&rc);
}

class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};

HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _traversal_in_progress(false),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
}

HRInto_G1RemSet::~HRInto_G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}

class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS* _ct_bs;
  int _worker_i;
  int _block_size;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion* r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space* sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent.  We shouldn't look at objects
      // allocated during the GC.
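      // used_region_at_save_marks() is bounded by the marks saved before
      // the GC started, so such objects fall outside this region.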
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }

  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card %p: "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }

  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    _g1h->push_dirty_cards_region(r);
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;

    // We claim cards in blocks so as to reduce contention. The block size
    // is determined by the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
      if (current_card < jump_to_card) continue;
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->is_on_dirty_cards_region_list()) {
        _g1h->push_dirty_cards_region(card_region);
      }

      // If the card is dirty, then we will scan it during updateRS.
      if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
        // We mark the card as "claimed" lazily (so races are possible but
        // they're benign), which reduces the number of duplicate scans
        // (the rsets of the regions in the cset can intersect).
        if (!_ct_bs->is_card_claimed(card_index)) {
          _ct_bs->set_card_claimed(card_index);
          scanCard(card_index, card_region);
        }
      }
    }
    if (!_try_claimed) {
      hrrs->set_iter_complete();
    }
    return false;
  }
  // Set all cards back to clean.
  void cleanup() { _g1h->cleanUpCardTable(); }
  size_t cards_done() { return _cards_done; }
  size_t cards_looked_up() { return _cards; }
};

// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//   n collection set regions
//   p threads
// Then thread t will start at region t * floor(n/p)
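// For example, with n = 10 collection set regions and p = 4 threads,
// floor(n/p) = 2, so threads 0..3 start at regions 0, 2, 4 and 6.
// collection_set_iterate_from() wraps around the collection set, and
// card claiming makes overlapping scans benign, so every region is
// still covered.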

HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind      = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}

void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion* startRegion = calculateStartRegion(worker_i);

  ScanRSClosure scanRScl(oc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

  assert(_cards_scanned != NULL, "invariant");
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");

    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
  double start = os::elapsedTime();
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);

  // Now there should be no dirty cards.
  if (G1RSLogCheckCardTable) {
    CountNonCleanMemRegionClosure cl(_g1);
    _ct_bs->mod_card_iterate(&cl);
    // XXX This isn't true any more: keeping cards of young regions
    // marked dirty broke it. Need some reasonable fix.
    guarantee(cl.n() == 0, "Card table should be clean.");
  }

  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}

#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() { return _count; }
};
#endif

class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    // Find the highest non-empty bin, stopping at 0 so we never read
    // _histo[-1] when every bin is empty.
    int mx = N;
    while (mx > 0) {
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr("  <= %8d   %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr("  %8d - %8d   %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr("  > %8d   %8d", (1 << (MIN+mx-2)) + 1, _histo[mx-1]);
  }
};

void HRInto_G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                             int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  // We cache the 'oc' closure in the appropriate slot of
  // _cset_rs_update_cl for this worker.
  assert(worker_i < (int)n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are:
  // * passed to the DirtyCardQueueSet that is used to manage deferred
  //   RSet updates, or
  // * scanned for references that point into the collection set
  //   and the RSet of the corresponding region in the collection set
  //   is updated immediately.
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
    _g1p->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, worker_i);
  } else {
    _g1p->record_scan_rs_time(worker_i, 0.0);
  }

  // We now clear the cached values of _cset_rs_update_cl for this worker.
  _cset_rs_update_cl[worker_i] = NULL;
}

void HRInto_G1RemSet::
prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->collection_set_iterate(&cl);
#endif
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  assert(!_traversal_in_progress, "Invariant between iterations.");
  set_traversal(true);
  if (ParallelGCThreads > 0) {
    _seq_task->set_par_threads((int)n_workers());
  }
  guarantee(_cards_scanned == NULL, "invariant");
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}


class cleanUpIteratorsClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    hrrs->init_for_par_iteration();
    return false;
  }
};

// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
// fields in each oop; when it finds an oop that points into the collection
// set, the RSet for the region containing the referenced object is updated.
// Note: _par_traversal_in_progress in the G1RemSet must be FALSE; otherwise
// the UpdateRSetImmediate closure will cause cards to be enqueued on to
// the DCQS that we're iterating over, causing an infinite loop.
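// This closure is only applied on the evacuation-failure path in
// cleanup_after_oops_into_collection_set_do(), and only when
// G1DeferredRSUpdate is disabled.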
class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
public:
  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
                                          CardTableModRefBS* bs):
    _g1(g1), _ct_bs(bs)
  { }

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
    assert(r != NULL, "unexpected null");

    // Scan oops in the card looking for references into the collection set
    HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
    MemRegion scanRegion(start, end);

    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);

    // We can pass false as the "filter_young" parameter here as:
    // * we should be in a STW pause,
    // * the DCQS to which this closure is applied is used to hold
    //   references that point into the collection set from the prior
    //   RSet updating,
    // * the post-write barrier shouldn't be logging updates to young
    //   regions (but there is a situation where this can happen - see
    //   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
    //   that should not be applicable here), and
    // * during actual RSet updating, the filtering of cards in young
    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
    //   employed.
    // As a result, when this closure is applied to "refs into cset"
    // DCQS, we shouldn't see any cards in young regions.
    update_rs_cl.set_region(r);
    HeapWord* stop_point =
      r->oops_on_card_seq_iterate_careful(scanRegion,
                                          &filter_then_update_rs_cset_oop_cl,
                                          false /* filter_young */);

    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point
    assert(stop_point == NULL, "saw an unallocated region");
    return true;
  }
};

void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee(_cards_scanned != NULL, "invariant");
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  set_traversal(false);

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.
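    // Two recovery strategies, selected by G1DeferredRSUpdate: either hand
    // the "refs into cset" buffers over for deferred processing, or scan
    // them now with UpdateRSetCardTableEntryIntoCSetClosure.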

    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {

      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();

  assert(!_traversal_in_progress, "Invariant between iterations.");
}

class UpdateRSObjectClosure: public ObjectClosure {
  UpdateRSOopClosure* _update_rs_oop_cl;
public:
  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    _update_rs_oop_cl(update_rs_oop_cl) {}
  void do_object(oop obj) {
    obj->oop_iterate(_update_rs_oop_cl);
  }
};

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}


static IntHistogram out_of_histo(50, 50);

class TriggerClosure : public OopClosure {
  bool _trigger;
public:
  TriggerClosure() : _trigger(false) { }
  bool value() const { return _trigger; }
  template <class T> void do_oop_nv(T* p) { _trigger = true; }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class InvokeIfNotTriggeredClosure: public OopClosure {
  TriggerClosure* _t;
  OopClosure* _oc;
public:
  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
    _t(t), _oc(oc) { }
  template <class T> void do_oop_nv(T* p) {
    if (!_t->value()) _oc->do_oop(p);
  }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  Mux2Closure(OopClosure* c1, OopClosure* c2) : _c1(c1), _c2(c2) { }
  template <class T> void do_oop_nv(T* p) {
    _c1->do_oop(p); _c2->do_oop(p);
  }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                   bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);

  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                                                        (check_for_refs_into_cset ?
                                                         (OopClosure*)&mux :
                                                         (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering only after it has been determined that there has been
  // an actual allocation in this region, making it safe to check the
  // young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
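    // Only re-dirty and re-enqueue the card ourselves if no one else
    // already has; otherwise it is already on a queue and will be
    // revisited.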
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}

bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                              bool check_for_refs_into_cset) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    // Again no need to return that this card contains refs that
    // point into the collection set.
    return false; // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false).
To avoid 894 // having a data structure (updates to which would need a lock) 895 // to hold these unprocessed dirty cards, we need to immediately 896 // process card_ptr. The actions needed to be taken on return 897 // from cache_insert are summarized in the following table: 898 // 899 // res defer action 900 // -------------------------------------------------------------- 901 // null false card evicted from _card_counts & replaced with 902 // card_ptr; evicted ptr added to hot cache. 903 // No need to process res; immediately process card_ptr 904 // 905 // null true card not evicted from _card_counts; card_ptr added 906 // to hot cache. 907 // Nothing to do. 908 // 909 // non-null false card evicted from _card_counts & replaced with 910 // card_ptr; evicted ptr is currently "cold" or 911 // caused an eviction from the hot cache. 912 // Immediately process res; process card_ptr. 913 // 914 // non-null true card not evicted from _card_counts; card_ptr is 915 // currently cold, or caused an eviction from hot 916 // cache. 917 // Immediately process res; no need to process card_ptr. 918 919 920 jbyte* res = card_ptr; 921 bool defer = false; 922 923 // This gets set to true if the card being refined has references 924 // that point into the collection set. 925 bool oops_into_cset = false; 926 927 if (_cg1r->use_cache()) { 928 jbyte* res = _cg1r->cache_insert(card_ptr, &defer); 929 if (res != NULL && (res != card_ptr || defer)) { 930 start = _ct_bs->addr_for(res); 931 r = _g1->heap_region_containing(start); 932 if (r == NULL) { 933 assert(_g1->is_in_permanent(start), "Or else where?"); 934 } else { 935 // Checking whether the region we got back from the cache 936 // is young here is inappropriate. The region could have been 937 // freed, reallocated and tagged as young while in the cache. 938 // Hence we could see its young type change at any time. 939 // 940 // Process card pointer we get back from the hot card cache. This 941 // will check whether the region containing the card is young 942 // _after_ checking that the region has been allocated from. 943 oops_into_cset = concurrentRefineOneCard_impl(res, worker_i, 944 false /* check_for_refs_into_cset */); 945 // The above call to concurrentRefineOneCard_impl is only 946 // performed if the hot card cache is enabled. This cache is 947 // disabled during an evacuation pause - which is the only 948 // time when we need know if the card contains references 949 // that point into the collection set. Also when the hot card 950 // cache is enabled, this code is executed by the concurrent 951 // refine threads - rather than the GC worker threads - and 952 // concurrentRefineOneCard_impl will return false. 953 assert(!oops_into_cset, "should not see true here"); 954 } 955 } 956 } 957 958 if (!defer) { 959 oops_into_cset = 960 concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset); 961 // We should only be detecting that the card contains references 962 // that point into the collection set if the current thread is 963 // a GC worker thread. 
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;
}

class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};

class PrintRSThreadVTimeClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* t) {
    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
    gclog_or_tty->print(" %5.2f", crt->vtime_accum());
  }
};

void HRInto_G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
                         _conc_refine_cards);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                           (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                           (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" Conc RS threads times (s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print(" ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  if (G1UseHRIntoRS) {
    HRRSStatsIter blk;
    g1->heap_region_iterate(&blk);
    gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
1047 " Max = " SIZE_FORMAT "K.", 1048 blk.total_mem_sz()/K, blk.max_mem_sz()/K); 1049 gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K," 1050 " free_lists = " SIZE_FORMAT "K.", 1051 HeapRegionRemSet::static_mem_size()/K, 1052 HeapRegionRemSet::fl_mem_size()/K); 1053 gclog_or_tty->print_cr(" %d occupied cards represented.", 1054 blk.occupied()); 1055 gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )" 1056 ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.", 1057 blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(), 1058 (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K, 1059 (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K); 1060 gclog_or_tty->print_cr(" Did %d coarsenings.", 1061 HeapRegionRemSet::n_coarsenings()); 1062 1063 } 1064 } 1065 1066 void HRInto_G1RemSet::prepare_for_verify() { 1067 if (G1HRRSFlushLogBuffersOnVerify && 1068 (VerifyBeforeGC || VerifyAfterGC) 1069 && !_g1->full_collection()) { 1070 cleanupHRRS(); 1071 _g1->set_refine_cte_cl_concurrency(false); 1072 if (SafepointSynchronize::is_at_safepoint()) { 1073 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1074 dcqs.concatenate_logs(); 1075 } 1076 bool cg1r_use_cache = _cg1r->use_cache(); 1077 _cg1r->set_use_cache(false); 1078 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); 1079 updateRS(&into_cset_dcq, 0); 1080 _g1->into_cset_dirty_card_queue_set().clear(); 1081 _cg1r->set_use_cache(cg1r_use_cache); 1082 1083 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 1084 } 1085 }