/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl"

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define SCAN_ONLY_VERBOSE 0
// CURRENT STATUS
// This file is under construction.  Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock.  This happens in
// mem_allocate_work, which all such allocation functions call.
// (Note that this does not apply to TLAB allocation, which is not part
// of this interface: it is done by clients of this interface.)

// Local to this file.

// Finds the first HeapRegion.
// No longer used, but might be handy someday.

class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};


class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};


class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};

class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};

YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    _length(0), _scan_only_length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;

  ++_survivor_length;
}

HeapRegion* YoungList::pop_region() {
  while (_head != NULL) {
    assert( length() > 0, "list should not be empty" );
    HeapRegion* ret = _head;
    _head = ret->get_next_young_region();
    ret->set_next_young_region(NULL);
    --_length;
    assert(ret->is_young(), "region should be very young");

    // Replace 'Survivor' region type with 'Young'. So the region will
    // be treated as a young region and will not be 'confused' with
    // newly created survivor regions.
    if (ret->is_survivor()) {
      ret->set_young();
    }

    if (!ret->is_scan_only()) {
      return ret;
    }

    // scan-only, we'll add it to the scan-only list
    if (_scan_only_tail == NULL) {
      guarantee( _scan_only_head == NULL, "invariant" );

      _scan_only_head = ret;
      _curr_scan_only = ret;
    } else {
      guarantee( _scan_only_head != NULL, "invariant" );
      _scan_only_tail->set_next_young_region(ret);
    }
    guarantee( ret->get_next_young_region() == NULL, "invariant" );
    _scan_only_tail = ret;

    // no need to be tagged as scan-only any more
    ret->set_young();

    ++_scan_only_length;
  }
  assert( length() == 0, "list should be empty" );
  return NULL;
}

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_scan_only_head);
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _length is %d",
                           length, _length);
  }

  bool scan_only_ret = true;
  length = 0;
  curr = _scan_only_head;
  last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      scan_only_ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  scan_only_ret = scan_only_ret && (length == _scan_only_length);

  if ( (last != _scan_only_tail) ||
       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
    gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
    scan_only_ret = false;
  }

  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
    scan_only_ret = false;
  }

  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
                           length, _scan_only_length);
  }

  return ret && scan_only_ret;
}

bool YoungList::check_list_empty(bool ignore_scan_only_list,
                                 bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  if (ignore_scan_only_list)
    return ret;

  bool scan_only_ret = true;
  if (_scan_only_length != 0) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
                           _scan_only_length);
    scan_only_ret = false;
  }
  if (_scan_only_head != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
    scan_only_ret = false;
  }
  if (_scan_only_tail != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
    scan_only_ret = false;
  }
  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
  }

  return ret && scan_only_ret;
}

void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  _sampled_rs_lengths += _curr->rem_set()->occupied();
  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}

void
YoungList::reset_auxilary_lists() {
  // We could have just "moved" the scan-only list to the young list.
  // However, the scan-only list is ordered according to the region
  // age in descending order, so, by moving one entry at a time, we
  // ensure that it is recreated in ascending order.

  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  if (_survivor_head != NULL) {
    _head = _survivor_head;
    _length = _survivor_length + _scan_only_length;
    _survivor_tail->set_next_young_region(_scan_only_head);
  } else {
    _head = _scan_only_head;
    _length = _scan_only_length;
  }

  for (HeapRegion* curr = _scan_only_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    curr->recalculate_age_in_surv_rate_group();
  }
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;
  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
  HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
  const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, s-o: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_scan_only(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->cg1rThread()->stop();
  _czft->stop();
  _cmThread->stop();
}


void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

// Finds a HeapRegion that can be used to allocate a given size of block.


HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL && res->is_empty()) _free_regions--;
  assert(res == NULL ||
         (!res->isHumongous() &&
          (!zero_filled ||
           res->zero_fill_state() == HeapRegion::Allocated)),
         "Non-young alloc Regions must be zero filled (and non-H)");

  if (G1TraceRegions) {
    if (res != NULL) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }

  return res;
}

HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
                                                         size_t word_size,
                                                         bool zero_filled) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      alloc_region->set_survivor();
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list.  (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;

  // Special case if < one region???

  // Remember the ft size.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help.  Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}

HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value.  If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool is_noref,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);
      if (result != NULL) {
        // attempt_allocation should have unlocked the heap lock
        assert(is_in(result), "result not in heap");
        return result;
      }
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
    }
  }
}

void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
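    // An empty current region can go straight back to the free list;
    // otherwise its used bytes are folded into the running total.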
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};


class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  UpdateRSOopClosure _cl;
  int _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};

void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                    size_t word_size) {
  ResourceMark rm;

  if (full && DisableExplicitGC) {
    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  if (GC_locker::is_active()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  {
    IsGCActiveMark x;

    // Timing
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(full ?
"Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); 880 881 double start = os::elapsedTime(); 882 GCOverheadReporter::recordSTWStart(start); 883 g1_policy()->record_full_collection_start(); 884 885 gc_prologue(true); 886 increment_total_collections(); 887 888 size_t g1h_prev_used = used(); 889 assert(used() == recalculate_used(), "Should be equal"); 890 891 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 892 HandleMark hm; // Discard invalid handles created during verification 893 prepare_for_verify(); 894 gclog_or_tty->print(" VerifyBeforeGC:"); 895 Universe::verify(true); 896 } 897 assert(regions_accounted_for(), "Region leakage!"); 898 899 COMPILER2_PRESENT(DerivedPointerTable::clear()); 900 901 // We want to discover references, but not process them yet. 902 // This mode is disabled in 903 // instanceRefKlass::process_discovered_references if the 904 // generation does some collection work, or 905 // instanceRefKlass::enqueue_discovered_references if the 906 // generation returns without doing any work. 907 ref_processor()->disable_discovery(); 908 ref_processor()->abandon_partial_discovery(); 909 ref_processor()->verify_no_references_recorded(); 910 911 // Abandon current iterations of concurrent marking and concurrent 912 // refinement, if any are in progress. 913 concurrent_mark()->abort(); 914 915 // Make sure we'll choose a new allocation region afterwards. 916 abandon_cur_alloc_region(); 917 assert(_cur_alloc_region == NULL, "Invariant."); 918 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); 919 tear_down_region_lists(); 920 set_used_regions_to_need_zero_fill(); 921 if (g1_policy()->in_young_gc_mode()) { 922 empty_young_list(); 923 g1_policy()->set_full_young_gcs(true); 924 } 925 926 // Temporarily make reference _discovery_ single threaded (non-MT). 927 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); 928 929 // Temporarily make refs discovery atomic 930 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); 931 932 // Temporarily clear _is_alive_non_header 933 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); 934 935 ref_processor()->enable_discovery(); 936 ref_processor()->setup_policy(clear_all_soft_refs); 937 938 // Do collection work 939 { 940 HandleMark hm; // Discard invalid handles created during gc 941 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); 942 } 943 // Because freeing humongous regions may have added some unclean 944 // regions, it is necessary to tear down again before rebuilding. 945 tear_down_region_lists(); 946 rebuild_region_lists(); 947 948 _summary_bytes_used = recalculate_used(); 949 950 ref_processor()->enqueue_discovered_references(); 951 952 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 953 954 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 955 HandleMark hm; // Discard invalid handles created during verification 956 gclog_or_tty->print(" VerifyAfterGC:"); 957 prepare_for_verify(); 958 Universe::verify(false); 959 } 960 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); 961 962 reset_gc_time_stamp(); 963 // Since everything potentially moved, we will clear all remembered 964 // sets, and clear all cards. Later we will rebuild remebered 965 // sets. We will also reset the GC time stamps of the regions. 966 PostMCRemSetClearClosure rs_clear(mr_bs()); 967 heap_region_iterate(&rs_clear); 968 969 // Resize the heap if necessary. 970 resize_if_necessary_after_full_collection(full ? 
                                              0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.
    if (ParallelGCThreads > 0) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    double end = os::elapsedTime();
    GCOverheadReporter::recordSTWEnd(end);
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Abandon concurrent refinement.  This must happen last: in the
    // dirty-card logging system, some cards may be dirty by weak-ref
    // processing, and may be enqueued.  But the whole card table is
    // dirtied, so this should abandon those logs, and set "do_traversal"
    // to true.
    concurrent_g1_refine()->set_pya_restart();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    assert( check_young_list_empty(false, false),
            "young list should be empty at this point");
  }
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, clear_all_soft_refs, 0);
}

// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);

  // Don't shrink less than the initial size.
  minimum_desired_capacity =
    MAX2(minimum_desired_capacity,
         collector_policy()->initial_heap_byte_size());
  maximum_desired_capacity =
    MAX2(maximum_desired_capacity,
         collector_policy()->initial_heap_byte_size());

  // We are failing here because minimum_desired_capacity is
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity();
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  capacity: %6.1fK"
                           "  minimum_desired_capacity: %6.1fK"
                           "  maximum_desired_capacity: %6.1fK",
                           capacity() / (double) K,
                           minimum_desired_capacity / (double) K,
                           maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_after_gc   : %6.1fK"
                           "  used_after_gc   : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity() < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity() > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  shrinking:"
                             "  initSize: %.1fK"
                             "  maximum_desired_capacity: %.1fK",
                             collector_policy()->initial_heap_byte_size() / (double) K,
                             maximum_desired_capacity / (double) K);
      gclog_or_tty->print_cr("  "
                             "  shrink_bytes: %.1fK",
                             shrink_bytes / (double) K);
    }
  }
}


HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
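  // (This second collection passes clear_all_soft_refs == true.)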
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Attempts to expand the heap sufficiently to support an allocation of the
// given "word_size".  If successful, performs the allocation and returns the
// address of the allocated block; otherwise returns "NULL".

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}

void
G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
                                                   size_t& pre_used,
                                                   size_t& cleared_h,
                                                   size_t& freed_regions,
                                                   UncleanRegionList* list,
                                                   bool par) {
  assert(!hr->continuesHumongous(), "should have filtered these out");
  size_t res = 0;
  if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) {
    if (!hr->is_young()) {
      if (G1PolicyVerbose > 0)
        gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
                               " during cleanup", hr, hr->used());
      free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
    }
  }
}

// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions();
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}

// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _popular_object_boundary(NULL),
  _cur_pop_hr_index(0),
  _popular_regions_to_be_evacuated(NULL),
  _pop_obj_rc_at_copy(),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL)
{
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;
  }
  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}

jint G1CollectedHeap::initialize() {
  os::enable_vtime();

  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

  // We allocate this in any case, but it will do no work if the command
  // line param is off.
  _cg1r = new ConcurrentG1Refine();

  // Reserve the maximum.
  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  // Includes the perm-gen.
  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                        HeapRegion::GrainBytes,
                        false /*ism*/);

  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something's in the heap.  (I've actually seen this
  // happen in asserts: DLD.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;

  _num_humongous_regions = 0;

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  set_barrier_set(rem_set()->bs());
  if (barrier_set()->is_a(BarrierSet::ModRef)) {
    _mr_bs = (ModRefBarrierSet*)_barrier_set;
  } else {
    vm_exit_during_initialization("G1 requires a mod ref bs.");
    return JNI_ENOMEM;
  }

  // Also create a G1 rem set.
  if (G1UseHRIntoRS) {
    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
    } else {
      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
      return JNI_ENOMEM;
    }
  } else {
    _g1_rem_set = new StupidG1RemSet(this);
  }

  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
                           g1_rs.size()/HeapWordSize);
  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);

  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());

  _g1_storage.initialize(g1_rs, 0);
  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  _g1_max_committed = _g1_committed;
  _hrs = new HeapRegionSeq(_expansion_regions);
  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  guarantee(_cur_alloc_region == NULL, "from constructor");

  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                             heap_word_size(init_byte_size));

  _g1h = this;

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new ConcurrentMark(heap_rs, (int) max_regions());
  _cmThread = _cm->cmThread();

  // ...and the concurrent zero-fill thread, if necessary.
  if (G1ConcZeroFill) {
    _czft = new ConcurrentZFThread();
  }


  // Allocate the popular regions; take them off free lists.
  size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes;
  expand(pop_byte_size);
  _popular_object_boundary =
    _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords);
  for (int i = 0; i < G1NumPopularRegions; i++) {
    HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords);
    // assert(hr != NULL && hr->bottom() < _popular_object_boundary,
    //        "Should be enough, and all should be below boundary.");
    hr->set_popular(true);
  }
  assert(_cur_pop_hr_index == 0, "Start allocating at the first region.");

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the rest of the initial heap size.
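  // (pop_byte_size was already committed for the popular regions above,
  // so only the remainder of the initial size is added here.)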
  expand(init_byte_size - pop_byte_size);

  // Perform any initialization actions delegated to the policy.
  g1_policy()->init();

  g1_policy()->note_start_of_mark_thread();

  _refine_cte_cl =
    new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                    g1_rem_set(),
                                    concurrent_g1_refine());
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);

  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               0,
                                               Shared_SATB_Q_lock);
  if (G1RSBarrierUseQueue) {
    JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                  DirtyCardQ_FL_lock,
                                                  G1DirtyCardQueueMax,
                                                  Shared_DirtyCardQ_lock);
  }
  if (G1DeferredRSUpdate) {
    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                      DirtyCardQ_FL_lock,
                                      0,
                                      Shared_DirtyCardQ_lock,
                                      &JavaThread::dirty_card_queue_set());
  }
  // In case we're keeping closure specialization stats, initialize those
  // counts and that mechanism.
  SpecializationStats::clear();

  _gc_alloc_region_list = NULL;

  // Do later initialization work for concurrent refinement.
  _cg1r->init();

  const char* group_names[] = { "CR", "ZF", "CM", "CL" };
  GCOverheadReporter::initGCOverheadReporter(4, group_names);

  return JNI_OK;
}

void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
                     mr,    // span
                     false, // Reference discovery is not atomic
                            // (though it shouldn't matter here.)
                     true,  // mt_discovery
                     NULL,  // is alive closure: need to fill this in for efficiency
                     ParallelGCThreads,
                     ParallelRefProcEnabled,
                     true); // Setting next fields of discovered
                            // lists requires a barrier.
}

size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}

void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
                                                 int worker_i) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  // Finish up the queue...
  if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
                                                            g1_rem_set());
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}


// Computes the sum of the storage used by the various regions.

size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  if (_cur_alloc_region != NULL)
    result += _cur_alloc_region->used();
  return result;
}

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _used += r->used();
    }
    return false;
  }
  size_t result() { return _used; }
};

size_t G1CollectedHeap::recalculate_used() const {
  SumUsedClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}

#ifndef PRODUCT
class SumUsedRegionsClosure: public HeapRegionClosure {
  size_t _num;
public:
  // _num is initialized to G1NumPopularRegions to account for the popular regions
  SumUsedRegionsClosure() : _num(G1NumPopularRegions) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
      _num += 1;
    }
    return false;
  }
  size_t result() { return _num; }
};

size_t G1CollectedHeap::recalculate_used_regions() const {
  SumUsedRegionsClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}
#endif // PRODUCT

size_t G1CollectedHeap::unsafe_max_alloc() {
  if (_free_regions > 0) return HeapRegion::GrainBytes;
  // otherwise, is there space in the current allocation region?

  // We need to store the current allocation region in a local variable
  // here. The problem is that this method doesn't take any locks and
  // there may be other threads which overwrite the current allocation
  // region field. attempt_allocation(), for example, sets it to NULL
  // and this can happen *after* the NULL check here but before the call
  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  // to be a problem in the optimized build, since the two loads of the
  // current allocation region field are optimized away.
  HeapRegion* car = _cur_alloc_region;

  // FIXME: should iterate over all regions?
  if (car == NULL) {
    return 0;
  }
  return car->free();
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause);
}

void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false);  // don't clear all soft refs
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}


void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
  // Don't want to do a GC until cleanup is completed.
1681 wait_for_cleanup_complete(); 1682 1683 // Read the GC count while holding the Heap_lock 1684 int gc_count_before = SharedHeap::heap()->total_collections(); 1685 { 1686 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 1687 VM_G1CollectFull op(gc_count_before, cause); 1688 VMThread::execute(&op); 1689 } 1690 } 1691 1692 bool G1CollectedHeap::is_in(const void* p) const { 1693 if (_g1_committed.contains(p)) { 1694 HeapRegion* hr = _hrs->addr_to_region(p); 1695 return hr->is_in(p); 1696 } else { 1697 return _perm_gen->as_gen()->is_in(p); 1698 } 1699 } 1700 1701 // Iteration functions. 1702 1703 // Iterates an OopClosure over all ref-containing fields of objects 1704 // within a HeapRegion. 1705 1706 class IterateOopClosureRegionClosure: public HeapRegionClosure { 1707 MemRegion _mr; 1708 OopClosure* _cl; 1709 public: 1710 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) 1711 : _mr(mr), _cl(cl) {} 1712 bool doHeapRegion(HeapRegion* r) { 1713 if (! r->continuesHumongous()) { 1714 r->oop_iterate(_cl); 1715 } 1716 return false; 1717 } 1718 }; 1719 1720 void G1CollectedHeap::oop_iterate(OopClosure* cl) { 1721 IterateOopClosureRegionClosure blk(_g1_committed, cl); 1722 _hrs->iterate(&blk); 1723 } 1724 1725 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { 1726 IterateOopClosureRegionClosure blk(mr, cl); 1727 _hrs->iterate(&blk); 1728 } 1729 1730 // Iterates an ObjectClosure over all objects within a HeapRegion. 1731 1732 class IterateObjectClosureRegionClosure: public HeapRegionClosure { 1733 ObjectClosure* _cl; 1734 public: 1735 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1736 bool doHeapRegion(HeapRegion* r) { 1737 if (! r->continuesHumongous()) { 1738 r->object_iterate(_cl); 1739 } 1740 return false; 1741 } 1742 }; 1743 1744 void G1CollectedHeap::object_iterate(ObjectClosure* cl) { 1745 IterateObjectClosureRegionClosure blk(cl); 1746 _hrs->iterate(&blk); 1747 } 1748 1749 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { 1750 // FIXME: is this right? 1751 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); 1752 } 1753 1754 // Calls a SpaceClosure on a HeapRegion. 1755 1756 class SpaceClosureRegionClosure: public HeapRegionClosure { 1757 SpaceClosure* _cl; 1758 public: 1759 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} 1760 bool doHeapRegion(HeapRegion* r) { 1761 _cl->do_space(r); 1762 return false; 1763 } 1764 }; 1765 1766 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { 1767 SpaceClosureRegionClosure blk(cl); 1768 _hrs->iterate(&blk); 1769 } 1770 1771 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { 1772 _hrs->iterate(cl); 1773 } 1774 1775 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, 1776 HeapRegionClosure* cl) { 1777 _hrs->iterate_from(r, cl); 1778 } 1779 1780 void 1781 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { 1782 _hrs->iterate_from(idx, cl); 1783 } 1784 1785 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } 1786 1787 void 1788 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 1789 int worker, 1790 jint claim_value) { 1791 const size_t regions = n_regions(); 1792 const size_t worker_num = (ParallelGCThreads > 0 ? 
                                                         ParallelGCThreads : 1);
  // try to spread out the starting points of the workers
  const size_t start_index = regions / worker_num * (size_t) worker;

  // each worker will actually look at all regions
  for (size_t count = 0; count < regions; ++count) {
    const size_t index = (start_index + count) % regions;
    assert(0 <= index && index < regions, "sanity");
    HeapRegion* r = region_at(index);
    // we'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (r->claimHeapRegion(claim_value)) {
      // success!
      assert(!r->continuesHumongous(), "sanity");
      if (r->startsHumongous()) {
        // If the region is "starts humongous" we'll iterate over its
        // "continues humongous" regions first; in fact we'll do them
        // first. The order is important: in one case, calling the
        // closure on the "starts humongous" region might de-allocate
        // and clear all its "continues humongous" regions and, as a
        // result, we might end up processing them twice. So, we'll do
        // them first (notice: most closures will ignore them anyway) and
        // then we'll do the "starts humongous" region.
        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
          HeapRegion* chr = region_at(ch_index);

          // if the region has already been claimed or it's not
          // "continues humongous" we're done
          if (chr->claim_value() == claim_value ||
              !chr->continuesHumongous()) {
            break;
          }

          // No one should have claimed it directly. We can be sure of
          // that, given that we claimed its "starts humongous" region.
          assert(chr->claim_value() != claim_value, "sanity");
          assert(chr->humongous_start_region() == r, "sanity");

          if (chr->claimHeapRegion(claim_value)) {
            // we should always be able to claim it; no one else should
            // be trying to claim this region

            bool res2 = cl->doHeapRegion(chr);
            assert(!res2, "Should not abort");

            // Right now, this holds (i.e., no closure that actually
            // does something with "continues humongous" regions
            // clears them). We might have to weaken it in the future,
            // but let's leave these two asserts here for extra safety.
            assert(chr->continuesHumongous(), "should still be the case");
            assert(chr->humongous_start_region() == r, "sanity");
          } else {
            guarantee(false, "we should not reach here");
          }
        }
      }

      assert(!r->continuesHumongous(), "sanity");
      bool res = cl->doHeapRegion(r);
      assert(!res, "Should not abort");
    }
  }
}

class ResetClaimValuesClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->set_claim_value(HeapRegion::InitialClaimValue);
    return false;
  }
};

void
G1CollectedHeap::reset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  heap_region_iterate(&blk);
}

#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.
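// (Worked example of the chunked claiming scheme being checked here --
// illustrative numbers only, not taken from any particular run: with
// regions == 100 and worker_num == 4, start_index == 25 * worker, so the
// workers begin their scans at regions 0, 25, 50 and 75.  Every worker still
// visits all 100 indices modulo 100, but because claimHeapRegion() installs
// the claim value atomically, each region's closure work is performed by
// exactly one worker.  Afterwards every region should carry that claim
// value, which is what the closure below verifies.)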
1880 1881 class CheckClaimValuesClosure : public HeapRegionClosure { 1882 private: 1883 jint _claim_value; 1884 size_t _failures; 1885 HeapRegion* _sh_region; 1886 public: 1887 CheckClaimValuesClosure(jint claim_value) : 1888 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 1889 bool doHeapRegion(HeapRegion* r) { 1890 if (r->claim_value() != _claim_value) { 1891 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 1892 "claim value = %d, should be %d", 1893 r->bottom(), r->end(), r->claim_value(), 1894 _claim_value); 1895 ++_failures; 1896 } 1897 if (!r->isHumongous()) { 1898 _sh_region = NULL; 1899 } else if (r->startsHumongous()) { 1900 _sh_region = r; 1901 } else if (r->continuesHumongous()) { 1902 if (r->humongous_start_region() != _sh_region) { 1903 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 1904 "HS = "PTR_FORMAT", should be "PTR_FORMAT, 1905 r->bottom(), r->end(), 1906 r->humongous_start_region(), 1907 _sh_region); 1908 ++_failures; 1909 } 1910 } 1911 return false; 1912 } 1913 size_t failures() { 1914 return _failures; 1915 } 1916 }; 1917 1918 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 1919 CheckClaimValuesClosure cl(claim_value); 1920 heap_region_iterate(&cl); 1921 return cl.failures() == 0; 1922 } 1923 #endif // ASSERT 1924 1925 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 1926 HeapRegion* r = g1_policy()->collection_set(); 1927 while (r != NULL) { 1928 HeapRegion* next = r->next_in_collection_set(); 1929 if (cl->doHeapRegion(r)) { 1930 cl->incomplete(); 1931 return; 1932 } 1933 r = next; 1934 } 1935 } 1936 1937 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, 1938 HeapRegionClosure *cl) { 1939 assert(r->in_collection_set(), 1940 "Start region must be a member of the collection set."); 1941 HeapRegion* cur = r; 1942 while (cur != NULL) { 1943 HeapRegion* next = cur->next_in_collection_set(); 1944 if (cl->doHeapRegion(cur) && false) { 1945 cl->incomplete(); 1946 return; 1947 } 1948 cur = next; 1949 } 1950 cur = g1_policy()->collection_set(); 1951 while (cur != r) { 1952 HeapRegion* next = cur->next_in_collection_set(); 1953 if (cl->doHeapRegion(cur) && false) { 1954 cl->incomplete(); 1955 return; 1956 } 1957 cur = next; 1958 } 1959 } 1960 1961 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 1962 return _hrs->length() > 0 ? 
                                              _hrs->at(0) : NULL;
}


Space* G1CollectedHeap::space_containing(const void* addr) const {
  Space* res = heap_region_containing(addr);
  if (res == NULL)
    res = perm_gen()->space_containing(addr);
  return res;
}

HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  Space* sp = space_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_is_obj(addr);
}

bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.
  // Also, no more than half the region size, since we can't allow tlabs to
  // grow big enough to accommodate humongous objects.

  // We need to store it locally, since it might change between when we
  // test for NULL and when we use it later.
  ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  if (cur_alloc_space == NULL) {
    return HeapRegion::GrainBytes/2;
  } else {
    return MAX2(MIN2(cur_alloc_space->free(),
                     (size_t)(HeapRegion::GrainBytes/2)),
                (size_t)MinTLABSize);
  }
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
  bool dummy;
  return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
}

bool G1CollectedHeap::allocs_are_zero_filled() {
  return false;
}

size_t G1CollectedHeap::large_typearray_limit() {
  // FIXME
  return HeapRegion::GrainBytes/HeapWordSize;
}

size_t G1CollectedHeap::max_capacity() const {
  return _g1_committed.byte_size();
}

jlong G1CollectedHeap::millis_since_last_gc() {
  // assert(false, "NYI");
  return 0;
}


void G1CollectedHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !
UseTLAB) { 2044 ensure_parsability(false); 2045 } 2046 g1_rem_set()->prepare_for_verify(); 2047 } 2048 2049 class VerifyLivenessOopClosure: public OopClosure { 2050 G1CollectedHeap* g1h; 2051 public: 2052 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { 2053 g1h = _g1h; 2054 } 2055 void do_oop(narrowOop *p) { 2056 guarantee(false, "NYI"); 2057 } 2058 void do_oop(oop *p) { 2059 oop obj = *p; 2060 assert(obj == NULL || !g1h->is_obj_dead(obj), 2061 "Dead object referenced by a not dead object"); 2062 } 2063 }; 2064 2065 class VerifyObjsInRegionClosure: public ObjectClosure { 2066 G1CollectedHeap* _g1h; 2067 size_t _live_bytes; 2068 HeapRegion *_hr; 2069 public: 2070 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { 2071 _g1h = G1CollectedHeap::heap(); 2072 } 2073 void do_object(oop o) { 2074 VerifyLivenessOopClosure isLive(_g1h); 2075 assert(o != NULL, "Huh?"); 2076 if (!_g1h->is_obj_dead(o)) { 2077 o->oop_iterate(&isLive); 2078 if (!_hr->obj_allocated_since_prev_marking(o)) 2079 _live_bytes += (o->size() * HeapWordSize); 2080 } 2081 } 2082 size_t live_bytes() { return _live_bytes; } 2083 }; 2084 2085 class PrintObjsInRegionClosure : public ObjectClosure { 2086 HeapRegion *_hr; 2087 G1CollectedHeap *_g1; 2088 public: 2089 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { 2090 _g1 = G1CollectedHeap::heap(); 2091 }; 2092 2093 void do_object(oop o) { 2094 if (o != NULL) { 2095 HeapWord *start = (HeapWord *) o; 2096 size_t word_sz = o->size(); 2097 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT 2098 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", 2099 (void*) o, word_sz, 2100 _g1->isMarkedPrev(o), 2101 _g1->isMarkedNext(o), 2102 _hr->obj_allocated_since_prev_marking(o)); 2103 HeapWord *end = start + word_sz; 2104 HeapWord *cur; 2105 int *val; 2106 for (cur = start; cur < end; cur++) { 2107 val = (int *) cur; 2108 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); 2109 } 2110 } 2111 } 2112 }; 2113 2114 class VerifyRegionClosure: public HeapRegionClosure { 2115 public: 2116 bool _allow_dirty; 2117 bool _par; 2118 VerifyRegionClosure(bool allow_dirty, bool par = false) 2119 : _allow_dirty(allow_dirty), _par(par) {} 2120 bool doHeapRegion(HeapRegion* r) { 2121 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, 2122 "Should be unclaimed at verify points."); 2123 if (!r->isHumongous() || (r->isHumongous() && r->startsHumongous())) { 2124 VerifyObjsInRegionClosure not_dead_yet_cl(r); 2125 r->verify(_allow_dirty); 2126 r->object_iterate(¬_dead_yet_cl); 2127 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), 2128 "More live objects than counted in last complete marking."); 2129 } 2130 return false; 2131 } 2132 }; 2133 2134 class VerifyRootsClosure: public OopsInGenClosure { 2135 private: 2136 G1CollectedHeap* _g1h; 2137 bool _failures; 2138 2139 public: 2140 VerifyRootsClosure() : 2141 _g1h(G1CollectedHeap::heap()), _failures(false) { } 2142 2143 bool failures() { return _failures; } 2144 2145 void do_oop(narrowOop* p) { 2146 guarantee(false, "NYI"); 2147 } 2148 2149 void do_oop(oop* p) { 2150 oop obj = *p; 2151 if (obj != NULL) { 2152 if (_g1h->is_obj_dead(obj)) { 2153 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " 2154 "points to dead obj "PTR_FORMAT, p, (void*) obj); 2155 obj->print_on(gclog_or_tty); 2156 _failures = true; 2157 } 2158 } 2159 } 2160 }; 2161 2162 // This is the task used for parallel heap verification. 
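// (Each gang worker calls work(worker_i) exactly once; the worker builds a
// private VerifyRegionClosure and walks a disjoint subset of the regions via
// heap_region_par_iterate_chunked() under ParVerifyClaimValue, so no region
// is verified twice.  Illustrative driver sequence, mirroring verify() below;
// a sketch, not new behaviour:
//
//   G1ParVerifyTask task(this, allow_dirty);
//   set_par_threads(workers()->total_workers());
//   workers()->run_task(&task);          // returns once all workers finish
//   set_par_threads(0);
//   reset_heap_region_claim_values();
// )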
2163 2164 class G1ParVerifyTask: public AbstractGangTask { 2165 private: 2166 G1CollectedHeap* _g1h; 2167 bool _allow_dirty; 2168 2169 public: 2170 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : 2171 AbstractGangTask("Parallel verify task"), 2172 _g1h(g1h), _allow_dirty(allow_dirty) { } 2173 2174 void work(int worker_i) { 2175 HandleMark hm; 2176 VerifyRegionClosure blk(_allow_dirty, true); 2177 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, 2178 HeapRegion::ParVerifyClaimValue); 2179 } 2180 }; 2181 2182 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { 2183 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 2184 if (!silent) { gclog_or_tty->print("roots "); } 2185 VerifyRootsClosure rootsCl; 2186 process_strong_roots(false, 2187 SharedHeap::SO_AllClasses, 2188 &rootsCl, 2189 &rootsCl); 2190 rem_set()->invalidate(perm_gen()->used_region(), false); 2191 if (!silent) { gclog_or_tty->print("heapRegions "); } 2192 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { 2193 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2194 "sanity check"); 2195 2196 G1ParVerifyTask task(this, allow_dirty); 2197 int n_workers = workers()->total_workers(); 2198 set_par_threads(n_workers); 2199 workers()->run_task(&task); 2200 set_par_threads(0); 2201 2202 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), 2203 "sanity check"); 2204 2205 reset_heap_region_claim_values(); 2206 2207 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2208 "sanity check"); 2209 } else { 2210 VerifyRegionClosure blk(allow_dirty); 2211 _hrs->iterate(&blk); 2212 } 2213 if (!silent) gclog_or_tty->print("remset "); 2214 rem_set()->verify(); 2215 guarantee(!rootsCl.failures(), "should not have had failures"); 2216 } else { 2217 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); 2218 } 2219 } 2220 2221 class PrintRegionClosure: public HeapRegionClosure { 2222 outputStream* _st; 2223 public: 2224 PrintRegionClosure(outputStream* st) : _st(st) {} 2225 bool doHeapRegion(HeapRegion* r) { 2226 r->print_on(_st); 2227 return false; 2228 } 2229 }; 2230 2231 void G1CollectedHeap::print() const { print_on(gclog_or_tty); } 2232 2233 void G1CollectedHeap::print_on(outputStream* st) const { 2234 PrintRegionClosure blk(st); 2235 _hrs->iterate(&blk); 2236 } 2237 2238 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { 2239 if (ParallelGCThreads > 0) { 2240 workers()->print_worker_threads(); 2241 } 2242 st->print("\"G1 concurrent mark GC Thread\" "); 2243 _cmThread->print(); 2244 st->cr(); 2245 st->print("\"G1 concurrent refinement GC Thread\" "); 2246 _cg1r->cg1rThread()->print_on(st); 2247 st->cr(); 2248 st->print("\"G1 zero-fill GC Thread\" "); 2249 _czft->print_on(st); 2250 st->cr(); 2251 } 2252 2253 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { 2254 if (ParallelGCThreads > 0) { 2255 workers()->threads_do(tc); 2256 } 2257 tc->do_thread(_cmThread); 2258 tc->do_thread(_cg1r->cg1rThread()); 2259 tc->do_thread(_czft); 2260 } 2261 2262 void G1CollectedHeap::print_tracing_info() const { 2263 concurrent_g1_refine()->print_final_card_counts(); 2264 2265 // We'll overload this to mean "trace GC pause statistics." 2266 if (TraceGen0Time || TraceGen1Time) { 2267 // The "G1CollectorPolicy" is keeping track of these stats, so delegate 2268 // to that. 
2269 g1_policy()->print_tracing_info(); 2270 } 2271 if (SummarizeG1RSStats) { 2272 g1_rem_set()->print_summary_info(); 2273 } 2274 if (SummarizeG1ConcMark) { 2275 concurrent_mark()->print_summary_info(); 2276 } 2277 if (SummarizeG1ZFStats) { 2278 ConcurrentZFThread::print_summary_info(); 2279 } 2280 if (G1SummarizePopularity) { 2281 print_popularity_summary_info(); 2282 } 2283 g1_policy()->print_yg_surv_rate_info(); 2284 2285 GCOverheadReporter::printGCOverhead(); 2286 2287 SpecializationStats::print(); 2288 } 2289 2290 2291 int G1CollectedHeap::addr_to_arena_id(void* addr) const { 2292 HeapRegion* hr = heap_region_containing(addr); 2293 if (hr == NULL) { 2294 return 0; 2295 } else { 2296 return 1; 2297 } 2298 } 2299 2300 G1CollectedHeap* G1CollectedHeap::heap() { 2301 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, 2302 "not a garbage-first heap"); 2303 return _g1h; 2304 } 2305 2306 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 2307 if (PrintHeapAtGC){ 2308 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); 2309 Universe::print(); 2310 } 2311 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 2312 // Call allocation profiler 2313 AllocationProfiler::iterate_since_last_gc(); 2314 // Fill TLAB's and such 2315 ensure_parsability(true); 2316 } 2317 2318 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { 2319 // FIXME: what is this about? 2320 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 2321 // is set. 2322 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 2323 "derived pointer present")); 2324 2325 if (PrintHeapAtGC){ 2326 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); 2327 Universe::print(); 2328 gclog_or_tty->print("} "); 2329 } 2330 } 2331 2332 void G1CollectedHeap::do_collection_pause() { 2333 // Read the GC count while holding the Heap_lock 2334 // we need to do this _before_ wait_for_cleanup_complete(), to 2335 // ensure that we do not give up the heap lock and potentially 2336 // pick up the wrong count 2337 int gc_count_before = SharedHeap::heap()->total_collections(); 2338 2339 // Don't want to do a GC pause while cleanup is being completed! 2340 wait_for_cleanup_complete(); 2341 2342 g1_policy()->record_stop_world_start(); 2343 { 2344 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 2345 VM_G1IncCollectionPause op(gc_count_before); 2346 VMThread::execute(&op); 2347 } 2348 } 2349 2350 void 2351 G1CollectedHeap::doConcurrentMark() { 2352 if (G1ConcMark) { 2353 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2354 if (!_cmThread->in_progress()) { 2355 _cmThread->set_started(); 2356 CGC_lock->notify(); 2357 } 2358 } 2359 } 2360 2361 class VerifyMarkedObjsClosure: public ObjectClosure { 2362 G1CollectedHeap* _g1h; 2363 public: 2364 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} 2365 void do_object(oop obj) { 2366 assert(obj->mark()->is_marked() ? 
!_g1h->is_obj_dead(obj) : true, 2367 "markandsweep mark should agree with concurrent deadness"); 2368 } 2369 }; 2370 2371 void 2372 G1CollectedHeap::checkConcurrentMark() { 2373 VerifyMarkedObjsClosure verifycl(this); 2374 // MutexLockerEx x(getMarkBitMapLock(), 2375 // Mutex::_no_safepoint_check_flag); 2376 object_iterate(&verifycl); 2377 } 2378 2379 void G1CollectedHeap::do_sync_mark() { 2380 _cm->checkpointRootsInitial(); 2381 _cm->markFromRoots(); 2382 _cm->checkpointRootsFinal(false); 2383 } 2384 2385 // <NEW PREDICTION> 2386 2387 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, 2388 bool young) { 2389 return _g1_policy->predict_region_elapsed_time_ms(hr, young); 2390 } 2391 2392 void G1CollectedHeap::check_if_region_is_too_expensive(double 2393 predicted_time_ms) { 2394 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); 2395 } 2396 2397 size_t G1CollectedHeap::pending_card_num() { 2398 size_t extra_cards = 0; 2399 JavaThread *curr = Threads::first(); 2400 while (curr != NULL) { 2401 DirtyCardQueue& dcq = curr->dirty_card_queue(); 2402 extra_cards += dcq.size(); 2403 curr = curr->next(); 2404 } 2405 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2406 size_t buffer_size = dcqs.buffer_size(); 2407 size_t buffer_num = dcqs.completed_buffers_num(); 2408 return buffer_size * buffer_num + extra_cards; 2409 } 2410 2411 size_t G1CollectedHeap::max_pending_card_num() { 2412 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2413 size_t buffer_size = dcqs.buffer_size(); 2414 size_t buffer_num = dcqs.completed_buffers_num(); 2415 int thread_num = Threads::number_of_threads(); 2416 return (buffer_num + thread_num) * buffer_size; 2417 } 2418 2419 size_t G1CollectedHeap::cards_scanned() { 2420 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); 2421 return g1_rset->cardsScanned(); 2422 } 2423 2424 void 2425 G1CollectedHeap::setup_surviving_young_words() { 2426 guarantee( _surviving_young_words == NULL, "pre-condition" ); 2427 size_t array_length = g1_policy()->young_cset_length(); 2428 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); 2429 if (_surviving_young_words == NULL) { 2430 vm_exit_out_of_memory(sizeof(size_t) * array_length, 2431 "Not enough space for young surv words summary."); 2432 } 2433 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); 2434 for (size_t i = 0; i < array_length; ++i) { 2435 guarantee( _surviving_young_words[i] == 0, "invariant" ); 2436 } 2437 } 2438 2439 void 2440 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { 2441 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2442 size_t array_length = g1_policy()->young_cset_length(); 2443 for (size_t i = 0; i < array_length; ++i) 2444 _surviving_young_words[i] += surv_young_words[i]; 2445 } 2446 2447 void 2448 G1CollectedHeap::cleanup_surviving_young_words() { 2449 guarantee( _surviving_young_words != NULL, "pre-condition" ); 2450 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); 2451 _surviving_young_words = NULL; 2452 } 2453 2454 // </NEW PREDICTION> 2455 2456 void 2457 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) { 2458 char verbose_str[128]; 2459 sprintf(verbose_str, "GC pause "); 2460 if (popular_region != NULL) 2461 strcat(verbose_str, "(popular)"); 2462 else if (g1_policy()->in_young_gc_mode()) { 2463 if (g1_policy()->full_young_gcs()) 2464 strcat(verbose_str, "(young)"); 2465 else 2466 strcat(verbose_str, "(partial)"); 2467 } 2468 bool 
reset_should_initiate_conc_mark = false; 2469 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) { 2470 // we currently do not allow an initial mark phase to be piggy-backed 2471 // on a popular pause 2472 reset_should_initiate_conc_mark = true; 2473 g1_policy()->unset_should_initiate_conc_mark(); 2474 } 2475 if (g1_policy()->should_initiate_conc_mark()) 2476 strcat(verbose_str, " (initial-mark)"); 2477 2478 GCCauseSetter x(this, (popular_region == NULL ? 2479 GCCause::_g1_inc_collection_pause : 2480 GCCause::_g1_pop_region_collection_pause)); 2481 2482 // if PrintGCDetails is on, we'll print long statistics information 2483 // in the collector policy code, so let's not print this as the output 2484 // is messy if we do. 2485 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); 2486 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 2487 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); 2488 2489 ResourceMark rm; 2490 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 2491 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); 2492 guarantee(!is_gc_active(), "collection is not reentrant"); 2493 assert(regions_accounted_for(), "Region leakage!"); 2494 2495 increment_gc_time_stamp(); 2496 2497 if (g1_policy()->in_young_gc_mode()) { 2498 assert(check_young_list_well_formed(), 2499 "young list should be well formed"); 2500 } 2501 2502 if (GC_locker::is_active()) { 2503 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 2504 } 2505 2506 bool abandoned = false; 2507 { // Call to jvmpi::post_class_unload_events must occur outside of active GC 2508 IsGCActiveMark x; 2509 2510 gc_prologue(false); 2511 increment_total_collections(); 2512 2513 #if G1_REM_SET_LOGGING 2514 gclog_or_tty->print_cr("\nJust chose CS, heap:"); 2515 print(); 2516 #endif 2517 2518 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 2519 HandleMark hm; // Discard invalid handles created during verification 2520 prepare_for_verify(); 2521 gclog_or_tty->print(" VerifyBeforeGC:"); 2522 Universe::verify(false); 2523 } 2524 2525 COMPILER2_PRESENT(DerivedPointerTable::clear()); 2526 2527 // We want to turn off ref discovery, if necessary, and turn it back on 2528 // on again later if we do. 2529 bool was_enabled = ref_processor()->discovery_enabled(); 2530 if (was_enabled) ref_processor()->disable_discovery(); 2531 2532 // Forget the current alloc region (we might even choose it to be part 2533 // of the collection set!). 2534 abandon_cur_alloc_region(); 2535 2536 // The elapsed time induced by the start time below deliberately elides 2537 // the possible verification above. 2538 double start_time_sec = os::elapsedTime(); 2539 GCOverheadReporter::recordSTWStart(start_time_sec); 2540 size_t start_used_bytes = used(); 2541 if (!G1ConcMark) { 2542 do_sync_mark(); 2543 } 2544 2545 g1_policy()->record_collection_pause_start(start_time_sec, 2546 start_used_bytes); 2547 2548 guarantee(_in_cset_fast_test == NULL, "invariant"); 2549 guarantee(_in_cset_fast_test_base == NULL, "invariant"); 2550 _in_cset_fast_test_length = max_regions(); 2551 _in_cset_fast_test_base = 2552 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); 2553 memset(_in_cset_fast_test_base, false, 2554 _in_cset_fast_test_length * sizeof(bool)); 2555 // We're biasing _in_cset_fast_test to avoid subtracting the 2556 // beginning of the heap every time we want to index; basically 2557 // it's the same with what we do with the card table. 
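// (Sketch of what the biasing buys us -- illustrative only, using the same
// field and constant names as the surrounding code:
//
//   // unbiased: subtract the reserved base on every query
//   size_t index = ((size_t) addr - (size_t) _g1_reserved.start())
//                    >> HeapRegion::LogOfHRGrainBytes;
//   bool in_cset = _in_cset_fast_test_base[index];
//
//   // biased (as set up just below): fold the subtraction into the array
//   // pointer once, so a query is just "shift the address and load"
//   bool in_cset = _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes];
// )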
2558 _in_cset_fast_test = _in_cset_fast_test_base - 2559 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); 2560 2561 #if SCAN_ONLY_VERBOSE 2562 _young_list->print(); 2563 #endif // SCAN_ONLY_VERBOSE 2564 2565 if (g1_policy()->should_initiate_conc_mark()) { 2566 concurrent_mark()->checkpointRootsInitialPre(); 2567 } 2568 save_marks(); 2569 2570 // We must do this before any possible evacuation that should propagate 2571 // marks, including evacuation of popular objects in a popular pause. 2572 if (mark_in_progress()) { 2573 double start_time_sec = os::elapsedTime(); 2574 2575 _cm->drainAllSATBBuffers(); 2576 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; 2577 g1_policy()->record_satb_drain_time(finish_mark_ms); 2578 2579 } 2580 // Record the number of elements currently on the mark stack, so we 2581 // only iterate over these. (Since evacuation may add to the mark 2582 // stack, doing more exposes race conditions.) If no mark is in 2583 // progress, this will be zero. 2584 _cm->set_oops_do_bound(); 2585 2586 assert(regions_accounted_for(), "Region leakage."); 2587 2588 bool abandoned = false; 2589 2590 if (mark_in_progress()) 2591 concurrent_mark()->newCSet(); 2592 2593 // Now choose the CS. 2594 if (popular_region == NULL) { 2595 g1_policy()->choose_collection_set(); 2596 } else { 2597 // We may be evacuating a single region (for popularity). 2598 g1_policy()->record_popular_pause_preamble_start(); 2599 popularity_pause_preamble(popular_region); 2600 g1_policy()->record_popular_pause_preamble_end(); 2601 abandoned = (g1_policy()->collection_set() == NULL); 2602 // Now we allow more regions to be added (we have to collect 2603 // all popular regions). 2604 if (!abandoned) { 2605 g1_policy()->choose_collection_set(popular_region); 2606 } 2607 } 2608 // We may abandon a pause if we find no region that will fit in the MMU 2609 // pause. 2610 abandoned = (g1_policy()->collection_set() == NULL); 2611 2612 // Nothing to do if we were unable to choose a collection set. 2613 if (!abandoned) { 2614 #if G1_REM_SET_LOGGING 2615 gclog_or_tty->print_cr("\nAfter pause, heap:"); 2616 print(); 2617 #endif 2618 2619 setup_surviving_young_words(); 2620 2621 // Set up the gc allocation regions. 2622 get_gc_alloc_regions(); 2623 2624 // Actually do the work... 2625 evacuate_collection_set(); 2626 free_collection_set(g1_policy()->collection_set()); 2627 g1_policy()->clear_collection_set(); 2628 2629 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); 2630 // this is more for peace of mind; we're nulling them here and 2631 // we're expecting them to be null at the beginning of the next GC 2632 _in_cset_fast_test = NULL; 2633 _in_cset_fast_test_base = NULL; 2634 2635 if (popular_region != NULL) { 2636 // We have to wait until now, because we don't want the region to 2637 // be rescheduled for pop-evac during RS update. 
2638 popular_region->set_popular_pending(false); 2639 } 2640 2641 release_gc_alloc_regions(); 2642 2643 cleanup_surviving_young_words(); 2644 2645 if (g1_policy()->in_young_gc_mode()) { 2646 _young_list->reset_sampled_info(); 2647 assert(check_young_list_empty(true), 2648 "young list should be empty"); 2649 2650 #if SCAN_ONLY_VERBOSE 2651 _young_list->print(); 2652 #endif // SCAN_ONLY_VERBOSE 2653 2654 g1_policy()->record_survivor_regions(_young_list->survivor_length(), 2655 _young_list->first_survivor_region(), 2656 _young_list->last_survivor_region()); 2657 _young_list->reset_auxilary_lists(); 2658 } 2659 } else { 2660 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 2661 } 2662 2663 if (evacuation_failed()) { 2664 _summary_bytes_used = recalculate_used(); 2665 } else { 2666 // The "used" of the the collection set have already been subtracted 2667 // when they were freed. Add in the bytes evacuated. 2668 _summary_bytes_used += g1_policy()->bytes_in_to_space(); 2669 } 2670 2671 if (g1_policy()->in_young_gc_mode() && 2672 g1_policy()->should_initiate_conc_mark()) { 2673 concurrent_mark()->checkpointRootsInitialPost(); 2674 set_marking_started(); 2675 doConcurrentMark(); 2676 } 2677 2678 #if SCAN_ONLY_VERBOSE 2679 _young_list->print(); 2680 #endif // SCAN_ONLY_VERBOSE 2681 2682 double end_time_sec = os::elapsedTime(); 2683 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; 2684 g1_policy()->record_pause_time_ms(pause_time_ms); 2685 GCOverheadReporter::recordSTWEnd(end_time_sec); 2686 g1_policy()->record_collection_pause_end(popular_region != NULL, 2687 abandoned); 2688 2689 assert(regions_accounted_for(), "Region leakage."); 2690 2691 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 2692 HandleMark hm; // Discard invalid handles created during verification 2693 gclog_or_tty->print(" VerifyAfterGC:"); 2694 prepare_for_verify(); 2695 Universe::verify(false); 2696 } 2697 2698 if (was_enabled) ref_processor()->enable_discovery(); 2699 2700 { 2701 size_t expand_bytes = g1_policy()->expansion_amount(); 2702 if (expand_bytes > 0) { 2703 size_t bytes_before = capacity(); 2704 expand(expand_bytes); 2705 } 2706 } 2707 2708 if (mark_in_progress()) { 2709 concurrent_mark()->update_g1_committed(); 2710 } 2711 2712 #ifdef TRACESPINNING 2713 ParallelTaskTerminator::print_termination_counts(); 2714 #endif 2715 2716 gc_epilogue(false); 2717 } 2718 2719 assert(verify_region_lists(), "Bad region lists."); 2720 2721 if (reset_should_initiate_conc_mark) 2722 g1_policy()->set_should_initiate_conc_mark(); 2723 2724 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { 2725 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); 2726 print_tracing_info(); 2727 vm_exit(-1); 2728 } 2729 } 2730 2731 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { 2732 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); 2733 HeapWord* original_top = NULL; 2734 if (r != NULL) 2735 original_top = r->top(); 2736 2737 // We will want to record the used space in r as being there before gc. 2738 // One we install it as a GC alloc region it's eligible for allocation. 2739 // So record it now and use it later. 2740 size_t r_used = 0; 2741 if (r != NULL) { 2742 r_used = r->used(); 2743 2744 if (ParallelGCThreads > 0) { 2745 // need to take the lock to guard against two threads calling 2746 // get_gc_alloc_region concurrently (very unlikely but...) 
2747 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2748 r->save_marks(); 2749 } 2750 } 2751 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; 2752 _gc_alloc_regions[purpose] = r; 2753 if (old_alloc_region != NULL) { 2754 // Replace aliases too. 2755 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2756 if (_gc_alloc_regions[ap] == old_alloc_region) { 2757 _gc_alloc_regions[ap] = r; 2758 } 2759 } 2760 } 2761 if (r != NULL) { 2762 push_gc_alloc_region(r); 2763 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { 2764 // We are using a region as a GC alloc region after it has been used 2765 // as a mutator allocation region during the current marking cycle. 2766 // The mutator-allocated objects are currently implicitly marked, but 2767 // when we move hr->next_top_at_mark_start() forward at the the end 2768 // of the GC pause, they won't be. We therefore mark all objects in 2769 // the "gap". We do this object-by-object, since marking densely 2770 // does not currently work right with marking bitmap iteration. This 2771 // means we rely on TLAB filling at the start of pauses, and no 2772 // "resuscitation" of filled TLAB's. If we want to do this, we need 2773 // to fix the marking bitmap iteration. 2774 HeapWord* curhw = r->next_top_at_mark_start(); 2775 HeapWord* t = original_top; 2776 2777 while (curhw < t) { 2778 oop cur = (oop)curhw; 2779 // We'll assume parallel for generality. This is rare code. 2780 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? 2781 curhw = curhw + cur->size(); 2782 } 2783 assert(curhw == t, "Should have parsed correctly."); 2784 } 2785 if (G1PolicyVerbose > 1) { 2786 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " 2787 "for survivors:", r->bottom(), original_top, r->end()); 2788 r->print(); 2789 } 2790 g1_policy()->record_before_bytes(r_used); 2791 } 2792 } 2793 2794 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { 2795 assert(Thread::current()->is_VM_thread() || 2796 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); 2797 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), 2798 "Precondition."); 2799 hr->set_is_gc_alloc_region(true); 2800 hr->set_next_gc_alloc_region(_gc_alloc_region_list); 2801 _gc_alloc_region_list = hr; 2802 } 2803 2804 #ifdef G1_DEBUG 2805 class FindGCAllocRegion: public HeapRegionClosure { 2806 public: 2807 bool doHeapRegion(HeapRegion* r) { 2808 if (r->is_gc_alloc_region()) { 2809 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] 
is still a gc_alloc_region.", 2810 r->hrs_index(), r->bottom()); 2811 } 2812 return false; 2813 } 2814 }; 2815 #endif // G1_DEBUG 2816 2817 void G1CollectedHeap::forget_alloc_region_list() { 2818 assert(Thread::current()->is_VM_thread(), "Precondition"); 2819 while (_gc_alloc_region_list != NULL) { 2820 HeapRegion* r = _gc_alloc_region_list; 2821 assert(r->is_gc_alloc_region(), "Invariant."); 2822 _gc_alloc_region_list = r->next_gc_alloc_region(); 2823 r->set_next_gc_alloc_region(NULL); 2824 r->set_is_gc_alloc_region(false); 2825 if (r->is_survivor()) { 2826 if (r->is_empty()) { 2827 r->set_not_young(); 2828 } else { 2829 _young_list->add_survivor_region(r); 2830 } 2831 } 2832 if (r->is_empty()) { 2833 ++_free_regions; 2834 } 2835 } 2836 #ifdef G1_DEBUG 2837 FindGCAllocRegion fa; 2838 heap_region_iterate(&fa); 2839 #endif // G1_DEBUG 2840 } 2841 2842 2843 bool G1CollectedHeap::check_gc_alloc_regions() { 2844 // TODO: allocation regions check 2845 return true; 2846 } 2847 2848 void G1CollectedHeap::get_gc_alloc_regions() { 2849 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2850 // Create new GC alloc regions. 2851 HeapRegion* alloc_region = _gc_alloc_regions[ap]; 2852 // Clear this alloc region, so that in case it turns out to be 2853 // unacceptable, we end up with no allocation region, rather than a bad 2854 // one. 2855 _gc_alloc_regions[ap] = NULL; 2856 if (alloc_region == NULL || alloc_region->in_collection_set()) { 2857 // Can't re-use old one. Allocate a new one. 2858 alloc_region = newAllocRegionWithExpansion(ap, 0); 2859 } 2860 if (alloc_region != NULL) { 2861 set_gc_alloc_region(ap, alloc_region); 2862 } 2863 } 2864 // Set alternative regions for allocation purposes that have reached 2865 // thier limit. 2866 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2867 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); 2868 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { 2869 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; 2870 } 2871 } 2872 assert(check_gc_alloc_regions(), "alloc regions messed up"); 2873 } 2874 2875 void G1CollectedHeap::release_gc_alloc_regions() { 2876 // We keep a separate list of all regions that have been alloc regions in 2877 // the current collection pause. Forget that now. 2878 forget_alloc_region_list(); 2879 2880 // The current alloc regions contain objs that have survived 2881 // collection. Make them no longer GC alloc regions. 2882 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2883 HeapRegion* r = _gc_alloc_regions[ap]; 2884 if (r != NULL && r->is_empty()) { 2885 { 2886 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 2887 r->set_zero_fill_complete(); 2888 put_free_region_on_list_locked(r); 2889 } 2890 } 2891 // set_gc_alloc_region will also NULLify all aliases to the region 2892 set_gc_alloc_region(ap, NULL); 2893 _gc_alloc_region_counts[ap] = 0; 2894 } 2895 } 2896 2897 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 2898 _drain_in_progress = false; 2899 set_evac_failure_closure(cl); 2900 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 2901 } 2902 2903 void G1CollectedHeap::finalize_for_evac_failure() { 2904 assert(_evac_failure_scan_stack != NULL && 2905 _evac_failure_scan_stack->length() == 0, 2906 "Postcondition"); 2907 assert(!_drain_in_progress, "Postcondition"); 2908 // Don't have to delete, since the scan stack is a resource object. 
2909 _evac_failure_scan_stack = NULL; 2910 } 2911 2912 2913 2914 // *** Sequential G1 Evacuation 2915 2916 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { 2917 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 2918 // let the caller handle alloc failure 2919 if (alloc_region == NULL) return NULL; 2920 assert(isHumongous(word_size) || !alloc_region->isHumongous(), 2921 "Either the object is humongous or the region isn't"); 2922 HeapWord* block = alloc_region->allocate(word_size); 2923 if (block == NULL) { 2924 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); 2925 } 2926 return block; 2927 } 2928 2929 class G1IsAliveClosure: public BoolObjectClosure { 2930 G1CollectedHeap* _g1; 2931 public: 2932 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 2933 void do_object(oop p) { assert(false, "Do not call."); } 2934 bool do_object_b(oop p) { 2935 // It is reachable if it is outside the collection set, or is inside 2936 // and forwarded. 2937 2938 #ifdef G1_DEBUG 2939 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", 2940 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), 2941 !_g1->obj_in_cs(p) || p->is_forwarded()); 2942 #endif // G1_DEBUG 2943 2944 return !_g1->obj_in_cs(p) || p->is_forwarded(); 2945 } 2946 }; 2947 2948 class G1KeepAliveClosure: public OopClosure { 2949 G1CollectedHeap* _g1; 2950 public: 2951 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 2952 void do_oop(narrowOop* p) { 2953 guarantee(false, "NYI"); 2954 } 2955 void do_oop(oop* p) { 2956 oop obj = *p; 2957 #ifdef G1_DEBUG 2958 if (PrintGC && Verbose) { 2959 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, 2960 p, (void*) obj, (void*) *p); 2961 } 2962 #endif // G1_DEBUG 2963 2964 if (_g1->obj_in_cs(obj)) { 2965 assert( obj->is_forwarded(), "invariant" ); 2966 *p = obj->forwardee(); 2967 2968 #ifdef G1_DEBUG 2969 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, 2970 (void*) obj, (void*) *p); 2971 #endif // G1_DEBUG 2972 } 2973 } 2974 }; 2975 2976 class UpdateRSetImmediate : public OopsInHeapRegionClosure { 2977 private: 2978 G1CollectedHeap* _g1; 2979 G1RemSet* _g1_rem_set; 2980 public: 2981 UpdateRSetImmediate(G1CollectedHeap* g1) : 2982 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} 2983 2984 void do_oop(narrowOop* p) { 2985 guarantee(false, "NYI"); 2986 } 2987 void do_oop(oop* p) { 2988 assert(_from->is_in_reserved(p), "paranoia"); 2989 if (*p != NULL && !_from->is_survivor()) { 2990 _g1_rem_set->par_write_ref(_from, p, 0); 2991 } 2992 } 2993 }; 2994 2995 class UpdateRSetDeferred : public OopsInHeapRegionClosure { 2996 private: 2997 G1CollectedHeap* _g1; 2998 DirtyCardQueue *_dcq; 2999 CardTableModRefBS* _ct_bs; 3000 3001 public: 3002 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : 3003 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} 3004 3005 void do_oop(narrowOop* p) { 3006 guarantee(false, "NYI"); 3007 } 3008 void do_oop(oop* p) { 3009 assert(_from->is_in_reserved(p), "paranoia"); 3010 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) { 3011 size_t card_index = _ct_bs->index_for(p); 3012 if (_ct_bs->mark_card_deferred(card_index)) { 3013 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); 3014 } 3015 } 3016 } 3017 }; 3018 3019 3020 3021 class RemoveSelfPointerClosure: public ObjectClosure { 3022 private: 3023 G1CollectedHeap* _g1; 3024 ConcurrentMark* _cm; 3025 HeapRegion* _hr; 3026 size_t _prev_marked_bytes; 3027 size_t 
_next_marked_bytes; 3028 OopsInHeapRegionClosure *_cl; 3029 public: 3030 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : 3031 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), 3032 _next_marked_bytes(0), _cl(cl) {} 3033 3034 size_t prev_marked_bytes() { return _prev_marked_bytes; } 3035 size_t next_marked_bytes() { return _next_marked_bytes; } 3036 3037 // The original idea here was to coalesce evacuated and dead objects. 3038 // However that caused complications with the block offset table (BOT). 3039 // In particular if there were two TLABs, one of them partially refined. 3040 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| 3041 // The BOT entries of the unrefined part of TLAB_2 point to the start 3042 // of TLAB_2. If the last object of the TLAB_1 and the first object 3043 // of TLAB_2 are coalesced, then the cards of the unrefined part 3044 // would point into middle of the filler object. 3045 // 3046 // The current approach is to not coalesce and leave the BOT contents intact. 3047 void do_object(oop obj) { 3048 if (obj->is_forwarded() && obj->forwardee() == obj) { 3049 // The object failed to move. 3050 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); 3051 _cm->markPrev(obj); 3052 assert(_cm->isPrevMarked(obj), "Should be marked!"); 3053 _prev_marked_bytes += (obj->size() * HeapWordSize); 3054 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { 3055 _cm->markAndGrayObjectIfNecessary(obj); 3056 } 3057 obj->set_mark(markOopDesc::prototype()); 3058 // While we were processing RSet buffers during the 3059 // collection, we actually didn't scan any cards on the 3060 // collection set, since we didn't want to update remebered 3061 // sets with entries that point into the collection set, given 3062 // that live objects fromthe collection set are about to move 3063 // and such entries will be stale very soon. This change also 3064 // dealt with a reliability issue which involved scanning a 3065 // card in the collection set and coming across an array that 3066 // was being chunked and looking malformed. The problem is 3067 // that, if evacuation fails, we might have remembered set 3068 // entries missing given that we skipped cards on the 3069 // collection set. So, we'll recreate such entries now. 3070 obj->oop_iterate(_cl); 3071 assert(_cm->isPrevMarked(obj), "Should be marked!"); 3072 } else { 3073 // The object has been either evacuated or is dead. Fill it with a 3074 // dummy object. 3075 MemRegion mr((HeapWord*)obj, obj->size()); 3076 CollectedHeap::fill_with_object(mr); 3077 _cm->clearRangeBothMaps(mr); 3078 } 3079 } 3080 }; 3081 3082 void G1CollectedHeap::remove_self_forwarding_pointers() { 3083 UpdateRSetImmediate immediate_update(_g1h); 3084 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); 3085 UpdateRSetDeferred deferred_update(_g1h, &dcq); 3086 OopsInHeapRegionClosure *cl; 3087 if (G1DeferredRSUpdate) { 3088 cl = &deferred_update; 3089 } else { 3090 cl = &immediate_update; 3091 } 3092 HeapRegion* cur = g1_policy()->collection_set(); 3093 while (cur != NULL) { 3094 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3095 3096 RemoveSelfPointerClosure rspc(_g1h, cl); 3097 if (cur->evacuation_failed()) { 3098 assert(cur->in_collection_set(), "bad CS"); 3099 cl->set_region(cur); 3100 cur->object_iterate(&rspc); 3101 3102 // A number of manipulations to make the TAMS be the current top, 3103 // and the marked bytes be the ones observed in the iteration. 
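// (Net effect of the calls below, summarized -- a sketch of the intent, not
// new behaviour: note_start_of_marking(false) resets the "next" marking data
// so that _next_top_at_mark_start == top() and _next_marked_bytes == 0;
// add_to_marked_bytes() then credits the bytes of the self-forwarded, still
// live objects counted by rspc; note_end_of_marking() finally promotes that
// "next" data to the "prev" data, so the region's previous-marking
// information stays consistent even though its evacuation failed.)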
3104 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { 3105 // The comments below are the postconditions achieved by the 3106 // calls. Note especially the last such condition, which says that 3107 // the count of marked bytes has been properly restored. 3108 cur->note_start_of_marking(false); 3109 // _next_top_at_mark_start == top, _next_marked_bytes == 0 3110 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); 3111 // _next_marked_bytes == prev_marked_bytes. 3112 cur->note_end_of_marking(); 3113 // _prev_top_at_mark_start == top(), 3114 // _prev_marked_bytes == prev_marked_bytes 3115 } 3116 // If there is no mark in progress, we modified the _next variables 3117 // above needlessly, but harmlessly. 3118 if (_g1h->mark_in_progress()) { 3119 cur->note_start_of_marking(false); 3120 // _next_top_at_mark_start == top, _next_marked_bytes == 0 3121 // _next_marked_bytes == next_marked_bytes. 3122 } 3123 3124 // Now make sure the region has the right index in the sorted array. 3125 g1_policy()->note_change_in_marked_bytes(cur); 3126 } 3127 cur = cur->next_in_collection_set(); 3128 } 3129 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3130 3131 // Now restore saved marks, if any. 3132 if (_objs_with_preserved_marks != NULL) { 3133 assert(_preserved_marks_of_objs != NULL, "Both or none."); 3134 assert(_objs_with_preserved_marks->length() == 3135 _preserved_marks_of_objs->length(), "Both or none."); 3136 guarantee(_objs_with_preserved_marks->length() == 3137 _preserved_marks_of_objs->length(), "Both or none."); 3138 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { 3139 oop obj = _objs_with_preserved_marks->at(i); 3140 markOop m = _preserved_marks_of_objs->at(i); 3141 obj->set_mark(m); 3142 } 3143 // Delete the preserved marks growable arrays (allocated on the C heap). 3144 delete _objs_with_preserved_marks; 3145 delete _preserved_marks_of_objs; 3146 _objs_with_preserved_marks = NULL; 3147 _preserved_marks_of_objs = NULL; 3148 } 3149 } 3150 3151 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { 3152 _evac_failure_scan_stack->push(obj); 3153 } 3154 3155 void G1CollectedHeap::drain_evac_failure_scan_stack() { 3156 assert(_evac_failure_scan_stack != NULL, "precondition"); 3157 3158 while (_evac_failure_scan_stack->length() > 0) { 3159 oop obj = _evac_failure_scan_stack->pop(); 3160 _evac_failure_closure->set_region(heap_region_containing(obj)); 3161 obj->oop_iterate_backwards(_evac_failure_closure); 3162 } 3163 } 3164 3165 void G1CollectedHeap::handle_evacuation_failure(oop old) { 3166 markOop m = old->mark(); 3167 // forward to self 3168 assert(!old->is_forwarded(), "precondition"); 3169 3170 old->forward_to(old); 3171 handle_evacuation_failure_common(old, m); 3172 } 3173 3174 oop 3175 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, 3176 oop old) { 3177 markOop m = old->mark(); 3178 oop forward_ptr = old->forward_to_atomic(old); 3179 if (forward_ptr == NULL) { 3180 // Forward-to-self succeeded. 3181 if (_evac_failure_closure != cl) { 3182 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); 3183 assert(!_drain_in_progress, 3184 "Should only be true while someone holds the lock."); 3185 // Set the global evac-failure closure to the current thread's. 3186 assert(_evac_failure_closure == NULL, "Or locking has failed."); 3187 set_evac_failure_closure(cl); 3188 // Now do the common part. 3189 handle_evacuation_failure_common(old, m); 3190 // Reset to NULL. 
3191 set_evac_failure_closure(NULL); 3192 } else { 3193 // The lock is already held, and this is recursive. 3194 assert(_drain_in_progress, "This should only be the recursive case."); 3195 handle_evacuation_failure_common(old, m); 3196 } 3197 return old; 3198 } else { 3199 // Someone else had a place to copy it. 3200 return forward_ptr; 3201 } 3202 } 3203 3204 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { 3205 set_evacuation_failed(true); 3206 3207 preserve_mark_if_necessary(old, m); 3208 3209 HeapRegion* r = heap_region_containing(old); 3210 if (!r->evacuation_failed()) { 3211 r->set_evacuation_failed(true); 3212 if (G1TraceRegions) { 3213 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " 3214 "["PTR_FORMAT","PTR_FORMAT")\n", 3215 r, r->bottom(), r->end()); 3216 } 3217 } 3218 3219 push_on_evac_failure_scan_stack(old); 3220 3221 if (!_drain_in_progress) { 3222 // prevent recursion in copy_to_survivor_space() 3223 _drain_in_progress = true; 3224 drain_evac_failure_scan_stack(); 3225 _drain_in_progress = false; 3226 } 3227 } 3228 3229 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { 3230 if (m != markOopDesc::prototype()) { 3231 if (_objs_with_preserved_marks == NULL) { 3232 assert(_preserved_marks_of_objs == NULL, "Both or none."); 3233 _objs_with_preserved_marks = 3234 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3235 _preserved_marks_of_objs = 3236 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); 3237 } 3238 _objs_with_preserved_marks->push(obj); 3239 _preserved_marks_of_objs->push(m); 3240 } 3241 } 3242 3243 // *** Parallel G1 Evacuation 3244 3245 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 3246 size_t word_size) { 3247 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3248 // let the caller handle alloc failure 3249 if (alloc_region == NULL) return NULL; 3250 3251 HeapWord* block = alloc_region->par_allocate(word_size); 3252 if (block == NULL) { 3253 MutexLockerEx x(par_alloc_during_gc_lock(), 3254 Mutex::_no_safepoint_check_flag); 3255 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); 3256 } 3257 return block; 3258 } 3259 3260 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, 3261 bool par) { 3262 // Another thread might have obtained alloc_region for the given 3263 // purpose, and might be attempting to allocate in it, and might 3264 // succeed. Therefore, we can't do the "finalization" stuff on the 3265 // region below until we're sure the last allocation has happened. 3266 // We ensure this by allocating the remaining space with a garbage 3267 // object. 3268 if (par) par_allocate_remaining_space(alloc_region); 3269 // Now we can do the post-GC stuff on the region. 3270 alloc_region->note_end_of_copying(); 3271 g1_policy()->record_after_bytes(alloc_region->used()); 3272 } 3273 3274 HeapWord* 3275 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, 3276 HeapRegion* alloc_region, 3277 bool par, 3278 size_t word_size) { 3279 HeapWord* block = NULL; 3280 // In the parallel case, a previous thread to obtain the lock may have 3281 // already assigned a new gc_alloc_region. 
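// (Outline of the retry protocol -- an illustrative sketch of the logic that
// follows, not a replacement for it:
//
//   block = alloc_region->par_allocate(word_size);   // lock-free fast path
//   if (block == NULL) {
//     take par_alloc_during_gc_lock();
//     // re-read the shared slot under the lock: another thread may already
//     // have retired the old region and installed a fresh one
//     if (alloc_region != _gc_alloc_regions[purpose]) {
//       retry the allocation in the newly installed region;
//     } else {
//       retire the old region, install a new one, and allocate from it;
//     }
//   }
// )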
3282 if (alloc_region != _gc_alloc_regions[purpose]) { 3283 assert(par, "But should only happen in parallel case."); 3284 alloc_region = _gc_alloc_regions[purpose]; 3285 if (alloc_region == NULL) return NULL; 3286 block = alloc_region->par_allocate(word_size); 3287 if (block != NULL) return block; 3288 // Otherwise, continue; this new region is empty, too. 3289 } 3290 assert(alloc_region != NULL, "We better have an allocation region"); 3291 retire_alloc_region(alloc_region, par); 3292 3293 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { 3294 // Cannot allocate more regions for the given purpose. 3295 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); 3296 // Is there an alternative? 3297 if (purpose != alt_purpose) { 3298 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; 3299 // Has not the alternative region been aliased? 3300 if (alloc_region != alt_region && alt_region != NULL) { 3301 // Try to allocate in the alternative region. 3302 if (par) { 3303 block = alt_region->par_allocate(word_size); 3304 } else { 3305 block = alt_region->allocate(word_size); 3306 } 3307 // Make an alias. 3308 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; 3309 if (block != NULL) { 3310 return block; 3311 } 3312 retire_alloc_region(alt_region, par); 3313 } 3314 // Both the allocation region and the alternative one are full 3315 // and aliased, replace them with a new allocation region. 3316 purpose = alt_purpose; 3317 } else { 3318 set_gc_alloc_region(purpose, NULL); 3319 return NULL; 3320 } 3321 } 3322 3323 // Now allocate a new region for allocation. 3324 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); 3325 3326 // let the caller handle alloc failure 3327 if (alloc_region != NULL) { 3328 3329 assert(check_gc_alloc_regions(), "alloc regions messed up"); 3330 assert(alloc_region->saved_mark_at_top(), 3331 "Mark should have been saved already."); 3332 // We used to assert that the region was zero-filled here, but no 3333 // longer. 3334 3335 // This must be done last: once it's installed, other regions may 3336 // allocate in it (without holding the lock.) 3337 set_gc_alloc_region(purpose, alloc_region); 3338 3339 if (par) { 3340 block = alloc_region->par_allocate(word_size); 3341 } else { 3342 block = alloc_region->allocate(word_size); 3343 } 3344 // Caller handles alloc failure. 3345 } else { 3346 // This sets other apis using the same old alloc region to NULL, also. 3347 set_gc_alloc_region(purpose, NULL); 3348 } 3349 return block; // May be NULL. 3350 } 3351 3352 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { 3353 HeapWord* block = NULL; 3354 size_t free_words; 3355 do { 3356 free_words = r->free()/HeapWordSize; 3357 // If there's too little space, no one can allocate, so we're done. 3358 if (free_words < (size_t)oopDesc::header_size()) return; 3359 // Otherwise, try to claim it. 
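// (Why this is a loop -- a clarifying note, no behavioural change: between
// reading r->free() and calling par_allocate(), racing workers may grab part
// of the remaining space, in which case par_allocate(free_words) fails and
// we re-read the now smaller free size.  Once a claim of exactly the
// remaining words succeeds, that span is filled with a dummy object below,
// so the region stays parseable and no one else can allocate into it.)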
3360 block = r->par_allocate(free_words);
3361 } while (block == NULL);
3362 fill_with_object(block, free_words);
3363 }
3364
3365 #define use_local_bitmaps 1
3366 #define verify_local_bitmaps 0
3367
3368 #ifndef PRODUCT
3369
3370 class GCLabBitMap;
3371 class GCLabBitMapClosure: public BitMapClosure {
3372 private:
3373 ConcurrentMark* _cm;
3374 GCLabBitMap* _bitmap;
3375
3376 public:
3377 GCLabBitMapClosure(ConcurrentMark* cm,
3378 GCLabBitMap* bitmap) {
3379 _cm = cm;
3380 _bitmap = bitmap;
3381 }
3382
3383 virtual bool do_bit(size_t offset);
3384 };
3385
3386 #endif // PRODUCT
3387
3388 #define oop_buffer_length 256
3389
3390 class GCLabBitMap: public BitMap {
3391 private:
3392 ConcurrentMark* _cm;
3393
3394 int _shifter;
3395 size_t _bitmap_word_covers_words;
3396
3397 // beginning of the heap
3398 HeapWord* _heap_start;
3399
3400 // this is the actual start of the GCLab
3401 HeapWord* _real_start_word;
3402
3403 // this is the actual end of the GCLab
3404 HeapWord* _real_end_word;
3405
3406 // this is the first word, possibly located before the actual start
3407 // of the GCLab, that corresponds to the first bit of the bitmap
3408 HeapWord* _start_word;
3409
3410 // size of a GCLab in words
3411 size_t _gclab_word_size;
3412
3413 static int shifter() {
3414 return MinObjAlignment - 1;
3415 }
3416
3417 // how many heap words does a single bitmap word correspond to?
3418 static size_t bitmap_word_covers_words() {
3419 return BitsPerWord << shifter();
3420 }
3421
3422 static size_t gclab_word_size() {
3423 return ParallelGCG1AllocBufferSize / HeapWordSize;
3424 }
3425
3426 static size_t bitmap_size_in_bits() {
3427 size_t bits_in_bitmap = gclab_word_size() >> shifter();
3428 // We are going to ensure that the beginning of a word in this
3429 // bitmap also corresponds to the beginning of a word in the
3430 // global marking bitmap. To handle the case where a GCLab
3431 // starts from the middle of the bitmap, we need to add enough
3432 // space (i.e. up to a bitmap word) to ensure that we have
3433 // enough bits in the bitmap.
3434 return bits_in_bitmap + BitsPerWord - 1; 3435 } 3436 public: 3437 GCLabBitMap(HeapWord* heap_start) 3438 : BitMap(bitmap_size_in_bits()), 3439 _cm(G1CollectedHeap::heap()->concurrent_mark()), 3440 _shifter(shifter()), 3441 _bitmap_word_covers_words(bitmap_word_covers_words()), 3442 _heap_start(heap_start), 3443 _gclab_word_size(gclab_word_size()), 3444 _real_start_word(NULL), 3445 _real_end_word(NULL), 3446 _start_word(NULL) 3447 { 3448 guarantee( size_in_words() >= bitmap_size_in_words(), 3449 "just making sure"); 3450 } 3451 3452 inline unsigned heapWordToOffset(HeapWord* addr) { 3453 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; 3454 assert(offset < size(), "offset should be within bounds"); 3455 return offset; 3456 } 3457 3458 inline HeapWord* offsetToHeapWord(size_t offset) { 3459 HeapWord* addr = _start_word + (offset << _shifter); 3460 assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); 3461 return addr; 3462 } 3463 3464 bool fields_well_formed() { 3465 bool ret1 = (_real_start_word == NULL) && 3466 (_real_end_word == NULL) && 3467 (_start_word == NULL); 3468 if (ret1) 3469 return true; 3470 3471 bool ret2 = _real_start_word >= _start_word && 3472 _start_word < _real_end_word && 3473 (_real_start_word + _gclab_word_size) == _real_end_word && 3474 (_start_word + _gclab_word_size + _bitmap_word_covers_words) 3475 > _real_end_word; 3476 return ret2; 3477 } 3478 3479 inline bool mark(HeapWord* addr) { 3480 guarantee(use_local_bitmaps, "invariant"); 3481 assert(fields_well_formed(), "invariant"); 3482 3483 if (addr >= _real_start_word && addr < _real_end_word) { 3484 assert(!isMarked(addr), "should not have already been marked"); 3485 3486 // first mark it on the bitmap 3487 at_put(heapWordToOffset(addr), true); 3488 3489 return true; 3490 } else { 3491 return false; 3492 } 3493 } 3494 3495 inline bool isMarked(HeapWord* addr) { 3496 guarantee(use_local_bitmaps, "invariant"); 3497 assert(fields_well_formed(), "invariant"); 3498 3499 return at(heapWordToOffset(addr)); 3500 } 3501 3502 void set_buffer(HeapWord* start) { 3503 guarantee(use_local_bitmaps, "invariant"); 3504 clear(); 3505 3506 assert(start != NULL, "invariant"); 3507 _real_start_word = start; 3508 _real_end_word = start + _gclab_word_size; 3509 3510 size_t diff = 3511 pointer_delta(start, _heap_start) % _bitmap_word_covers_words; 3512 _start_word = start - diff; 3513 3514 assert(fields_well_formed(), "invariant"); 3515 } 3516 3517 #ifndef PRODUCT 3518 void verify() { 3519 // verify that the marks have been propagated 3520 GCLabBitMapClosure cl(_cm, this); 3521 iterate(&cl); 3522 } 3523 #endif // PRODUCT 3524 3525 void retire() { 3526 guarantee(use_local_bitmaps, "invariant"); 3527 assert(fields_well_formed(), "invariant"); 3528 3529 if (_start_word != NULL) { 3530 CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); 3531 3532 // this means that the bitmap was set up for the GCLab 3533 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); 3534 3535 mark_bitmap->mostly_disjoint_range_union(this, 3536 0, // always start from the start of the bitmap 3537 _start_word, 3538 size_in_words()); 3539 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); 3540 3541 #ifndef PRODUCT 3542 if (use_local_bitmaps && verify_local_bitmaps) 3543 verify(); 3544 #endif // PRODUCT 3545 } else { 3546 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); 3547 } 3548 } 3549 3550 static size_t bitmap_size_in_words() { 3551 return 
(bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; 3552 } 3553 }; 3554 3555 #ifndef PRODUCT 3556 3557 bool GCLabBitMapClosure::do_bit(size_t offset) { 3558 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 3559 guarantee(_cm->isMarked(oop(addr)), "it should be!"); 3560 return true; 3561 } 3562 3563 #endif // PRODUCT 3564 3565 class G1ParGCAllocBuffer: public ParGCAllocBuffer { 3566 private: 3567 bool _retired; 3568 bool _during_marking; 3569 GCLabBitMap _bitmap; 3570 3571 public: 3572 G1ParGCAllocBuffer() : 3573 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize), 3574 _during_marking(G1CollectedHeap::heap()->mark_in_progress()), 3575 _bitmap(G1CollectedHeap::heap()->reserved_region().start()), 3576 _retired(false) 3577 { } 3578 3579 inline bool mark(HeapWord* addr) { 3580 guarantee(use_local_bitmaps, "invariant"); 3581 assert(_during_marking, "invariant"); 3582 return _bitmap.mark(addr); 3583 } 3584 3585 inline void set_buf(HeapWord* buf) { 3586 if (use_local_bitmaps && _during_marking) 3587 _bitmap.set_buffer(buf); 3588 ParGCAllocBuffer::set_buf(buf); 3589 _retired = false; 3590 } 3591 3592 inline void retire(bool end_of_gc, bool retain) { 3593 if (_retired) 3594 return; 3595 if (use_local_bitmaps && _during_marking) { 3596 _bitmap.retire(); 3597 } 3598 ParGCAllocBuffer::retire(end_of_gc, retain); 3599 _retired = true; 3600 } 3601 }; 3602 3603 3604 class G1ParScanThreadState : public StackObj { 3605 protected: 3606 G1CollectedHeap* _g1h; 3607 RefToScanQueue* _refs; 3608 DirtyCardQueue _dcq; 3609 CardTableModRefBS* _ct_bs; 3610 G1RemSet* _g1_rem; 3611 3612 typedef GrowableArray<oop*> OverflowQueue; 3613 OverflowQueue* _overflowed_refs; 3614 3615 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; 3616 ageTable _age_table; 3617 3618 size_t _alloc_buffer_waste; 3619 size_t _undo_waste; 3620 3621 OopsInHeapRegionClosure* _evac_failure_cl; 3622 G1ParScanHeapEvacClosure* _evac_cl; 3623 G1ParScanPartialArrayClosure* _partial_scan_cl; 3624 3625 int _hash_seed; 3626 int _queue_num; 3627 3628 int _term_attempts; 3629 #if G1_DETAILED_STATS 3630 int _pushes, _pops, _steals, _steal_attempts; 3631 int _overflow_pushes; 3632 #endif 3633 3634 double _start; 3635 double _start_strong_roots; 3636 double _strong_roots_time; 3637 double _start_term; 3638 double _term_time; 3639 3640 // Map from young-age-index (0 == not young, 1 is youngest) to 3641 // surviving words. base is what we get back from the malloc call 3642 size_t* _surviving_young_words_base; 3643 // this points into the array, as we use the first few entries for padding 3644 size_t* _surviving_young_words; 3645 3646 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) 3647 3648 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } 3649 3650 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } 3651 3652 DirtyCardQueue& dirty_card_queue() { return _dcq; } 3653 CardTableModRefBS* ctbs() { return _ct_bs; } 3654 3655 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { 3656 if (!from->is_survivor()) { 3657 _g1_rem->par_write_ref(from, p, tid); 3658 } 3659 } 3660 3661 void deferred_rs_update(HeapRegion* from, oop* p, int tid) { 3662 // If the new value of the field points to the same region or 3663 // is the to-space, we don't need to include it in the Rset updates. 3664 if (!from->is_in_reserved(*p) && !from->is_survivor()) { 3665 size_t card_index = ctbs()->index_for(p); 3666 // If the card hasn't been added to the buffer, do it. 
3667 if (ctbs()->mark_card_deferred(card_index)) { 3668 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); 3669 } 3670 } 3671 } 3672 3673 public: 3674 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) 3675 : _g1h(g1h), 3676 _refs(g1h->task_queue(queue_num)), 3677 _dcq(&g1h->dirty_card_queue_set()), 3678 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 3679 _g1_rem(g1h->g1_rem_set()), 3680 _hash_seed(17), _queue_num(queue_num), 3681 _term_attempts(0), 3682 _age_table(false), 3683 #if G1_DETAILED_STATS 3684 _pushes(0), _pops(0), _steals(0), 3685 _steal_attempts(0), _overflow_pushes(0), 3686 #endif 3687 _strong_roots_time(0), _term_time(0), 3688 _alloc_buffer_waste(0), _undo_waste(0) 3689 { 3690 // we allocate G1YoungSurvRateNumRegions plus one entries, since 3691 // we "sacrifice" entry 0 to keep track of surviving bytes for 3692 // non-young regions (where the age is -1) 3693 // We also add a few elements at the beginning and at the end in 3694 // an attempt to eliminate cache contention 3695 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); 3696 size_t array_length = PADDING_ELEM_NUM + 3697 real_length + 3698 PADDING_ELEM_NUM; 3699 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); 3700 if (_surviving_young_words_base == NULL) 3701 vm_exit_out_of_memory(array_length * sizeof(size_t), 3702 "Not enough space for young surv histo."); 3703 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 3704 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); 3705 3706 _overflowed_refs = new OverflowQueue(10); 3707 3708 _start = os::elapsedTime(); 3709 } 3710 3711 ~G1ParScanThreadState() { 3712 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); 3713 } 3714 3715 RefToScanQueue* refs() { return _refs; } 3716 OverflowQueue* overflowed_refs() { return _overflowed_refs; } 3717 ageTable* age_table() { return &_age_table; } 3718 3719 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { 3720 return &_alloc_buffers[purpose]; 3721 } 3722 3723 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } 3724 size_t undo_waste() { return _undo_waste; } 3725 3726 void push_on_queue(oop* ref) { 3727 assert(ref != NULL, "invariant"); 3728 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant"); 3729 3730 if (!refs()->push(ref)) { 3731 overflowed_refs()->push(ref); 3732 IF_G1_DETAILED_STATS(note_overflow_push()); 3733 } else { 3734 IF_G1_DETAILED_STATS(note_push()); 3735 } 3736 } 3737 3738 void pop_from_queue(oop*& ref) { 3739 if (!refs()->pop_local(ref)) { 3740 ref = NULL; 3741 } else { 3742 assert(ref != NULL, "invariant"); 3743 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), 3744 "invariant"); 3745 3746 IF_G1_DETAILED_STATS(note_pop()); 3747 } 3748 } 3749 3750 void pop_from_overflow_queue(oop*& ref) { 3751 ref = overflowed_refs()->pop(); 3752 } 3753 3754 int refs_to_scan() { return refs()->size(); } 3755 int overflowed_refs_to_scan() { return overflowed_refs()->length(); } 3756 3757 void update_rs(HeapRegion* from, oop* p, int tid) { 3758 if (G1DeferredRSUpdate) { 3759 deferred_rs_update(from, p, tid); 3760 } else { 3761 immediate_rs_update(from, p, tid); 3762 } 3763 } 3764 3765 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 3766 3767 HeapWord* obj = NULL; 3768 if (word_sz * 100 < 3769 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) * 3770 ParallelGCBufferWastePct) { 3771 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); 3772 
add_to_alloc_buffer_waste(alloc_buf->words_remaining()); 3773 alloc_buf->retire(false, false); 3774 3775 HeapWord* buf = 3776 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize); 3777 if (buf == NULL) return NULL; // Let caller handle allocation failure. 3778 // Otherwise. 3779 alloc_buf->set_buf(buf); 3780 3781 obj = alloc_buf->allocate(word_sz); 3782 assert(obj != NULL, "buffer was definitely big enough..."); 3783 } else { 3784 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 3785 } 3786 return obj; 3787 } 3788 3789 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { 3790 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); 3791 if (obj != NULL) return obj; 3792 return allocate_slow(purpose, word_sz); 3793 } 3794 3795 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { 3796 if (alloc_buffer(purpose)->contains(obj)) { 3797 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), 3798 "should contain whole object"); 3799 alloc_buffer(purpose)->undo_allocation(obj, word_sz); 3800 } else { 3801 CollectedHeap::fill_with_object(obj, word_sz); 3802 add_to_undo_waste(word_sz); 3803 } 3804 } 3805 3806 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { 3807 _evac_failure_cl = evac_failure_cl; 3808 } 3809 OopsInHeapRegionClosure* evac_failure_closure() { 3810 return _evac_failure_cl; 3811 } 3812 3813 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { 3814 _evac_cl = evac_cl; 3815 } 3816 3817 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { 3818 _partial_scan_cl = partial_scan_cl; 3819 } 3820 3821 int* hash_seed() { return &_hash_seed; } 3822 int queue_num() { return _queue_num; } 3823 3824 int term_attempts() { return _term_attempts; } 3825 void note_term_attempt() { _term_attempts++; } 3826 3827 #if G1_DETAILED_STATS 3828 int pushes() { return _pushes; } 3829 int pops() { return _pops; } 3830 int steals() { return _steals; } 3831 int steal_attempts() { return _steal_attempts; } 3832 int overflow_pushes() { return _overflow_pushes; } 3833 3834 void note_push() { _pushes++; } 3835 void note_pop() { _pops++; } 3836 void note_steal() { _steals++; } 3837 void note_steal_attempt() { _steal_attempts++; } 3838 void note_overflow_push() { _overflow_pushes++; } 3839 #endif 3840 3841 void start_strong_roots() { 3842 _start_strong_roots = os::elapsedTime(); 3843 } 3844 void end_strong_roots() { 3845 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); 3846 } 3847 double strong_roots_time() { return _strong_roots_time; } 3848 3849 void start_term_time() { 3850 note_term_attempt(); 3851 _start_term = os::elapsedTime(); 3852 } 3853 void end_term_time() { 3854 _term_time += (os::elapsedTime() - _start_term); 3855 } 3856 double term_time() { return _term_time; } 3857 3858 double elapsed() { 3859 return os::elapsedTime() - _start; 3860 } 3861 3862 size_t* surviving_young_words() { 3863 // We add on to hide entry 0 which accumulates surviving words for 3864 // age -1 regions (i.e. 
non-young ones) 3865 return _surviving_young_words; 3866 } 3867 3868 void retire_alloc_buffers() { 3869 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3870 size_t waste = _alloc_buffers[ap].words_remaining(); 3871 add_to_alloc_buffer_waste(waste); 3872 _alloc_buffers[ap].retire(true, false); 3873 } 3874 } 3875 3876 private: 3877 void deal_with_reference(oop* ref_to_scan) { 3878 if (has_partial_array_mask(ref_to_scan)) { 3879 _partial_scan_cl->do_oop_nv(ref_to_scan); 3880 } else { 3881 // Note: we can use "raw" versions of "region_containing" because 3882 // "obj_to_scan" is definitely in the heap, and is not in a 3883 // humongous region. 3884 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); 3885 _evac_cl->set_region(r); 3886 _evac_cl->do_oop_nv(ref_to_scan); 3887 } 3888 } 3889 3890 public: 3891 void trim_queue() { 3892 // I've replicated the loop twice, first to drain the overflow 3893 // queue, second to drain the task queue. This is better than 3894 // having a single loop, which checks both conditions and, inside 3895 // it, either pops the overflow queue or the task queue, as each 3896 // loop is tighter. Also, the decision to drain the overflow queue 3897 // first is not arbitrary, as the overflow queue is not visible 3898 // to the other workers, whereas the task queue is. So, we want to 3899 // drain the "invisible" entries first, while allowing the other 3900 // workers to potentially steal the "visible" entries. 3901 3902 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { 3903 while (overflowed_refs_to_scan() > 0) { 3904 oop *ref_to_scan = NULL; 3905 pop_from_overflow_queue(ref_to_scan); 3906 assert(ref_to_scan != NULL, "invariant"); 3907 // We shouldn't have pushed it on the queue if it was not 3908 // pointing into the CSet. 3909 assert(ref_to_scan != NULL, "sanity"); 3910 assert(has_partial_array_mask(ref_to_scan) || 3911 _g1h->obj_in_cs(*ref_to_scan), "sanity"); 3912 3913 deal_with_reference(ref_to_scan); 3914 } 3915 3916 while (refs_to_scan() > 0) { 3917 oop *ref_to_scan = NULL; 3918 pop_from_queue(ref_to_scan); 3919 3920 if (ref_to_scan != NULL) { 3921 // We shouldn't have pushed it on the queue if it was not 3922 // pointing into the CSet. 3923 assert(has_partial_array_mask(ref_to_scan) || 3924 _g1h->obj_in_cs(*ref_to_scan), "sanity"); 3925 3926 deal_with_reference(ref_to_scan); 3927 } 3928 } 3929 } 3930 } 3931 }; 3932 3933 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : 3934 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 3935 _par_scan_state(par_scan_state) { } 3936 3937 // This closure is applied to the fields of the objects that have just been copied. 3938 // Should probably be made inline and moved in g1OopClosures.inline.hpp. 3939 void G1ParScanClosure::do_oop_nv(oop* p) { 3940 oop obj = *p; 3941 3942 if (obj != NULL) { 3943 if (_g1->in_cset_fast_test(obj)) { 3944 // We're not going to even bother checking whether the object is 3945 // already forwarded or not, as this usually causes an immediate 3946 // stall. 
We'll try to prefetch the object (for write, given that
3947 // we might need to install the forwarding reference) and we'll
3948 // get back to it when we pop it from the queue
3949 Prefetch::write(obj->mark_addr(), 0);
3950 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
3951
3952 // slightly paranoid test; I'm trying to catch potential
3953 // problems before we go into push_on_queue to know where the
3954 // problem is coming from
3955 assert(obj == *p, "the value of *p should not have changed");
3956 _par_scan_state->push_on_queue(p);
3957 } else {
3958 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
3959 }
3960 }
3961 }
3962
3963 void G1ParCopyHelper::mark_forwardee(oop* p) {
3964 // This is called _after_ do_oop_work has been called, hence after
3965 // the object has been relocated to its new location and *p points
3966 // to its new location.
3967
3968 oop thisOop = *p;
3969 if (thisOop != NULL) {
3970 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
3971 "shouldn't still be in the CSet if evacuation didn't fail.");
3972 HeapWord* addr = (HeapWord*)thisOop;
3973 if (_g1->is_in_g1_reserved(addr))
3974 _cm->grayRoot(oop(addr));
3975 }
3976 }
3977
3978 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
3979 size_t word_sz = old->size();
3980 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
3981 // +1 to make the -1 indexes valid...
3982 int young_index = from_region->young_index_in_cset()+1;
3983 assert( (from_region->is_young() && young_index > 0) ||
3984 (!from_region->is_young() && young_index == 0), "invariant" );
3985 G1CollectorPolicy* g1p = _g1->g1_policy();
3986 markOop m = old->mark();
3987 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
3988 : m->age();
3989 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
3990 word_sz);
3991 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
3992 oop obj = oop(obj_ptr);
3993
3994 if (obj_ptr == NULL) {
3995 // This will either forward-to-self, or detect that someone else has
3996 // installed a forwarding pointer.
3997 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
3998 return _g1->handle_evacuation_failure_par(cl, old);
3999 }
4000
4001 // We're going to allocate linearly, so might as well prefetch ahead.
4002 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4003
4004 oop forward_ptr = old->forward_to_atomic(obj);
4005 if (forward_ptr == NULL) {
4006 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4007 if (g1p->track_object_age(alloc_purpose)) {
4008 // We could simply do obj->incr_age(). However, this causes a
4009 // performance issue. obj->incr_age() will first check whether
4010 // the object has a displaced mark by checking its mark word;
4011 // getting the mark word from the new location of the object
4012 // stalls. So, given that we already have the mark word and we
4013 // are about to install it anyway, it's better to increase the
4014 // age on the mark word, when the object does not have a
4015 // displaced mark word. We're not expecting many objects to have
4016 // a displaced mark word, so that case is not optimized
4017 // further (it could be...) and we simply call obj->incr_age().
4018 4019 if (m->has_displaced_mark_helper()) { 4020 // in this case, we have to install the mark word first, 4021 // otherwise obj looks to be forwarded (the old mark word, 4022 // which contains the forward pointer, was copied) 4023 obj->set_mark(m); 4024 obj->incr_age(); 4025 } else { 4026 m = m->incr_age(); 4027 obj->set_mark(m); 4028 } 4029 _par_scan_state->age_table()->add(obj, word_sz); 4030 } else { 4031 obj->set_mark(m); 4032 } 4033 4034 // preserve "next" mark bit 4035 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { 4036 if (!use_local_bitmaps || 4037 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { 4038 // if we couldn't mark it on the local bitmap (this happens when 4039 // the object was not allocated in the GCLab), we have to bite 4040 // the bullet and do the standard parallel mark 4041 _cm->markAndGrayObjectIfNecessary(obj); 4042 } 4043 #if 1 4044 if (_g1->isMarkedNext(old)) { 4045 _cm->nextMarkBitMap()->parClear((HeapWord*)old); 4046 } 4047 #endif 4048 } 4049 4050 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 4051 surv_young_words[young_index] += word_sz; 4052 4053 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4054 arrayOop(old)->set_length(0); 4055 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 4056 } else { 4057 // No point in using the slower heap_region_containing() method, 4058 // given that we know obj is in the heap. 4059 _scanner->set_region(_g1->heap_region_containing_raw(obj)); 4060 obj->oop_iterate_backwards(_scanner); 4061 } 4062 } else { 4063 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4064 obj = forward_ptr; 4065 } 4066 return obj; 4067 } 4068 4069 template<bool do_gen_barrier, G1Barrier barrier, 4070 bool do_mark_forwardee, bool skip_cset_test> 4071 void G1ParCopyClosure<do_gen_barrier, barrier, 4072 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) { 4073 oop obj = *p; 4074 assert(barrier != G1BarrierRS || obj != NULL, 4075 "Precondition: G1BarrierRS implies obj is nonNull"); 4076 4077 // The only time we skip the cset test is when we're scanning 4078 // references popped from the queue. And we only push on the queue 4079 // references that we know point into the cset, so no point in 4080 // checking again. But we'll leave an assert here for peace of mind. 4081 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); 4082 4083 // here the null check is implicit in the cset_fast_test() test 4084 if (skip_cset_test || _g1->in_cset_fast_test(obj)) { 4085 #if G1_REM_SET_LOGGING 4086 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " 4087 "into CS.", p, (void*) obj); 4088 #endif 4089 if (obj->is_forwarded()) { 4090 *p = obj->forwardee(); 4091 } else { 4092 *p = copy_to_survivor_space(obj); 4093 } 4094 // When scanning the RS, we only care about objs in CS. 4095 if (barrier == G1BarrierRS) { 4096 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4097 } 4098 } 4099 4100 // When scanning moved objs, must look at all oops. 
4101 if (barrier == G1BarrierEvac && obj != NULL) { 4102 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4103 } 4104 4105 if (do_gen_barrier && obj != NULL) { 4106 par_do_barrier(p); 4107 } 4108 } 4109 4110 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); 4111 4112 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk( 4113 oop obj, int start, int end) { 4114 // process our set of indices (include header in first chunk) 4115 assert(start < end, "invariant"); 4116 T* const base = (T*)objArrayOop(obj)->base(); 4117 T* const start_addr = (start == 0) ? (T*) obj : base + start; 4118 T* const end_addr = base + end; 4119 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); 4120 _scanner.set_region(_g1->heap_region_containing(obj)); 4121 obj->oop_iterate(&_scanner, mr); 4122 } 4123 4124 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { 4125 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); 4126 assert(has_partial_array_mask(p), "invariant"); 4127 oop old = clear_partial_array_mask(p); 4128 assert(old->is_objArray(), "must be obj array"); 4129 assert(old->is_forwarded(), "must be forwarded"); 4130 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 4131 4132 objArrayOop obj = objArrayOop(old->forwardee()); 4133 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); 4134 // Process ParGCArrayScanChunk elements now 4135 // and push the remainder back onto queue 4136 int start = arrayOop(old)->length(); 4137 int end = obj->length(); 4138 int remainder = end - start; 4139 assert(start <= end, "just checking"); 4140 if (remainder > 2 * ParGCArrayScanChunk) { 4141 // Test above combines last partial chunk with a full chunk 4142 end = start + ParGCArrayScanChunk; 4143 arrayOop(old)->set_length(end); 4144 // Push remainder. 4145 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 4146 } else { 4147 // Restore length so that the heap remains parsable in 4148 // case of evacuation failure. 
4149 arrayOop(old)->set_length(end); 4150 } 4151 4152 // process our set of indices (include header in first chunk) 4153 process_array_chunk<oop>(obj, start, end); 4154 } 4155 4156 int G1ScanAndBalanceClosure::_nq = 0; 4157 4158 class G1ParEvacuateFollowersClosure : public VoidClosure { 4159 protected: 4160 G1CollectedHeap* _g1h; 4161 G1ParScanThreadState* _par_scan_state; 4162 RefToScanQueueSet* _queues; 4163 ParallelTaskTerminator* _terminator; 4164 4165 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } 4166 RefToScanQueueSet* queues() { return _queues; } 4167 ParallelTaskTerminator* terminator() { return _terminator; } 4168 4169 public: 4170 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, 4171 G1ParScanThreadState* par_scan_state, 4172 RefToScanQueueSet* queues, 4173 ParallelTaskTerminator* terminator) 4174 : _g1h(g1h), _par_scan_state(par_scan_state), 4175 _queues(queues), _terminator(terminator) {} 4176 4177 void do_void() { 4178 G1ParScanThreadState* pss = par_scan_state(); 4179 while (true) { 4180 oop* ref_to_scan; 4181 pss->trim_queue(); 4182 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); 4183 if (queues()->steal(pss->queue_num(), 4184 pss->hash_seed(), 4185 ref_to_scan)) { 4186 IF_G1_DETAILED_STATS(pss->note_steal()); 4187 4188 // slightly paranoid tests; I'm trying to catch potential 4189 // problems before we go into push_on_queue to know where the 4190 // problem is coming from 4191 assert(ref_to_scan != NULL, "invariant"); 4192 assert(has_partial_array_mask(ref_to_scan) || 4193 _g1h->obj_in_cs(*ref_to_scan), "invariant"); 4194 pss->push_on_queue(ref_to_scan); 4195 continue; 4196 } 4197 pss->start_term_time(); 4198 if (terminator()->offer_termination()) break; 4199 pss->end_term_time(); 4200 } 4201 pss->end_term_time(); 4202 pss->retire_alloc_buffers(); 4203 } 4204 }; 4205 4206 class G1ParTask : public AbstractGangTask { 4207 protected: 4208 G1CollectedHeap* _g1h; 4209 RefToScanQueueSet *_queues; 4210 ParallelTaskTerminator _terminator; 4211 4212 Mutex _stats_lock; 4213 Mutex* stats_lock() { return &_stats_lock; } 4214 4215 size_t getNCards() { 4216 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) 4217 / G1BlockOffsetSharedArray::N_bytes; 4218 } 4219 4220 public: 4221 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) 4222 : AbstractGangTask("G1 collection"), 4223 _g1h(g1h), 4224 _queues(task_queues), 4225 _terminator(workers, _queues), 4226 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) 4227 {} 4228 4229 RefToScanQueueSet* queues() { return _queues; } 4230 4231 RefToScanQueue *work_queue(int i) { 4232 return queues()->queue(i); 4233 } 4234 4235 void work(int i) { 4236 ResourceMark rm; 4237 HandleMark hm; 4238 4239 G1ParScanThreadState pss(_g1h, i); 4240 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 4241 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); 4242 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); 4243 4244 pss.set_evac_closure(&scan_evac_cl); 4245 pss.set_evac_failure_closure(&evac_failure_cl); 4246 pss.set_partial_scan_closure(&partial_scan_cl); 4247 4248 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); 4249 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); 4250 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); 4251 4252 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); 4253 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); 4254 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); 4255 4256 OopsInHeapRegionClosure 
*scan_root_cl; 4257 OopsInHeapRegionClosure *scan_perm_cl; 4258 OopsInHeapRegionClosure *scan_so_cl; 4259 4260 if (_g1h->g1_policy()->should_initiate_conc_mark()) { 4261 scan_root_cl = &scan_mark_root_cl; 4262 scan_perm_cl = &scan_mark_perm_cl; 4263 scan_so_cl = &scan_mark_heap_rs_cl; 4264 } else { 4265 scan_root_cl = &only_scan_root_cl; 4266 scan_perm_cl = &only_scan_perm_cl; 4267 scan_so_cl = &only_scan_heap_rs_cl; 4268 } 4269 4270 pss.start_strong_roots(); 4271 _g1h->g1_process_strong_roots(/* not collecting perm */ false, 4272 SharedHeap::SO_AllClasses, 4273 scan_root_cl, 4274 &only_scan_heap_rs_cl, 4275 scan_so_cl, 4276 scan_perm_cl, 4277 i); 4278 pss.end_strong_roots(); 4279 { 4280 double start = os::elapsedTime(); 4281 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); 4282 evac.do_void(); 4283 double elapsed_ms = (os::elapsedTime()-start)*1000.0; 4284 double term_ms = pss.term_time()*1000.0; 4285 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); 4286 _g1h->g1_policy()->record_termination_time(i, term_ms); 4287 } 4288 if (G1UseSurvivorSpace) { 4289 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); 4290 } 4291 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); 4292 4293 // Clean up any par-expanded rem sets. 4294 HeapRegionRemSet::par_cleanup(); 4295 4296 MutexLocker x(stats_lock()); 4297 if (ParallelGCVerbose) { 4298 gclog_or_tty->print("Thread %d complete:\n", i); 4299 #if G1_DETAILED_STATS 4300 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", 4301 pss.pushes(), 4302 pss.pops(), 4303 pss.overflow_pushes(), 4304 pss.steals(), 4305 pss.steal_attempts()); 4306 #endif 4307 double elapsed = pss.elapsed(); 4308 double strong_roots = pss.strong_roots_time(); 4309 double term = pss.term_time(); 4310 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" 4311 " Strong roots: %7.2f ms (%6.2f%%)\n" 4312 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", 4313 elapsed * 1000.0, 4314 strong_roots * 1000.0, (strong_roots*100.0/elapsed), 4315 term * 1000.0, (term*100.0/elapsed), 4316 pss.term_attempts()); 4317 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); 4318 gclog_or_tty->print(" Waste: %8dK\n" 4319 " Alloc Buffer: %8dK\n" 4320 " Undo: %8dK\n", 4321 (total_waste * HeapWordSize) / K, 4322 (pss.alloc_buffer_waste() * HeapWordSize) / K, 4323 (pss.undo_waste() * HeapWordSize) / K); 4324 } 4325 4326 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); 4327 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); 4328 } 4329 }; 4330 4331 // *** Common G1 Evacuation Stuff 4332 4333 class G1CountClosure: public OopsInHeapRegionClosure { 4334 public: 4335 int n; 4336 G1CountClosure() : n(0) {} 4337 void do_oop(narrowOop* p) { 4338 guarantee(false, "NYI"); 4339 } 4340 void do_oop(oop* p) { 4341 oop obj = *p; 4342 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), 4343 "Rem set closure called on non-rem-set pointer."); 4344 n++; 4345 } 4346 }; 4347 4348 static G1CountClosure count_closure; 4349 4350 void 4351 G1CollectedHeap:: 4352 g1_process_strong_roots(bool collecting_perm_gen, 4353 SharedHeap::ScanningOption so, 4354 OopClosure* scan_non_heap_roots, 4355 OopsInHeapRegionClosure* scan_rs, 4356 OopsInHeapRegionClosure* scan_so, 4357 OopsInGenClosure* scan_perm, 4358 int worker_i) { 4359 // First scan the strong roots, including the perm gen. 
4360 double ext_roots_start = os::elapsedTime(); 4361 double closure_app_time_sec = 0.0; 4362 4363 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 4364 BufferingOopsInGenClosure buf_scan_perm(scan_perm); 4365 buf_scan_perm.set_generation(perm_gen()); 4366 4367 process_strong_roots(collecting_perm_gen, so, 4368 &buf_scan_non_heap_roots, 4369 &buf_scan_perm); 4370 // Finish up any enqueued closure apps. 4371 buf_scan_non_heap_roots.done(); 4372 buf_scan_perm.done(); 4373 double ext_roots_end = os::elapsedTime(); 4374 g1_policy()->reset_obj_copy_time(worker_i); 4375 double obj_copy_time_sec = 4376 buf_scan_non_heap_roots.closure_app_seconds() + 4377 buf_scan_perm.closure_app_seconds(); 4378 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 4379 double ext_root_time_ms = 4380 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; 4381 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); 4382 4383 // Scan strong roots in mark stack. 4384 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { 4385 concurrent_mark()->oops_do(scan_non_heap_roots); 4386 } 4387 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; 4388 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); 4389 4390 // XXX What should this be doing in the parallel case? 4391 g1_policy()->record_collection_pause_end_CH_strong_roots(); 4392 if (G1VerifyRemSet) { 4393 // :::: FIXME :::: 4394 // The stupid remembered set doesn't know how to filter out dead 4395 // objects, which the smart one does, and so when it is created 4396 // and then compared the number of entries in each differs and 4397 // the verification code fails. 4398 guarantee(false, "verification code is broken, see note"); 4399 4400 // Let's make sure that the current rem set agrees with the stupidest 4401 // one possible! 4402 bool refs_enabled = ref_processor()->discovery_enabled(); 4403 if (refs_enabled) ref_processor()->disable_discovery(); 4404 StupidG1RemSet stupid(this); 4405 count_closure.n = 0; 4406 stupid.oops_into_collection_set_do(&count_closure, worker_i); 4407 int stupid_n = count_closure.n; 4408 count_closure.n = 0; 4409 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i); 4410 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ."); 4411 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n); 4412 if (refs_enabled) ref_processor()->enable_discovery(); 4413 } 4414 if (scan_so != NULL) { 4415 scan_scan_only_set(scan_so, worker_i); 4416 } 4417 // Now scan the complement of the collection set. 4418 if (scan_rs != NULL) { 4419 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 4420 } 4421 // Finish with the ref_processor roots. 
4422 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 4423 ref_processor()->oops_do(scan_non_heap_roots); 4424 } 4425 g1_policy()->record_collection_pause_end_G1_strong_roots(); 4426 _process_strong_tasks->all_tasks_completed(); 4427 } 4428 4429 void 4430 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, 4431 OopsInHeapRegionClosure* oc, 4432 int worker_i) { 4433 HeapWord* startAddr = r->bottom(); 4434 HeapWord* endAddr = r->used_region().end(); 4435 4436 oc->set_region(r); 4437 4438 HeapWord* p = r->bottom(); 4439 HeapWord* t = r->top(); 4440 guarantee( p == r->next_top_at_mark_start(), "invariant" ); 4441 while (p < t) { 4442 oop obj = oop(p); 4443 p += obj->oop_iterate(oc); 4444 } 4445 } 4446 4447 void 4448 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, 4449 int worker_i) { 4450 double start = os::elapsedTime(); 4451 4452 BufferingOopsInHeapRegionClosure boc(oc); 4453 4454 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); 4455 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); 4456 4457 OopsInHeapRegionClosure *foc; 4458 if (g1_policy()->should_initiate_conc_mark()) 4459 foc = &scan_and_mark; 4460 else 4461 foc = &scan_only; 4462 4463 HeapRegion* hr; 4464 int n = 0; 4465 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { 4466 scan_scan_only_region(hr, foc, worker_i); 4467 ++n; 4468 } 4469 boc.done(); 4470 4471 double closure_app_s = boc.closure_app_seconds(); 4472 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); 4473 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; 4474 g1_policy()->record_scan_only_time(worker_i, ms, n); 4475 } 4476 4477 void 4478 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, 4479 OopClosure* non_root_closure) { 4480 SharedHeap::process_weak_roots(root_closure, non_root_closure); 4481 } 4482 4483 4484 class SaveMarksClosure: public HeapRegionClosure { 4485 public: 4486 bool doHeapRegion(HeapRegion* r) { 4487 r->save_marks(); 4488 return false; 4489 } 4490 }; 4491 4492 void G1CollectedHeap::save_marks() { 4493 if (ParallelGCThreads == 0) { 4494 SaveMarksClosure sm; 4495 heap_region_iterate(&sm); 4496 } 4497 // We do this even in the parallel case 4498 perm_gen()->save_marks(); 4499 } 4500 4501 void G1CollectedHeap::evacuate_collection_set() { 4502 set_evacuation_failed(false); 4503 4504 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 4505 concurrent_g1_refine()->set_use_cache(false); 4506 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); 4507 set_par_threads(n_workers); 4508 G1ParTask g1_par_task(this, n_workers, _task_queues); 4509 4510 init_for_evac_failure(NULL); 4511 4512 change_strong_roots_parity(); // In preparation for parallel strong roots. 4513 rem_set()->prepare_for_younger_refs_iterate(true); 4514 4515 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); 4516 double start_par = os::elapsedTime(); 4517 if (ParallelGCThreads > 0) { 4518 // The individual threads will set their evac-failure closures. 4519 workers()->run_task(&g1_par_task); 4520 } else { 4521 g1_par_task.work(0); 4522 } 4523 4524 double par_time = (os::elapsedTime() - start_par) * 1000.0; 4525 g1_policy()->record_par_time(par_time); 4526 set_par_threads(0); 4527 // Is this the right thing to do here? We don't save marks 4528 // on individual heap regions when we allocate from 4529 // them in parallel, so this seems like the correct place for this. 
4530 retire_all_alloc_regions(); 4531 { 4532 G1IsAliveClosure is_alive(this); 4533 G1KeepAliveClosure keep_alive(this); 4534 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 4535 } 4536 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 4537 4538 concurrent_g1_refine()->set_use_cache(true); 4539 4540 finalize_for_evac_failure(); 4541 4542 // Must do this before removing self-forwarding pointers, which clears 4543 // the per-region evac-failure flags. 4544 concurrent_mark()->complete_marking_in_collection_set(); 4545 4546 if (evacuation_failed()) { 4547 remove_self_forwarding_pointers(); 4548 if (PrintGCDetails) { 4549 gclog_or_tty->print(" (evacuation failed)"); 4550 } else if (PrintGC) { 4551 gclog_or_tty->print("--"); 4552 } 4553 } 4554 4555 if (G1DeferredRSUpdate) { 4556 RedirtyLoggedCardTableEntryFastClosure redirty; 4557 dirty_card_queue_set().set_closure(&redirty); 4558 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); 4559 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set()); 4560 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 4561 } 4562 4563 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 4564 } 4565 4566 void G1CollectedHeap::free_region(HeapRegion* hr) { 4567 size_t pre_used = 0; 4568 size_t cleared_h_regions = 0; 4569 size_t freed_regions = 0; 4570 UncleanRegionList local_list; 4571 4572 HeapWord* start = hr->bottom(); 4573 HeapWord* end = hr->prev_top_at_mark_start(); 4574 size_t used_bytes = hr->used(); 4575 size_t live_bytes = hr->max_live_bytes(); 4576 if (used_bytes > 0) { 4577 guarantee( live_bytes <= used_bytes, "invariant" ); 4578 } else { 4579 guarantee( live_bytes == 0, "invariant" ); 4580 } 4581 4582 size_t garbage_bytes = used_bytes - live_bytes; 4583 if (garbage_bytes > 0) 4584 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); 4585 4586 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, 4587 &local_list); 4588 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 4589 &local_list); 4590 } 4591 4592 void 4593 G1CollectedHeap::free_region_work(HeapRegion* hr, 4594 size_t& pre_used, 4595 size_t& cleared_h_regions, 4596 size_t& freed_regions, 4597 UncleanRegionList* list, 4598 bool par) { 4599 assert(!hr->popular(), "should not free popular regions"); 4600 pre_used += hr->used(); 4601 if (hr->isHumongous()) { 4602 assert(hr->startsHumongous(), 4603 "Only the start of a humongous region should be freed."); 4604 int ind = _hrs->find(hr); 4605 assert(ind != -1, "Should have an index."); 4606 // Clear the start region. 4607 hr->hr_clear(par, true /*clear_space*/); 4608 list->insert_before_head(hr); 4609 cleared_h_regions++; 4610 freed_regions++; 4611 // Clear any continued regions. 4612 ind++; 4613 while ((size_t)ind < n_regions()) { 4614 HeapRegion* hrc = _hrs->at(ind); 4615 if (!hrc->continuesHumongous()) break; 4616 // Otherwise, does continue the H region. 4617 assert(hrc->humongous_start_region() == hr, "Huh?"); 4618 hrc->hr_clear(par, true /*clear_space*/); 4619 cleared_h_regions++; 4620 freed_regions++; 4621 list->insert_before_head(hrc); 4622 ind++; 4623 } 4624 } else { 4625 hr->hr_clear(par, true /*clear_space*/); 4626 list->insert_before_head(hr); 4627 freed_regions++; 4628 // If we're using clear2, this should not be enabled. 
4629 // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
4630 }
4631 }
4632
4633 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
4634 size_t cleared_h_regions,
4635 size_t freed_regions,
4636 UncleanRegionList* list) {
4637 if (list != NULL && list->sz() > 0) {
4638 prepend_region_list_on_unclean_list(list);
4639 }
4640 // Acquire a lock, if we're parallel, to update possibly-shared
4641 // variables.
4642 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
4643 {
4644 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
4645 _summary_bytes_used -= pre_used;
4646 _num_humongous_regions -= (int) cleared_h_regions;
4647 _free_regions += freed_regions;
4648 }
4649 }
4650
4651
4652 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
4653 while (list != NULL) {
4654 guarantee( list->is_young(), "invariant" );
4655
4656 HeapWord* bottom = list->bottom();
4657 HeapWord* end = list->end();
4658 MemRegion mr(bottom, end);
4659 ct_bs->dirty(mr);
4660
4661 list = list->get_next_young_region();
4662 }
4663 }
4664
4665 void G1CollectedHeap::cleanUpCardTable() {
4666 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
4667 double start = os::elapsedTime();
4668
4669 ct_bs->clear(_g1_committed);
4670
4671 // now, redirty the cards of the scan-only and survivor regions
4672 // (it seemed faster to do it this way, instead of iterating over
4673 // all regions and then clearing / dirtying as appropriate)
4674 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
4675 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4676
4677 double elapsed = os::elapsedTime() - start;
4678 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4679 }
4680
4681
4682 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4683 // First do any popular regions.
4684 HeapRegion* hr;
4685 while ((hr = popular_region_to_evac()) != NULL) {
4686 evac_popular_region(hr);
4687 }
4688 // Now do heuristic pauses.
4689 if (g1_policy()->should_do_collection_pause(word_size)) { 4690 do_collection_pause(); 4691 } 4692 } 4693 4694 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { 4695 double young_time_ms = 0.0; 4696 double non_young_time_ms = 0.0; 4697 4698 G1CollectorPolicy* policy = g1_policy(); 4699 4700 double start_sec = os::elapsedTime(); 4701 bool non_young = true; 4702 4703 HeapRegion* cur = cs_head; 4704 int age_bound = -1; 4705 size_t rs_lengths = 0; 4706 4707 while (cur != NULL) { 4708 if (non_young) { 4709 if (cur->is_young()) { 4710 double end_sec = os::elapsedTime(); 4711 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4712 non_young_time_ms += elapsed_ms; 4713 4714 start_sec = os::elapsedTime(); 4715 non_young = false; 4716 } 4717 } else { 4718 if (!cur->is_on_free_list()) { 4719 double end_sec = os::elapsedTime(); 4720 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4721 young_time_ms += elapsed_ms; 4722 4723 start_sec = os::elapsedTime(); 4724 non_young = true; 4725 } 4726 } 4727 4728 rs_lengths += cur->rem_set()->occupied(); 4729 4730 HeapRegion* next = cur->next_in_collection_set(); 4731 assert(cur->in_collection_set(), "bad CS"); 4732 cur->set_next_in_collection_set(NULL); 4733 cur->set_in_collection_set(false); 4734 4735 if (cur->is_young()) { 4736 int index = cur->young_index_in_cset(); 4737 guarantee( index != -1, "invariant" ); 4738 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); 4739 size_t words_survived = _surviving_young_words[index]; 4740 cur->record_surv_words_in_group(words_survived); 4741 } else { 4742 int index = cur->young_index_in_cset(); 4743 guarantee( index == -1, "invariant" ); 4744 } 4745 4746 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || 4747 (!cur->is_young() && cur->young_index_in_cset() == -1), 4748 "invariant" ); 4749 4750 if (!cur->evacuation_failed()) { 4751 // And the region is empty. 
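// ("Empty" here means fully evacuated: the region still physically holds
// the old copies of its objects, as the assert below checks.)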
4752 assert(!cur->is_empty(), 4753 "Should not have empty regions in a CS."); 4754 free_region(cur); 4755 } else { 4756 guarantee( !cur->is_scan_only(), "should not be scan only" ); 4757 cur->uninstall_surv_rate_group(); 4758 if (cur->is_young()) 4759 cur->set_young_index_in_cset(-1); 4760 cur->set_not_young(); 4761 cur->set_evacuation_failed(false); 4762 } 4763 cur = next; 4764 } 4765 4766 policy->record_max_rs_lengths(rs_lengths); 4767 policy->cset_regions_freed(); 4768 4769 double end_sec = os::elapsedTime(); 4770 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4771 if (non_young) 4772 non_young_time_ms += elapsed_ms; 4773 else 4774 young_time_ms += elapsed_ms; 4775 4776 policy->record_young_free_cset_time_ms(young_time_ms); 4777 policy->record_non_young_free_cset_time_ms(non_young_time_ms); 4778 } 4779 4780 HeapRegion* 4781 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { 4782 assert(ZF_mon->owned_by_self(), "Precondition"); 4783 HeapRegion* res = pop_unclean_region_list_locked(); 4784 if (res != NULL) { 4785 assert(!res->continuesHumongous() && 4786 res->zero_fill_state() != HeapRegion::Allocated, 4787 "Only free regions on unclean list."); 4788 if (zero_filled) { 4789 res->ensure_zero_filled_locked(); 4790 res->set_zero_fill_allocated(); 4791 } 4792 } 4793 return res; 4794 } 4795 4796 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { 4797 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); 4798 return alloc_region_from_unclean_list_locked(zero_filled); 4799 } 4800 4801 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { 4802 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4803 put_region_on_unclean_list_locked(r); 4804 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4805 } 4806 4807 void G1CollectedHeap::set_unclean_regions_coming(bool b) { 4808 MutexLockerEx x(Cleanup_mon); 4809 set_unclean_regions_coming_locked(b); 4810 } 4811 4812 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { 4813 assert(Cleanup_mon->owned_by_self(), "Precondition"); 4814 _unclean_regions_coming = b; 4815 // Wake up mutator threads that might be waiting for completeCleanup to 4816 // finish. 4817 if (!b) Cleanup_mon->notify_all(); 4818 } 4819 4820 void G1CollectedHeap::wait_for_cleanup_complete() { 4821 MutexLockerEx x(Cleanup_mon); 4822 wait_for_cleanup_complete_locked(); 4823 } 4824 4825 void G1CollectedHeap::wait_for_cleanup_complete_locked() { 4826 assert(Cleanup_mon->owned_by_self(), "precondition"); 4827 while (_unclean_regions_coming) { 4828 Cleanup_mon->wait(); 4829 } 4830 } 4831 4832 void 4833 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { 4834 assert(ZF_mon->owned_by_self(), "precondition."); 4835 _unclean_region_list.insert_before_head(r); 4836 } 4837 4838 void 4839 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { 4840 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4841 prepend_region_list_on_unclean_list_locked(list); 4842 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 
4843 } 4844 4845 void 4846 G1CollectedHeap:: 4847 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { 4848 assert(ZF_mon->owned_by_self(), "precondition."); 4849 _unclean_region_list.prepend_list(list); 4850 } 4851 4852 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { 4853 assert(ZF_mon->owned_by_self(), "precondition."); 4854 HeapRegion* res = _unclean_region_list.pop(); 4855 if (res != NULL) { 4856 // Inform ZF thread that there's a new unclean head. 4857 if (_unclean_region_list.hd() != NULL && should_zf()) 4858 ZF_mon->notify_all(); 4859 } 4860 return res; 4861 } 4862 4863 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { 4864 assert(ZF_mon->owned_by_self(), "precondition."); 4865 return _unclean_region_list.hd(); 4866 } 4867 4868 4869 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { 4870 assert(ZF_mon->owned_by_self(), "Precondition"); 4871 HeapRegion* r = peek_unclean_region_list_locked(); 4872 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { 4873 // Result of below must be equal to "r", since we hold the lock. 4874 (void)pop_unclean_region_list_locked(); 4875 put_free_region_on_list_locked(r); 4876 return true; 4877 } else { 4878 return false; 4879 } 4880 } 4881 4882 bool G1CollectedHeap::move_cleaned_region_to_free_list() { 4883 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4884 return move_cleaned_region_to_free_list_locked(); 4885 } 4886 4887 4888 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { 4889 assert(ZF_mon->owned_by_self(), "precondition."); 4890 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4891 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, 4892 "Regions on free list must be zero filled"); 4893 assert(!r->isHumongous(), "Must not be humongous."); 4894 assert(r->is_empty(), "Better be empty"); 4895 assert(!r->is_on_free_list(), 4896 "Better not already be on free list"); 4897 assert(!r->is_on_unclean_list(), 4898 "Better not already be on unclean list"); 4899 r->set_on_free_list(true); 4900 r->set_next_on_free_list(_free_region_list); 4901 _free_region_list = r; 4902 _free_region_list_size++; 4903 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4904 } 4905 4906 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { 4907 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4908 put_free_region_on_list_locked(r); 4909 } 4910 4911 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { 4912 assert(ZF_mon->owned_by_self(), "precondition."); 4913 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4914 HeapRegion* res = _free_region_list; 4915 if (res != NULL) { 4916 _free_region_list = res->next_from_free_list(); 4917 _free_region_list_size--; 4918 res->set_on_free_list(false); 4919 res->set_next_on_free_list(NULL); 4920 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4921 } 4922 return res; 4923 } 4924 4925 4926 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { 4927 // By self, or on behalf of self. 
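// If a zero-filled region is requested, try the free list first (its
// regions are already zero filled) and fall back to the unclean list,
// zero-filling on demand; otherwise prefer the unclean list and only take
// a (pre-zeroed) free-list region on the second pass.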
4928 assert(Heap_lock->is_locked(), "Precondition"); 4929 HeapRegion* res = NULL; 4930 bool first = true; 4931 while (res == NULL) { 4932 if (zero_filled || !first) { 4933 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4934 res = pop_free_region_list_locked(); 4935 if (res != NULL) { 4936 assert(!res->zero_fill_is_allocated(), 4937 "No allocated regions on free list."); 4938 res->set_zero_fill_allocated(); 4939 } else if (!first) { 4940 break; // We tried both, time to return NULL. 4941 } 4942 } 4943 4944 if (res == NULL) { 4945 res = alloc_region_from_unclean_list(zero_filled); 4946 } 4947 assert(res == NULL || 4948 !zero_filled || 4949 res->zero_fill_is_allocated(), 4950 "We must have allocated the region we're returning"); 4951 first = false; 4952 } 4953 return res; 4954 } 4955 4956 void G1CollectedHeap::remove_allocated_regions_from_lists() { 4957 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4958 { 4959 HeapRegion* prev = NULL; 4960 HeapRegion* cur = _unclean_region_list.hd(); 4961 while (cur != NULL) { 4962 HeapRegion* next = cur->next_from_unclean_list(); 4963 if (cur->zero_fill_is_allocated()) { 4964 // Remove from the list. 4965 if (prev == NULL) { 4966 (void)_unclean_region_list.pop(); 4967 } else { 4968 _unclean_region_list.delete_after(prev); 4969 } 4970 cur->set_on_unclean_list(false); 4971 cur->set_next_on_unclean_list(NULL); 4972 } else { 4973 prev = cur; 4974 } 4975 cur = next; 4976 } 4977 assert(_unclean_region_list.sz() == unclean_region_list_length(), 4978 "Inv"); 4979 } 4980 4981 { 4982 HeapRegion* prev = NULL; 4983 HeapRegion* cur = _free_region_list; 4984 while (cur != NULL) { 4985 HeapRegion* next = cur->next_from_free_list(); 4986 if (cur->zero_fill_is_allocated()) { 4987 // Remove from the list. 4988 if (prev == NULL) { 4989 _free_region_list = cur->next_from_free_list(); 4990 } else { 4991 prev->set_next_on_free_list(cur->next_from_free_list()); 4992 } 4993 cur->set_on_free_list(false); 4994 cur->set_next_on_free_list(NULL); 4995 _free_region_list_size--; 4996 } else { 4997 prev = cur; 4998 } 4999 cur = next; 5000 } 5001 assert(_free_region_list_size == free_region_list_length(), "Inv"); 5002 } 5003 } 5004 5005 bool G1CollectedHeap::verify_region_lists() { 5006 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5007 return verify_region_lists_locked(); 5008 } 5009 5010 bool G1CollectedHeap::verify_region_lists_locked() { 5011 HeapRegion* unclean = _unclean_region_list.hd(); 5012 while (unclean != NULL) { 5013 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); 5014 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); 5015 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, 5016 "Everything else is possible."); 5017 unclean = unclean->next_from_unclean_list(); 5018 } 5019 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); 5020 5021 HeapRegion* free_r = _free_region_list; 5022 while (free_r != NULL) { 5023 assert(free_r->is_on_free_list(), "Well, it is!"); 5024 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); 5025 switch (free_r->zero_fill_state()) { 5026 case HeapRegion::NotZeroFilled: 5027 case HeapRegion::ZeroFilling: 5028 guarantee(false, "Should not be on free list."); 5029 break; 5030 default: 5031 // Everything else is possible. 5032 break; 5033 } 5034 free_r = free_r->next_from_free_list(); 5035 } 5036 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); 5037 // If we didn't do an assertion... 
  return true;
}

size_t G1CollectedHeap::free_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  size_t len = 0;
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    len++;
    cur = cur->next_from_free_list();
  }
  return len;
}

size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}

size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}

size_t G1CollectedHeap::max_regions() {
  return
    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
    HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
         "_free_regions is off.");
  */
  return _free_regions;
}

bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}

class RegionCounter: public HeapRegionClosure {
  size_t _n;
public:
  RegionCounter() : _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty() && !r->popular()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      _n++;
    }
    return false;
  }
  int res() { return (int) _n; }
};

size_t G1CollectedHeap::count_free_regions() {
  RegionCounter rc;
  heap_region_iterate(&rc);
  size_t n = rc.res();
  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
    n--;
  return n;
}

size_t G1CollectedHeap::count_free_regions_list() {
  size_t n = 0;
  ZF_mon->lock_without_safepoint_check();
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    cur = cur->next_from_free_list();
    n++;
  }
  size_t m = unclean_region_list_length();
  ZF_mon->unlock();
  return n + m;
}

bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return (g1_policy()->in_young_gc_mode() &&
          g1_policy()->should_add_next_region_to_young_list());
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}

class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
                                             bool check_sample) {
  bool ret = true;

  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
  if (!ignore_scan_only_list) {
    NoYoungRegionsClosure closure;
    heap_region_iterate(&closure);
    ret = ret && closure.success();
  }

  return ret;
}

void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}

bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  bool no_allocs = true;
  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    no_allocs = r == NULL || r->saved_mark_at_top();
  }
  return no_allocs;
}

void G1CollectedHeap::retire_all_alloc_regions() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    if (r != NULL) {
      // Check for aliases.
      bool has_processed_alias = false;
      for (int i = 0; i < ap; ++i) {
        if (_gc_alloc_regions[i] == r) {
          has_processed_alias = true;
          break;
        }
      }
      if (!has_processed_alias) {
        retire_alloc_region(r, false /* par */);
      }
    }
  }
}


// Done at the start of full GC.
void G1CollectedHeap::tear_down_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  while (pop_unclean_region_list_locked() != NULL) ;
  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
         "Postconditions of loop.");
  while (pop_free_region_list_locked() != NULL) ;
  assert(_free_region_list == NULL, "Postcondition of loop.");
  if (_free_region_list_size != 0) {
    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
    print();
  }
  assert(_free_region_list_size == 0, "Postconditions of loop.");
}


class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                            pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      if (r->popular()) {
        if (r->zero_fill_state() != HeapRegion::Allocated) {
          r->ensure_zero_filled_locked();
          r->set_zero_fill_allocated();
        }
      } else {
        _n++;
        switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // No break: fall through to put the region on the free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
        }
      }
    }
    return false;
  }

  int getFreeRegionCount() { return _n; }
};

// Done at the end of full GC.
void G1CollectedHeap::rebuild_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the end of the full GC.
  RegionResetter rs;
  heap_region_iterate(&rs);
  _free_regions = rs.getFreeRegionCount();
  // Tell the ZF thread it may have work to do.
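  // (should_zf() is true while the zero-filled free list holds fewer than
  //  G1ConcZFMaxRegions regions, i.e. while more zero-filled regions are
  //  still wanted.)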
  if (should_zf()) ZF_mon->notify_all();
}

class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // There are assertions in "set_zero_fill_needed()" below that
      // require top() == bottom(), so this is technically illegal.
      // We'll skirt the law here, by making that true temporarily.
      DEBUG_ONLY(HeapWord* save_top = r->top();
                 r->set_top(r->bottom()));
      r->set_zero_fill_needed();
      DEBUG_ONLY(r->set_top(save_top));
    }
    return false;
  }
};

// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the start of the full GC.
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}

class CountObjClosure: public ObjectClosure {
  size_t _n;
public:
  CountObjClosure() : _n(0) {}
  void do_object(oop obj) { _n++; }
  size_t n() { return _n; }
};

size_t G1CollectedHeap::pop_object_used_objs() {
  size_t sum_objs = 0;
  for (int i = 0; i < G1NumPopularRegions; i++) {
    CountObjClosure cl;
    _hrs->at(i)->object_iterate(&cl);
    sum_objs += cl.n();
  }
  return sum_objs;
}

size_t G1CollectedHeap::pop_object_used_bytes() {
  size_t sum_bytes = 0;
  for (int i = 0; i < G1NumPopularRegions; i++) {
    sum_bytes += _hrs->at(i)->used();
  }
  return sum_bytes;
}


HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) {
  while (_cur_pop_hr_index < G1NumPopularRegions) {
    HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
    HeapWord* res = cur_pop_region->allocate(word_size);
    if (res != NULL) {
      // We account for popular objs directly in the used summary:
      _summary_bytes_used += (word_size * HeapWordSize);
      return res;
    }
    // Otherwise, try the next region, first making sure that we remember
    // the last "top" value as the "next_top_at_mark_start", so that
    // objects made popular during markings aren't automatically considered
    // live.
    cur_pop_region->note_end_of_copying();
    _cur_pop_hr_index++;
  }
  // XXX: For now !!!
  vm_exit_out_of_memory(word_size,
                        "Not enough pop obj space (To Be Fixed)");
  return NULL;
}

class HeapRegionList: public CHeapObj {
public:
  HeapRegion* hr;
  HeapRegionList* next;
};

void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) {
  // This might happen during parallel GC, so protect by this lock.
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  // We don't schedule regions whose evacuations are already pending, or
  // which are already being evacuated.
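  // A newly scheduled region is pushed onto a simple singly-linked (LIFO)
  // list of HeapRegionList nodes; popular_region_to_evac() below pops and
  // frees those nodes.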
  if (!r->popular_pending() && !r->in_collection_set()) {
    r->set_popular_pending(true);
    if (G1TracePopularity) {
      gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" "
                             "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.",
                             r, r->bottom(), r->end());
    }
    HeapRegionList* hrl = new HeapRegionList;
    hrl->hr = r;
    hrl->next = _popular_regions_to_be_evacuated;
    _popular_regions_to_be_evacuated = hrl;
  }
}

HeapRegion* G1CollectedHeap::popular_region_to_evac() {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  HeapRegion* res = NULL;
  while (_popular_regions_to_be_evacuated != NULL && res == NULL) {
    HeapRegionList* hrl = _popular_regions_to_be_evacuated;
    _popular_regions_to_be_evacuated = hrl->next;
    res = hrl->hr;
    // The G1RSPopLimit may have increased, so recheck here...
    if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) {
      // Hah: don't need to schedule.
      if (G1TracePopularity) {
        gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "for pop-object evacuation (size %d < limit %d)",
                               res, res->bottom(), res->end(),
                               res->rem_set()->occupied(), G1RSPopLimit);
      }
      res->set_popular_pending(false);
      res = NULL;
    }
    // We do not reset res->popular() here; if we did so, it would allow
    // the region to be "rescheduled" for popularity evacuation. Instead,
    // this is done in the collection pause, with the world stopped.
    // So the invariant is that the regions in the list have the popularity
    // boolean set, but having the boolean set does not imply membership
    // on the list (though there can be at most one such pop-pending region
    // not on the list at any time).
    delete hrl;
  }
  return res;
}

void G1CollectedHeap::evac_popular_region(HeapRegion* hr) {
  while (true) {
    // Don't want to do a GC pause while cleanup is being completed!
    wait_for_cleanup_complete();

    // Read the GC count while holding the Heap_lock.
    int gc_count_before = SharedHeap::heap()->total_collections();
    g1_policy()->record_stop_world_start();

    {
      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
      VM_G1PopRegionCollectionPause op(gc_count_before, hr);
      VMThread::execute(&op);

      // If the prologue succeeded, we didn't do a GC for this.
      if (op.prologue_succeeded()) break;
    }
    // Otherwise we didn't. We should recheck the size, though, since
    // the limit may have increased...
    if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) {
      hr->set_popular_pending(false);
      break;
    }
  }
}

void G1CollectedHeap::atomic_inc_obj_rc(oop obj) {
  Atomic::inc(obj_rc_addr(obj));
}

class CountRCClosure: public OopsInHeapRegionClosure {
  G1CollectedHeap* _g1h;
  bool _parallel;
public:
  CountRCClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _parallel(ParallelGCThreads > 0)
  {}
  void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != NULL, "Precondition.");
    if (_parallel) {
      // We go sticky at the limit to avoid excess contention.
      // If we want to track the actual RCs further, we'll need to keep a
      // per-thread hash table or something for the popular objects.
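      // In other words the counter saturates: once an object's RC reaches
      // G1ObjPopLimit we stop issuing atomic increments for it, which is
      // all EvacPopObjClosure needs in order to decide whether to evacuate it.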
      if (_g1h->obj_rc(obj) < G1ObjPopLimit) {
        _g1h->atomic_inc_obj_rc(obj);
      }
    } else {
      _g1h->inc_obj_rc(obj);
    }
  }
};

class EvacPopObjClosure: public ObjectClosure {
  G1CollectedHeap* _g1h;
  size_t _pop_objs;
  size_t _max_rc;
public:
  EvacPopObjClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _pop_objs(0), _max_rc(0) {}

  void do_object(oop obj) {
    size_t rc = _g1h->obj_rc(obj);
    _max_rc = MAX2(rc, _max_rc);
    if (rc >= (size_t) G1ObjPopLimit) {
      _g1h->_pop_obj_rc_at_copy.add((double)rc);
      size_t word_sz = obj->size();
      HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz);
      oop new_pop_obj = (oop)new_pop_loc;
      Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz);
      obj->forward_to(new_pop_obj);
      G1ScanAndBalanceClosure scan_and_balance(_g1h);
      new_pop_obj->oop_iterate_backwards(&scan_and_balance);
      // Preserve the "next" mark bit if marking is in progress.
      if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) {
        _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj);
      }

      if (G1TracePopularity) {
        gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT
                               " pop (%d), move to " PTR_FORMAT,
                               (void*) obj, word_sz,
                               _g1h->obj_rc(obj), (void*) new_pop_obj);
      }
      _pop_objs++;
    }
  }
  size_t pop_objs() { return _pop_objs; }
  size_t max_rc() { return _max_rc; }
};

class G1ParCountRCTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  BitMap _bm;

  size_t getNCards() {
    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
           / G1BlockOffsetSharedArray::N_bytes;
  }
  CountRCClosure _count_rc_closure;
public:
  G1ParCountRCTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Par RC Count task"),
    _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h)
  {}

  void work(int i) {
    ResourceMark rm;
    HandleMark hm;
    _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i);
  }
};

void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) {
  // We're evacuating a single region (for popularity).
  if (G1TracePopularity) {
    gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")",
                           popular_region->bottom(), popular_region->end());
  }
  g1_policy()->set_single_region_collection_set(popular_region);
  size_t max_rc;
  if (!compute_reference_counts_and_evac_popular(popular_region,
                                                 &max_rc)) {
    // We didn't evacuate any popular objects.
    // We increase the RS popularity limit, to prevent this from
    // happening in the future.
    if (G1RSPopLimit < (1 << 30)) {
      G1RSPopLimit *= 2;
    }
    // For now, interesting enough for a message:
#if 1
    gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), "
                           "failed to find a pop object (max = %d).",
                           popular_region->bottom(), popular_region->end(),
                           max_rc);
    gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit);
#endif // 1
    // Also, we reset the collection set to NULL, to make the rest of
    // the collection do nothing.
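    // The preamble installed popular_region as a single-region collection
    // set above, so undoing that is just a matter of clearing the region's
    // flags and the policy's collection set.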
    assert(popular_region->next_in_collection_set() == NULL,
           "should be single-region.");
    popular_region->set_in_collection_set(false);
    popular_region->set_popular_pending(false);
    g1_policy()->clear_collection_set();
  }
}

bool G1CollectedHeap::
compute_reference_counts_and_evac_popular(HeapRegion* popular_region,
                                          size_t* max_rc) {
  HeapWord* rc_region_bot;
  HeapWord* rc_region_end;

  // Set up the reference count region.
  HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords);
  if (rc_region != NULL) {
    rc_region_bot = rc_region->bottom();
    rc_region_end = rc_region->end();
  } else {
    rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords);
    if (rc_region_bot == NULL) {
      vm_exit_out_of_memory(HeapRegion::GrainWords,
                            "No space for RC region.");
    }
    rc_region_end = rc_region_bot + HeapRegion::GrainWords;
  }

  if (G1TracePopularity)
    gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")",
                           rc_region_bot, rc_region_end);
  if (rc_region_bot > popular_region->bottom()) {
    _rc_region_above = true;
    _rc_region_diff =
      pointer_delta(rc_region_bot, popular_region->bottom(), 1);
  } else {
    assert(rc_region_bot < popular_region->bottom(), "Can't be equal.");
    _rc_region_above = false;
    _rc_region_diff =
      pointer_delta(popular_region->bottom(), rc_region_bot, 1);
  }
  g1_policy()->record_pop_compute_rc_start();
  // Count external references.
  g1_rem_set()->prepare_for_oops_into_collection_set_do();
  if (ParallelGCThreads > 0) {

    set_par_threads(workers()->total_workers());
    G1ParCountRCTask par_count_rc_task(this);
    workers()->run_task(&par_count_rc_task);
    set_par_threads(0);

  } else {
    CountRCClosure count_rc_closure(this);
    g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0);
  }
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  g1_policy()->record_pop_compute_rc_end();

  // Now evacuate popular objects.
  g1_policy()->record_pop_evac_start();
  EvacPopObjClosure evac_pop_obj_cl(this);
  popular_region->object_iterate(&evac_pop_obj_cl);
  *max_rc = evac_pop_obj_cl.max_rc();

  // Make sure the last "top" value of the current popular region is copied
  // as the "next_top_at_mark_start", so that objects made popular during
  // markings aren't automatically considered live.
  HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
  cur_pop_region->note_end_of_copying();

  if (rc_region != NULL) {
    free_region(rc_region);
  } else {
    FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot);
  }
  g1_policy()->record_pop_evac_end();

  return evac_pop_obj_cl.pop_objs() > 0;
}

class CountPopObjInfoClosure: public HeapRegionClosure {
  size_t _objs;
  size_t _bytes;

  class CountObjClosure: public ObjectClosure {
    size_t _n;
  public:
    CountObjClosure() : _n(0) {}
    void do_object(oop obj) { _n++; }
    size_t n() { return _n; }
  };

public:
  CountPopObjInfoClosure() : _objs(0), _bytes(0) {}
  bool doHeapRegion(HeapRegion* r) {
    _bytes += r->used();
    CountObjClosure blk;
    r->object_iterate(&blk);
    _objs += blk.n();
    return false;
  }
  size_t objs() { return _objs; }
  size_t bytes() { return _bytes; }
};


void G1CollectedHeap::print_popularity_summary_info() const {
  CountPopObjInfoClosure blk;
  for (int i = 0; i <= _cur_pop_hr_index; i++) {
    blk.doHeapRegion(_hrs->at(i));
  }
  gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.",
                         blk.objs(), blk.bytes());
  gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].",
                         _pop_obj_rc_at_copy.avg(),
                         _pop_obj_rc_at_copy.maximum(),
                         _pop_obj_rc_at_copy.sd());
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}

#ifndef PRODUCT

class PrintHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
    if (r != NULL) {
      if (r->is_on_free_list())
        gclog_or_tty->print("Free ");
      if (r->is_young())
        gclog_or_tty->print("Young ");
      if (r->isHumongous())
        gclog_or_tty->print("Is Humongous ");
      r->print();
    }
    return false;
  }
};

class SortHeapRegionClosure : public HeapRegionClosure {
  size_t young_regions, free_regions, unclean_regions;
  size_t hum_regions, count;
  size_t unaccounted, cur_unclean, cur_alloc;
  size_t total_free;
  HeapRegion* cur;
public:
  SortHeapRegionClosure(HeapRegion* _cur) : cur(_cur), young_regions(0),
                                            free_regions(0), unclean_regions(0),
                                            hum_regions(0),
                                            count(0), unaccounted(0),
                                            cur_alloc(0), total_free(0)
  {}
  bool doHeapRegion(HeapRegion* r) {
    count++;
    if (r->is_on_free_list()) free_regions++;
    else if (r->is_on_unclean_list()) unclean_regions++;
    else if (r->isHumongous()) hum_regions++;
    else if (r->is_young()) young_regions++;
    else if (r == cur) cur_alloc++;
    else unaccounted++;
    return false;
  }
  void print() {
    total_free = free_regions + unclean_regions;
    gclog_or_tty->print("%d regions\n", count);
    gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
                        total_free, free_regions, unclean_regions);
    gclog_or_tty->print("%d humongous %d young\n",
                        hum_regions, young_regions);
    gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
    gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  }
};

void G1CollectedHeap::print_region_counts() {
  SortHeapRegionClosure sc(_cur_alloc_region);
  PrintHeapRegionClosure cl;
  heap_region_iterate(&cl);
  heap_region_iterate(&sc);
  sc.print();
  print_region_accounting_info();
}

bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::print_region_accounting_info() {
  gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions);
  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  if (hr == NULL) {
    return is_in_permanent(p);
  } else {
    return hr->is_in(p);
  }
}
#endif // PRODUCT

void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}


// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***