1 /* 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 22 * 23 */ 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_g1CollectedHeap.cpp.incl" 27 28 // turn it on so that the contents of the young list (scan-only / 29 // to-be-collected) are printed at "strategic" points before / during 30 // / after the collection --- this is useful for debugging 31 #define SCAN_ONLY_VERBOSE 0 32 // CURRENT STATUS 33 // This file is under construction. Search for "FIXME". 34 35 // INVARIANTS/NOTES 36 // 37 // All allocation activity covered by the G1CollectedHeap interface is 38 // serialized by acquiring the HeapLock. This happens in 39 // mem_allocate_work, which all such allocation functions call. 40 // (Note that this does not apply to TLAB allocation, which is not part 41 // of this interface: it is done by clients of this interface.) 42 43 // Local to this file. 44 45 // Finds the first HeapRegion. 46 // No longer used, but might be handy someday. 47 48 class FindFirstRegionClosure: public HeapRegionClosure { 49 HeapRegion* _a_region; 50 public: 51 FindFirstRegionClosure() : _a_region(NULL) {} 52 bool doHeapRegion(HeapRegion* r) { 53 _a_region = r; 54 return true; 55 } 56 HeapRegion* result() { return _a_region; } 57 }; 58 59 60 class RefineCardTableEntryClosure: public CardTableEntryClosure { 61 SuspendibleThreadSet* _sts; 62 G1RemSet* _g1rs; 63 ConcurrentG1Refine* _cg1r; 64 bool _concurrent; 65 public: 66 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, 67 G1RemSet* g1rs, 68 ConcurrentG1Refine* cg1r) : 69 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) 70 {} 71 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 72 _g1rs->concurrentRefineOneCard(card_ptr, worker_i); 73 if (_concurrent && _sts->should_yield()) { 74 // Caller will actually yield. 75 return false; 76 } 77 // Otherwise, we finished successfully; return true. 78 return true; 79 } 80 void set_concurrent(bool b) { _concurrent = b; } 81 }; 82 83 84 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { 85 int _calls; 86 G1CollectedHeap* _g1h; 87 CardTableModRefBS* _ctbs; 88 int _histo[256]; 89 public: 90 ClearLoggedCardTableEntryClosure() : 91 _calls(0) 92 { 93 _g1h = G1CollectedHeap::heap(); 94 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 95 for (int i = 0; i < 256; i++) _histo[i] = 0; 96 } 97 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 98 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 99 _calls++; 100 unsigned char* ujb = (unsigned char*)card_ptr; 101 int ind = (int)(*ujb); 102 _histo[ind]++; 103 *card_ptr = -1; 104 } 105 return true; 106 } 107 int calls() { return _calls; } 108 void print_histo() { 109 gclog_or_tty->print_cr("Card table value histogram:"); 110 for (int i = 0; i < 256; i++) { 111 if (_histo[i] != 0) { 112 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); 113 } 114 } 115 } 116 }; 117 118 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { 119 int _calls; 120 G1CollectedHeap* _g1h; 121 CardTableModRefBS* _ctbs; 122 public: 123 RedirtyLoggedCardTableEntryClosure() : 124 _calls(0) 125 { 126 _g1h = G1CollectedHeap::heap(); 127 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 128 } 129 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 130 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 131 _calls++; 132 *card_ptr = 0; 133 } 134 return true; 135 } 136 int calls() { return _calls; } 137 }; 138 139 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { 140 public: 141 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 142 *card_ptr = CardTableModRefBS::dirty_card_val(); 143 return true; 144 } 145 }; 146 147 YoungList::YoungList(G1CollectedHeap* g1h) 148 : _g1h(g1h), _head(NULL), 149 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), 150 _length(0), _scan_only_length(0), 151 _last_sampled_rs_lengths(0), 152 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) 153 { 154 guarantee( check_list_empty(false), "just making sure..." ); 155 } 156 157 void YoungList::push_region(HeapRegion *hr) { 158 assert(!hr->is_young(), "should not already be young"); 159 assert(hr->get_next_young_region() == NULL, "cause it should!"); 160 161 hr->set_next_young_region(_head); 162 _head = hr; 163 164 hr->set_young(); 165 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); 166 ++_length; 167 } 168 169 void YoungList::add_survivor_region(HeapRegion* hr) { 170 assert(hr->is_survivor(), "should be flagged as survivor region"); 171 assert(hr->get_next_young_region() == NULL, "cause it should!"); 172 173 hr->set_next_young_region(_survivor_head); 174 if (_survivor_head == NULL) { 175 _survivor_tail = hr; 176 } 177 _survivor_head = hr; 178 179 ++_survivor_length; 180 } 181 182 HeapRegion* YoungList::pop_region() { 183 while (_head != NULL) { 184 assert( length() > 0, "list should not be empty" ); 185 HeapRegion* ret = _head; 186 _head = ret->get_next_young_region(); 187 ret->set_next_young_region(NULL); 188 --_length; 189 assert(ret->is_young(), "region should be very young"); 190 191 // Replace 'Survivor' region type with 'Young'. So the region will 192 // be treated as a young region and will not be 'confused' with 193 // newly created survivor regions. 194 if (ret->is_survivor()) { 195 ret->set_young(); 196 } 197 198 if (!ret->is_scan_only()) { 199 return ret; 200 } 201 202 // scan-only, we'll add it to the scan-only list 203 if (_scan_only_tail == NULL) { 204 guarantee( _scan_only_head == NULL, "invariant" ); 205 206 _scan_only_head = ret; 207 _curr_scan_only = ret; 208 } else { 209 guarantee( _scan_only_head != NULL, "invariant" ); 210 _scan_only_tail->set_next_young_region(ret); 211 } 212 guarantee( ret->get_next_young_region() == NULL, "invariant" ); 213 _scan_only_tail = ret; 214 215 // no need to be tagged as scan-only any more 216 ret->set_young(); 217 218 ++_scan_only_length; 219 } 220 assert( length() == 0, "list should be empty" ); 221 return NULL; 222 } 223 224 void YoungList::empty_list(HeapRegion* list) { 225 while (list != NULL) { 226 HeapRegion* next = list->get_next_young_region(); 227 list->set_next_young_region(NULL); 228 list->uninstall_surv_rate_group(); 229 list->set_not_young(); 230 list = next; 231 } 232 } 233 234 void YoungList::empty_list() { 235 assert(check_list_well_formed(), "young list should be well formed"); 236 237 empty_list(_head); 238 _head = NULL; 239 _length = 0; 240 241 empty_list(_scan_only_head); 242 _scan_only_head = NULL; 243 _scan_only_tail = NULL; 244 _scan_only_length = 0; 245 _curr_scan_only = NULL; 246 247 empty_list(_survivor_head); 248 _survivor_head = NULL; 249 _survivor_tail = NULL; 250 _survivor_length = 0; 251 252 _last_sampled_rs_lengths = 0; 253 254 assert(check_list_empty(false), "just making sure..."); 255 } 256 257 bool YoungList::check_list_well_formed() { 258 bool ret = true; 259 260 size_t length = 0; 261 HeapRegion* curr = _head; 262 HeapRegion* last = NULL; 263 while (curr != NULL) { 264 if (!curr->is_young() || curr->is_scan_only()) { 265 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " 266 "incorrectly tagged (%d, %d)", 267 curr->bottom(), curr->end(), 268 curr->is_young(), curr->is_scan_only()); 269 ret = false; 270 } 271 ++length; 272 last = curr; 273 curr = curr->get_next_young_region(); 274 } 275 ret = ret && (length == _length); 276 277 if (!ret) { 278 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); 279 gclog_or_tty->print_cr("### list has %d entries, _length is %d", 280 length, _length); 281 } 282 283 bool scan_only_ret = true; 284 length = 0; 285 curr = _scan_only_head; 286 last = NULL; 287 while (curr != NULL) { 288 if (!curr->is_young() || curr->is_scan_only()) { 289 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " 290 "incorrectly tagged (%d, %d)", 291 curr->bottom(), curr->end(), 292 curr->is_young(), curr->is_scan_only()); 293 scan_only_ret = false; 294 } 295 ++length; 296 last = curr; 297 curr = curr->get_next_young_region(); 298 } 299 scan_only_ret = scan_only_ret && (length == _scan_only_length); 300 301 if ( (last != _scan_only_tail) || 302 (_scan_only_head == NULL && _scan_only_tail != NULL) || 303 (_scan_only_head != NULL && _scan_only_tail == NULL) ) { 304 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); 305 scan_only_ret = false; 306 } 307 308 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { 309 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); 310 scan_only_ret = false; 311 } 312 313 if (!scan_only_ret) { 314 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); 315 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", 316 length, _scan_only_length); 317 } 318 319 return ret && scan_only_ret; 320 } 321 322 bool YoungList::check_list_empty(bool ignore_scan_only_list, 323 bool check_sample) { 324 bool ret = true; 325 326 if (_length != 0) { 327 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", 328 _length); 329 ret = false; 330 } 331 if (check_sample && _last_sampled_rs_lengths != 0) { 332 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); 333 ret = false; 334 } 335 if (_head != NULL) { 336 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); 337 ret = false; 338 } 339 if (!ret) { 340 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); 341 } 342 343 if (ignore_scan_only_list) 344 return ret; 345 346 bool scan_only_ret = true; 347 if (_scan_only_length != 0) { 348 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", 349 _scan_only_length); 350 scan_only_ret = false; 351 } 352 if (_scan_only_head != NULL) { 353 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); 354 scan_only_ret = false; 355 } 356 if (_scan_only_tail != NULL) { 357 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); 358 scan_only_ret = false; 359 } 360 if (!scan_only_ret) { 361 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); 362 } 363 364 return ret && scan_only_ret; 365 } 366 367 void 368 YoungList::rs_length_sampling_init() { 369 _sampled_rs_lengths = 0; 370 _curr = _head; 371 } 372 373 bool 374 YoungList::rs_length_sampling_more() { 375 return _curr != NULL; 376 } 377 378 void 379 YoungList::rs_length_sampling_next() { 380 assert( _curr != NULL, "invariant" ); 381 _sampled_rs_lengths += _curr->rem_set()->occupied(); 382 _curr = _curr->get_next_young_region(); 383 if (_curr == NULL) { 384 _last_sampled_rs_lengths = _sampled_rs_lengths; 385 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); 386 } 387 } 388 389 void 390 YoungList::reset_auxilary_lists() { 391 // We could have just "moved" the scan-only list to the young list. 392 // However, the scan-only list is ordered according to the region 393 // age in descending order, so, by moving one entry at a time, we 394 // ensure that it is recreated in ascending order. 395 396 guarantee( is_empty(), "young list should be empty" ); 397 assert(check_list_well_formed(), "young list should be well formed"); 398 399 // Add survivor regions to SurvRateGroup. 400 _g1h->g1_policy()->note_start_adding_survivor_regions(); 401 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); 402 for (HeapRegion* curr = _survivor_head; 403 curr != NULL; 404 curr = curr->get_next_young_region()) { 405 _g1h->g1_policy()->set_region_survivors(curr); 406 } 407 _g1h->g1_policy()->note_stop_adding_survivor_regions(); 408 409 if (_survivor_head != NULL) { 410 _head = _survivor_head; 411 _length = _survivor_length + _scan_only_length; 412 _survivor_tail->set_next_young_region(_scan_only_head); 413 } else { 414 _head = _scan_only_head; 415 _length = _scan_only_length; 416 } 417 418 for (HeapRegion* curr = _scan_only_head; 419 curr != NULL; 420 curr = curr->get_next_young_region()) { 421 curr->recalculate_age_in_surv_rate_group(); 422 } 423 _scan_only_head = NULL; 424 _scan_only_tail = NULL; 425 _scan_only_length = 0; 426 _curr_scan_only = NULL; 427 428 _survivor_head = NULL; 429 _survivor_tail = NULL; 430 _survivor_length = 0; 431 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); 432 433 assert(check_list_well_formed(), "young list should be well formed"); 434 } 435 436 void YoungList::print() { 437 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; 438 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; 439 440 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { 441 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); 442 HeapRegion *curr = lists[list]; 443 if (curr == NULL) 444 gclog_or_tty->print_cr(" empty"); 445 while (curr != NULL) { 446 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " 447 "age: %4d, y: %d, s-o: %d, surv: %d", 448 curr->bottom(), curr->end(), 449 curr->top(), 450 curr->prev_top_at_mark_start(), 451 curr->next_top_at_mark_start(), 452 curr->top_at_conc_mark_count(), 453 curr->age_in_surv_rate_group_cond(), 454 curr->is_young(), 455 curr->is_scan_only(), 456 curr->is_survivor()); 457 curr = curr->get_next_young_region(); 458 } 459 } 460 461 gclog_or_tty->print_cr(""); 462 } 463 464 void G1CollectedHeap::stop_conc_gc_threads() { 465 _cg1r->cg1rThread()->stop(); 466 _czft->stop(); 467 _cmThread->stop(); 468 } 469 470 471 void G1CollectedHeap::check_ct_logs_at_safepoint() { 472 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 473 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 474 475 // Count the dirty cards at the start. 476 CountNonCleanMemRegionClosure count1(this); 477 ct_bs->mod_card_iterate(&count1); 478 int orig_count = count1.n(); 479 480 // First clear the logged cards. 481 ClearLoggedCardTableEntryClosure clear; 482 dcqs.set_closure(&clear); 483 dcqs.apply_closure_to_all_completed_buffers(); 484 dcqs.iterate_closure_all_threads(false); 485 clear.print_histo(); 486 487 // Now ensure that there's no dirty cards. 488 CountNonCleanMemRegionClosure count2(this); 489 ct_bs->mod_card_iterate(&count2); 490 if (count2.n() != 0) { 491 gclog_or_tty->print_cr("Card table has %d entries; %d originally", 492 count2.n(), orig_count); 493 } 494 guarantee(count2.n() == 0, "Card table should be clean."); 495 496 RedirtyLoggedCardTableEntryClosure redirty; 497 JavaThread::dirty_card_queue_set().set_closure(&redirty); 498 dcqs.apply_closure_to_all_completed_buffers(); 499 dcqs.iterate_closure_all_threads(false); 500 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", 501 clear.calls(), orig_count); 502 guarantee(redirty.calls() == clear.calls(), 503 "Or else mechanism is broken."); 504 505 CountNonCleanMemRegionClosure count3(this); 506 ct_bs->mod_card_iterate(&count3); 507 if (count3.n() != orig_count) { 508 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", 509 orig_count, count3.n()); 510 guarantee(count3.n() >= orig_count, "Should have restored them all."); 511 } 512 513 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); 514 } 515 516 // Private class members. 517 518 G1CollectedHeap* G1CollectedHeap::_g1h; 519 520 // Private methods. 521 522 // Finds a HeapRegion that can be used to allocate a given size of block. 523 524 525 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, 526 bool do_expand, 527 bool zero_filled) { 528 ConcurrentZFThread::note_region_alloc(); 529 HeapRegion* res = alloc_free_region_from_lists(zero_filled); 530 if (res == NULL && do_expand) { 531 expand(word_size * HeapWordSize); 532 res = alloc_free_region_from_lists(zero_filled); 533 assert(res == NULL || 534 (!res->isHumongous() && 535 (!zero_filled || 536 res->zero_fill_state() == HeapRegion::Allocated)), 537 "Alloc Regions must be zero filled (and non-H)"); 538 } 539 if (res != NULL && res->is_empty()) _free_regions--; 540 assert(res == NULL || 541 (!res->isHumongous() && 542 (!zero_filled || 543 res->zero_fill_state() == HeapRegion::Allocated)), 544 "Non-young alloc Regions must be zero filled (and non-H)"); 545 546 if (G1TraceRegions) { 547 if (res != NULL) { 548 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " 549 "top "PTR_FORMAT, 550 res->hrs_index(), res->bottom(), res->end(), res->top()); 551 } 552 } 553 554 return res; 555 } 556 557 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, 558 size_t word_size, 559 bool zero_filled) { 560 HeapRegion* alloc_region = NULL; 561 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { 562 alloc_region = newAllocRegion_work(word_size, true, zero_filled); 563 if (purpose == GCAllocForSurvived && alloc_region != NULL) { 564 alloc_region->set_survivor(); 565 } 566 ++_gc_alloc_region_counts[purpose]; 567 } else { 568 g1_policy()->note_alloc_region_limit_reached(purpose); 569 } 570 return alloc_region; 571 } 572 573 // If could fit into free regions w/o expansion, try. 574 // Otherwise, if can expand, do so. 575 // Otherwise, if using ex regions might help, try with ex given back. 576 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { 577 assert(regions_accounted_for(), "Region leakage!"); 578 579 // We can't allocate H regions while cleanupComplete is running, since 580 // some of the regions we find to be empty might not yet be added to the 581 // unclean list. (If we're already at a safepoint, this call is 582 // unnecessary, not to mention wrong.) 583 if (!SafepointSynchronize::is_at_safepoint()) 584 wait_for_cleanup_complete(); 585 586 size_t num_regions = 587 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; 588 589 // Special case if < one region??? 590 591 // Remember the ft size. 592 size_t x_size = expansion_regions(); 593 594 HeapWord* res = NULL; 595 bool eliminated_allocated_from_lists = false; 596 597 // Can the allocation potentially fit in the free regions? 598 if (free_regions() >= num_regions) { 599 res = _hrs->obj_allocate(word_size); 600 } 601 if (res == NULL) { 602 // Try expansion. 603 size_t fs = _hrs->free_suffix(); 604 if (fs + x_size >= num_regions) { 605 expand((num_regions - fs) * HeapRegion::GrainBytes); 606 res = _hrs->obj_allocate(word_size); 607 assert(res != NULL, "This should have worked."); 608 } else { 609 // Expansion won't help. Are there enough free regions if we get rid 610 // of reservations? 611 size_t avail = free_regions(); 612 if (avail >= num_regions) { 613 res = _hrs->obj_allocate(word_size); 614 if (res != NULL) { 615 remove_allocated_regions_from_lists(); 616 eliminated_allocated_from_lists = true; 617 } 618 } 619 } 620 } 621 if (res != NULL) { 622 // Increment by the number of regions allocated. 623 // FIXME: Assumes regions all of size GrainBytes. 624 #ifndef PRODUCT 625 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * 626 HeapRegion::GrainWords)); 627 #endif 628 if (!eliminated_allocated_from_lists) 629 remove_allocated_regions_from_lists(); 630 _summary_bytes_used += word_size * HeapWordSize; 631 _free_regions -= num_regions; 632 _num_humongous_regions += (int) num_regions; 633 } 634 assert(regions_accounted_for(), "Region Leakage"); 635 return res; 636 } 637 638 HeapWord* 639 G1CollectedHeap::attempt_allocation_slow(size_t word_size, 640 bool permit_collection_pause) { 641 HeapWord* res = NULL; 642 HeapRegion* allocated_young_region = NULL; 643 644 assert( SafepointSynchronize::is_at_safepoint() || 645 Heap_lock->owned_by_self(), "pre condition of the call" ); 646 647 if (isHumongous(word_size)) { 648 // Allocation of a humongous object can, in a sense, complete a 649 // partial region, if the previous alloc was also humongous, and 650 // caused the test below to succeed. 651 if (permit_collection_pause) 652 do_collection_pause_if_appropriate(word_size); 653 res = humongousObjAllocate(word_size); 654 assert(_cur_alloc_region == NULL 655 || !_cur_alloc_region->isHumongous(), 656 "Prevent a regression of this bug."); 657 658 } else { 659 // We may have concurrent cleanup working at the time. Wait for it 660 // to complete. In the future we would probably want to make the 661 // concurrent cleanup truly concurrent by decoupling it from the 662 // allocation. 663 if (!SafepointSynchronize::is_at_safepoint()) 664 wait_for_cleanup_complete(); 665 // If we do a collection pause, this will be reset to a non-NULL 666 // value. If we don't, nulling here ensures that we allocate a new 667 // region below. 668 if (_cur_alloc_region != NULL) { 669 // We're finished with the _cur_alloc_region. 670 _summary_bytes_used += _cur_alloc_region->used(); 671 _cur_alloc_region = NULL; 672 } 673 assert(_cur_alloc_region == NULL, "Invariant."); 674 // Completion of a heap region is perhaps a good point at which to do 675 // a collection pause. 676 if (permit_collection_pause) 677 do_collection_pause_if_appropriate(word_size); 678 // Make sure we have an allocation region available. 679 if (_cur_alloc_region == NULL) { 680 if (!SafepointSynchronize::is_at_safepoint()) 681 wait_for_cleanup_complete(); 682 bool next_is_young = should_set_young_locked(); 683 // If the next region is not young, make sure it's zero-filled. 684 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); 685 if (_cur_alloc_region != NULL) { 686 _summary_bytes_used -= _cur_alloc_region->used(); 687 if (next_is_young) { 688 set_region_short_lived_locked(_cur_alloc_region); 689 allocated_young_region = _cur_alloc_region; 690 } 691 } 692 } 693 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), 694 "Prevent a regression of this bug."); 695 696 // Now retry the allocation. 697 if (_cur_alloc_region != NULL) { 698 res = _cur_alloc_region->allocate(word_size); 699 } 700 } 701 702 // NOTE: fails frequently in PRT 703 assert(regions_accounted_for(), "Region leakage!"); 704 705 if (res != NULL) { 706 if (!SafepointSynchronize::is_at_safepoint()) { 707 assert( permit_collection_pause, "invariant" ); 708 assert( Heap_lock->owned_by_self(), "invariant" ); 709 Heap_lock->unlock(); 710 } 711 712 if (allocated_young_region != NULL) { 713 HeapRegion* hr = allocated_young_region; 714 HeapWord* bottom = hr->bottom(); 715 HeapWord* end = hr->end(); 716 MemRegion mr(bottom, end); 717 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); 718 } 719 } 720 721 assert( SafepointSynchronize::is_at_safepoint() || 722 (res == NULL && Heap_lock->owned_by_self()) || 723 (res != NULL && !Heap_lock->owned_by_self()), 724 "post condition of the call" ); 725 726 return res; 727 } 728 729 HeapWord* 730 G1CollectedHeap::mem_allocate(size_t word_size, 731 bool is_noref, 732 bool is_tlab, 733 bool* gc_overhead_limit_was_exceeded) { 734 debug_only(check_for_valid_allocation_state()); 735 assert(no_gc_in_progress(), "Allocation during gc not allowed"); 736 HeapWord* result = NULL; 737 738 // Loop until the allocation is satisified, 739 // or unsatisfied after GC. 740 for (int try_count = 1; /* return or throw */; try_count += 1) { 741 int gc_count_before; 742 { 743 Heap_lock->lock(); 744 result = attempt_allocation(word_size); 745 if (result != NULL) { 746 // attempt_allocation should have unlocked the heap lock 747 assert(is_in(result), "result not in heap"); 748 return result; 749 } 750 // Read the gc count while the heap lock is held. 751 gc_count_before = SharedHeap::heap()->total_collections(); 752 Heap_lock->unlock(); 753 } 754 755 // Create the garbage collection operation... 756 VM_G1CollectForAllocation op(word_size, 757 gc_count_before); 758 759 // ...and get the VM thread to execute it. 760 VMThread::execute(&op); 761 if (op.prologue_succeeded()) { 762 result = op.result(); 763 assert(result == NULL || is_in(result), "result not in heap"); 764 return result; 765 } 766 767 // Give a warning if we seem to be looping forever. 768 if ((QueuedAllocationWarningCount > 0) && 769 (try_count % QueuedAllocationWarningCount == 0)) { 770 warning("G1CollectedHeap::mem_allocate_work retries %d times", 771 try_count); 772 } 773 } 774 } 775 776 void G1CollectedHeap::abandon_cur_alloc_region() { 777 if (_cur_alloc_region != NULL) { 778 // We're finished with the _cur_alloc_region. 779 if (_cur_alloc_region->is_empty()) { 780 _free_regions++; 781 free_region(_cur_alloc_region); 782 } else { 783 _summary_bytes_used += _cur_alloc_region->used(); 784 } 785 _cur_alloc_region = NULL; 786 } 787 } 788 789 void G1CollectedHeap::abandon_gc_alloc_regions() { 790 // first, make sure that the GC alloc region list is empty (it should!) 791 assert(_gc_alloc_region_list == NULL, "invariant"); 792 release_gc_alloc_regions(true /* totally */); 793 } 794 795 class PostMCRemSetClearClosure: public HeapRegionClosure { 796 ModRefBarrierSet* _mr_bs; 797 public: 798 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} 799 bool doHeapRegion(HeapRegion* r) { 800 r->reset_gc_time_stamp(); 801 if (r->continuesHumongous()) 802 return false; 803 HeapRegionRemSet* hrrs = r->rem_set(); 804 if (hrrs != NULL) hrrs->clear(); 805 // You might think here that we could clear just the cards 806 // corresponding to the used region. But no: if we leave a dirty card 807 // in a region we might allocate into, then it would prevent that card 808 // from being enqueued, and cause it to be missed. 809 // Re: the performance cost: we shouldn't be doing full GC anyway! 810 _mr_bs->clear(MemRegion(r->bottom(), r->end())); 811 return false; 812 } 813 }; 814 815 816 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { 817 ModRefBarrierSet* _mr_bs; 818 public: 819 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} 820 bool doHeapRegion(HeapRegion* r) { 821 if (r->continuesHumongous()) return false; 822 if (r->used_region().word_size() != 0) { 823 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); 824 } 825 return false; 826 } 827 }; 828 829 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { 830 G1CollectedHeap* _g1h; 831 UpdateRSOopClosure _cl; 832 int _worker_i; 833 public: 834 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : 835 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), 836 _worker_i(worker_i), 837 _g1h(g1) 838 { } 839 bool doHeapRegion(HeapRegion* r) { 840 if (!r->continuesHumongous()) { 841 _cl.set_from(r); 842 r->oop_iterate(&_cl); 843 } 844 return false; 845 } 846 }; 847 848 class ParRebuildRSTask: public AbstractGangTask { 849 G1CollectedHeap* _g1; 850 public: 851 ParRebuildRSTask(G1CollectedHeap* g1) 852 : AbstractGangTask("ParRebuildRSTask"), 853 _g1(g1) 854 { } 855 856 void work(int i) { 857 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); 858 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, 859 HeapRegion::RebuildRSClaimValue); 860 } 861 }; 862 863 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, 864 size_t word_size) { 865 ResourceMark rm; 866 867 if (full && DisableExplicitGC) { 868 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); 869 return; 870 } 871 872 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 873 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); 874 875 if (GC_locker::is_active()) { 876 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 877 } 878 879 { 880 IsGCActiveMark x; 881 882 // Timing 883 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); 884 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 885 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); 886 887 double start = os::elapsedTime(); 888 GCOverheadReporter::recordSTWStart(start); 889 g1_policy()->record_full_collection_start(); 890 891 gc_prologue(true); 892 increment_total_collections(); 893 894 size_t g1h_prev_used = used(); 895 assert(used() == recalculate_used(), "Should be equal"); 896 897 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 898 HandleMark hm; // Discard invalid handles created during verification 899 prepare_for_verify(); 900 gclog_or_tty->print(" VerifyBeforeGC:"); 901 Universe::verify(true); 902 } 903 assert(regions_accounted_for(), "Region leakage!"); 904 905 COMPILER2_PRESENT(DerivedPointerTable::clear()); 906 907 // We want to discover references, but not process them yet. 908 // This mode is disabled in 909 // instanceRefKlass::process_discovered_references if the 910 // generation does some collection work, or 911 // instanceRefKlass::enqueue_discovered_references if the 912 // generation returns without doing any work. 913 ref_processor()->disable_discovery(); 914 ref_processor()->abandon_partial_discovery(); 915 ref_processor()->verify_no_references_recorded(); 916 917 // Abandon current iterations of concurrent marking and concurrent 918 // refinement, if any are in progress. 919 concurrent_mark()->abort(); 920 921 // Make sure we'll choose a new allocation region afterwards. 922 abandon_cur_alloc_region(); 923 abandon_gc_alloc_regions(); 924 assert(_cur_alloc_region == NULL, "Invariant."); 925 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); 926 tear_down_region_lists(); 927 set_used_regions_to_need_zero_fill(); 928 if (g1_policy()->in_young_gc_mode()) { 929 empty_young_list(); 930 g1_policy()->set_full_young_gcs(true); 931 } 932 933 // Temporarily make reference _discovery_ single threaded (non-MT). 934 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); 935 936 // Temporarily make refs discovery atomic 937 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); 938 939 // Temporarily clear _is_alive_non_header 940 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); 941 942 ref_processor()->enable_discovery(); 943 ref_processor()->setup_policy(clear_all_soft_refs); 944 945 // Do collection work 946 { 947 HandleMark hm; // Discard invalid handles created during gc 948 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); 949 } 950 // Because freeing humongous regions may have added some unclean 951 // regions, it is necessary to tear down again before rebuilding. 952 tear_down_region_lists(); 953 rebuild_region_lists(); 954 955 _summary_bytes_used = recalculate_used(); 956 957 ref_processor()->enqueue_discovered_references(); 958 959 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 960 961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 962 HandleMark hm; // Discard invalid handles created during verification 963 gclog_or_tty->print(" VerifyAfterGC:"); 964 Universe::verify(false); 965 } 966 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); 967 968 reset_gc_time_stamp(); 969 // Since everything potentially moved, we will clear all remembered 970 // sets, and clear all cards. Later we will rebuild remebered 971 // sets. We will also reset the GC time stamps of the regions. 972 PostMCRemSetClearClosure rs_clear(mr_bs()); 973 heap_region_iterate(&rs_clear); 974 975 // Resize the heap if necessary. 976 resize_if_necessary_after_full_collection(full ? 0 : word_size); 977 978 if (_cg1r->use_cache()) { 979 _cg1r->clear_and_record_card_counts(); 980 _cg1r->clear_hot_cache(); 981 } 982 983 // Rebuild remembered sets of all regions. 984 if (ParallelGCThreads > 0) { 985 ParRebuildRSTask rebuild_rs_task(this); 986 assert(check_heap_region_claim_values( 987 HeapRegion::InitialClaimValue), "sanity check"); 988 set_par_threads(workers()->total_workers()); 989 workers()->run_task(&rebuild_rs_task); 990 set_par_threads(0); 991 assert(check_heap_region_claim_values( 992 HeapRegion::RebuildRSClaimValue), "sanity check"); 993 reset_heap_region_claim_values(); 994 } else { 995 RebuildRSOutOfRegionClosure rebuild_rs(this); 996 heap_region_iterate(&rebuild_rs); 997 } 998 999 if (PrintGC) { 1000 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); 1001 } 1002 1003 if (true) { // FIXME 1004 // Ask the permanent generation to adjust size for full collections 1005 perm()->compute_new_size(); 1006 } 1007 1008 double end = os::elapsedTime(); 1009 GCOverheadReporter::recordSTWEnd(end); 1010 g1_policy()->record_full_collection_end(); 1011 1012 #ifdef TRACESPINNING 1013 ParallelTaskTerminator::print_termination_counts(); 1014 #endif 1015 1016 gc_epilogue(true); 1017 1018 // Abandon concurrent refinement. This must happen last: in the 1019 // dirty-card logging system, some cards may be dirty by weak-ref 1020 // processing, and may be enqueued. But the whole card table is 1021 // dirtied, so this should abandon those logs, and set "do_traversal" 1022 // to true. 1023 concurrent_g1_refine()->set_pya_restart(); 1024 assert(!G1DeferredRSUpdate 1025 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); 1026 assert(regions_accounted_for(), "Region leakage!"); 1027 } 1028 1029 if (g1_policy()->in_young_gc_mode()) { 1030 _young_list->reset_sampled_info(); 1031 assert( check_young_list_empty(false, false), 1032 "young list should be empty at this point"); 1033 } 1034 } 1035 1036 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { 1037 do_collection(true, clear_all_soft_refs, 0); 1038 } 1039 1040 // This code is mostly copied from TenuredGeneration. 1041 void 1042 G1CollectedHeap:: 1043 resize_if_necessary_after_full_collection(size_t word_size) { 1044 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); 1045 1046 // Include the current allocation, if any, and bytes that will be 1047 // pre-allocated to support collections, as "used". 1048 const size_t used_after_gc = used(); 1049 const size_t capacity_after_gc = capacity(); 1050 const size_t free_after_gc = capacity_after_gc - used_after_gc; 1051 1052 // We don't have floating point command-line arguments 1053 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; 1054 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1055 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; 1056 const double minimum_used_percentage = 1.0 - maximum_free_percentage; 1057 1058 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); 1059 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); 1060 1061 // Don't shrink less than the initial size. 1062 minimum_desired_capacity = 1063 MAX2(minimum_desired_capacity, 1064 collector_policy()->initial_heap_byte_size()); 1065 maximum_desired_capacity = 1066 MAX2(maximum_desired_capacity, 1067 collector_policy()->initial_heap_byte_size()); 1068 1069 // We are failing here because minimum_desired_capacity is 1070 assert(used_after_gc <= minimum_desired_capacity, "sanity check"); 1071 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); 1072 1073 if (PrintGC && Verbose) { 1074 const double free_percentage = ((double)free_after_gc) / capacity(); 1075 gclog_or_tty->print_cr("Computing new size after full GC "); 1076 gclog_or_tty->print_cr(" " 1077 " minimum_free_percentage: %6.2f", 1078 minimum_free_percentage); 1079 gclog_or_tty->print_cr(" " 1080 " maximum_free_percentage: %6.2f", 1081 maximum_free_percentage); 1082 gclog_or_tty->print_cr(" " 1083 " capacity: %6.1fK" 1084 " minimum_desired_capacity: %6.1fK" 1085 " maximum_desired_capacity: %6.1fK", 1086 capacity() / (double) K, 1087 minimum_desired_capacity / (double) K, 1088 maximum_desired_capacity / (double) K); 1089 gclog_or_tty->print_cr(" " 1090 " free_after_gc : %6.1fK" 1091 " used_after_gc : %6.1fK", 1092 free_after_gc / (double) K, 1093 used_after_gc / (double) K); 1094 gclog_or_tty->print_cr(" " 1095 " free_percentage: %6.2f", 1096 free_percentage); 1097 } 1098 if (capacity() < minimum_desired_capacity) { 1099 // Don't expand unless it's significant 1100 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; 1101 expand(expand_bytes); 1102 if (PrintGC && Verbose) { 1103 gclog_or_tty->print_cr(" expanding:" 1104 " minimum_desired_capacity: %6.1fK" 1105 " expand_bytes: %6.1fK", 1106 minimum_desired_capacity / (double) K, 1107 expand_bytes / (double) K); 1108 } 1109 1110 // No expansion, now see if we want to shrink 1111 } else if (capacity() > maximum_desired_capacity) { 1112 // Capacity too large, compute shrinking size 1113 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; 1114 shrink(shrink_bytes); 1115 if (PrintGC && Verbose) { 1116 gclog_or_tty->print_cr(" " 1117 " shrinking:" 1118 " initSize: %.1fK" 1119 " maximum_desired_capacity: %.1fK", 1120 collector_policy()->initial_heap_byte_size() / (double) K, 1121 maximum_desired_capacity / (double) K); 1122 gclog_or_tty->print_cr(" " 1123 " shrink_bytes: %.1fK", 1124 shrink_bytes / (double) K); 1125 } 1126 } 1127 } 1128 1129 1130 HeapWord* 1131 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { 1132 HeapWord* result = NULL; 1133 1134 // In a G1 heap, we're supposed to keep allocation from failing by 1135 // incremental pauses. Therefore, at least for now, we'll favor 1136 // expansion over collection. (This might change in the future if we can 1137 // do something smarter than full collection to satisfy a failed alloc.) 1138 1139 result = expand_and_allocate(word_size); 1140 if (result != NULL) { 1141 assert(is_in(result), "result not in heap"); 1142 return result; 1143 } 1144 1145 // OK, I guess we have to try collection. 1146 1147 do_collection(false, false, word_size); 1148 1149 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1150 1151 if (result != NULL) { 1152 assert(is_in(result), "result not in heap"); 1153 return result; 1154 } 1155 1156 // Try collecting soft references. 1157 do_collection(false, true, word_size); 1158 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1159 if (result != NULL) { 1160 assert(is_in(result), "result not in heap"); 1161 return result; 1162 } 1163 1164 // What else? We might try synchronous finalization later. If the total 1165 // space available is large enough for the allocation, then a more 1166 // complete compaction phase than we've tried so far might be 1167 // appropriate. 1168 return NULL; 1169 } 1170 1171 // Attempting to expand the heap sufficiently 1172 // to support an allocation of the given "word_size". If 1173 // successful, perform the allocation and return the address of the 1174 // allocated block, or else "NULL". 1175 1176 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { 1177 size_t expand_bytes = word_size * HeapWordSize; 1178 if (expand_bytes < MinHeapDeltaBytes) { 1179 expand_bytes = MinHeapDeltaBytes; 1180 } 1181 expand(expand_bytes); 1182 assert(regions_accounted_for(), "Region leakage!"); 1183 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); 1184 return result; 1185 } 1186 1187 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { 1188 size_t pre_used = 0; 1189 size_t cleared_h_regions = 0; 1190 size_t freed_regions = 0; 1191 UncleanRegionList local_list; 1192 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, 1193 freed_regions, &local_list); 1194 1195 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 1196 &local_list); 1197 return pre_used; 1198 } 1199 1200 void 1201 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, 1202 size_t& pre_used, 1203 size_t& cleared_h, 1204 size_t& freed_regions, 1205 UncleanRegionList* list, 1206 bool par) { 1207 assert(!hr->continuesHumongous(), "should have filtered these out"); 1208 size_t res = 0; 1209 if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) { 1210 if (!hr->is_young()) { 1211 if (G1PolicyVerbose > 0) 1212 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" 1213 " during cleanup", hr, hr->used()); 1214 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); 1215 } 1216 } 1217 } 1218 1219 // FIXME: both this and shrink could probably be more efficient by 1220 // doing one "VirtualSpace::expand_by" call rather than several. 1221 void G1CollectedHeap::expand(size_t expand_bytes) { 1222 size_t old_mem_size = _g1_storage.committed_size(); 1223 // We expand by a minimum of 1K. 1224 expand_bytes = MAX2(expand_bytes, (size_t)K); 1225 size_t aligned_expand_bytes = 1226 ReservedSpace::page_align_size_up(expand_bytes); 1227 aligned_expand_bytes = align_size_up(aligned_expand_bytes, 1228 HeapRegion::GrainBytes); 1229 expand_bytes = aligned_expand_bytes; 1230 while (expand_bytes > 0) { 1231 HeapWord* base = (HeapWord*)_g1_storage.high(); 1232 // Commit more storage. 1233 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); 1234 if (!successful) { 1235 expand_bytes = 0; 1236 } else { 1237 expand_bytes -= HeapRegion::GrainBytes; 1238 // Expand the committed region. 1239 HeapWord* high = (HeapWord*) _g1_storage.high(); 1240 _g1_committed.set_end(high); 1241 // Create a new HeapRegion. 1242 MemRegion mr(base, high); 1243 bool is_zeroed = !_g1_max_committed.contains(base); 1244 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); 1245 1246 // Now update max_committed if necessary. 1247 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); 1248 1249 // Add it to the HeapRegionSeq. 1250 _hrs->insert(hr); 1251 // Set the zero-fill state, according to whether it's already 1252 // zeroed. 1253 { 1254 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 1255 if (is_zeroed) { 1256 hr->set_zero_fill_complete(); 1257 put_free_region_on_list_locked(hr); 1258 } else { 1259 hr->set_zero_fill_needed(); 1260 put_region_on_unclean_list_locked(hr); 1261 } 1262 } 1263 _free_regions++; 1264 // And we used up an expansion region to create it. 1265 _expansion_regions--; 1266 // Tell the cardtable about it. 1267 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1268 // And the offset table as well. 1269 _bot_shared->resize(_g1_committed.word_size()); 1270 } 1271 } 1272 if (Verbose && PrintGC) { 1273 size_t new_mem_size = _g1_storage.committed_size(); 1274 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", 1275 old_mem_size/K, aligned_expand_bytes/K, 1276 new_mem_size/K); 1277 } 1278 } 1279 1280 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) 1281 { 1282 size_t old_mem_size = _g1_storage.committed_size(); 1283 size_t aligned_shrink_bytes = 1284 ReservedSpace::page_align_size_down(shrink_bytes); 1285 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1286 HeapRegion::GrainBytes); 1287 size_t num_regions_deleted = 0; 1288 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); 1289 1290 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1291 if (mr.byte_size() > 0) 1292 _g1_storage.shrink_by(mr.byte_size()); 1293 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1294 1295 _g1_committed.set_end(mr.start()); 1296 _free_regions -= num_regions_deleted; 1297 _expansion_regions += num_regions_deleted; 1298 1299 // Tell the cardtable about it. 1300 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1301 1302 // And the offset table as well. 1303 _bot_shared->resize(_g1_committed.word_size()); 1304 1305 HeapRegionRemSet::shrink_heap(n_regions()); 1306 1307 if (Verbose && PrintGC) { 1308 size_t new_mem_size = _g1_storage.committed_size(); 1309 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", 1310 old_mem_size/K, aligned_shrink_bytes/K, 1311 new_mem_size/K); 1312 } 1313 } 1314 1315 void G1CollectedHeap::shrink(size_t shrink_bytes) { 1316 release_gc_alloc_regions(true /* totally */); 1317 tear_down_region_lists(); // We will rebuild them in a moment. 1318 shrink_helper(shrink_bytes); 1319 rebuild_region_lists(); 1320 } 1321 1322 // Public methods. 1323 1324 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away 1325 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 1326 #endif // _MSC_VER 1327 1328 1329 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : 1330 SharedHeap(policy_), 1331 _g1_policy(policy_), 1332 _ref_processor(NULL), 1333 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), 1334 _bot_shared(NULL), 1335 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), 1336 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), 1337 _evac_failure_scan_stack(NULL) , 1338 _mark_in_progress(false), 1339 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), 1340 _cur_alloc_region(NULL), 1341 _refine_cte_cl(NULL), 1342 _free_region_list(NULL), _free_region_list_size(0), 1343 _free_regions(0), 1344 _popular_object_boundary(NULL), 1345 _cur_pop_hr_index(0), 1346 _popular_regions_to_be_evacuated(NULL), 1347 _pop_obj_rc_at_copy(), 1348 _full_collection(false), 1349 _unclean_region_list(), 1350 _unclean_regions_coming(false), 1351 _young_list(new YoungList(this)), 1352 _gc_time_stamp(0), 1353 _surviving_young_words(NULL), 1354 _in_cset_fast_test(NULL), 1355 _in_cset_fast_test_base(NULL) { 1356 _g1h = this; // To catch bugs. 1357 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { 1358 vm_exit_during_initialization("Failed necessary allocation."); 1359 } 1360 int n_queues = MAX2((int)ParallelGCThreads, 1); 1361 _task_queues = new RefToScanQueueSet(n_queues); 1362 1363 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); 1364 assert(n_rem_sets > 0, "Invariant."); 1365 1366 HeapRegionRemSetIterator** iter_arr = 1367 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); 1368 for (int i = 0; i < n_queues; i++) { 1369 iter_arr[i] = new HeapRegionRemSetIterator(); 1370 } 1371 _rem_set_iterator = iter_arr; 1372 1373 for (int i = 0; i < n_queues; i++) { 1374 RefToScanQueue* q = new RefToScanQueue(); 1375 q->initialize(); 1376 _task_queues->register_queue(i, q); 1377 } 1378 1379 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 1380 _gc_alloc_regions[ap] = NULL; 1381 _gc_alloc_region_counts[ap] = 0; 1382 _retained_gc_alloc_regions[ap] = NULL; 1383 // by default, we do not retain a GC alloc region for each ap; 1384 // we'll override this, when appropriate, below 1385 _retain_gc_alloc_region[ap] = false; 1386 } 1387 1388 // We will try to remember the last half-full tenured region we 1389 // allocated to at the end of a collection so that we can re-use it 1390 // during the next collection. 1391 _retain_gc_alloc_region[GCAllocForTenured] = true; 1392 1393 guarantee(_task_queues != NULL, "task_queues allocation failure."); 1394 } 1395 1396 jint G1CollectedHeap::initialize() { 1397 os::enable_vtime(); 1398 1399 // Necessary to satisfy locking discipline assertions. 1400 1401 MutexLocker x(Heap_lock); 1402 1403 // While there are no constraints in the GC code that HeapWordSize 1404 // be any particular value, there are multiple other areas in the 1405 // system which believe this to be true (e.g. oop->object_size in some 1406 // cases incorrectly returns the size in wordSize units rather than 1407 // HeapWordSize). 1408 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); 1409 1410 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 1411 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 1412 1413 // Ensure that the sizes are properly aligned. 1414 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1415 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1416 1417 // We allocate this in any case, but only do no work if the command line 1418 // param is off. 1419 _cg1r = new ConcurrentG1Refine(); 1420 1421 // Reserve the maximum. 1422 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); 1423 // Includes the perm-gen. 1424 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), 1425 HeapRegion::GrainBytes, 1426 false /*ism*/); 1427 1428 if (!heap_rs.is_reserved()) { 1429 vm_exit_during_initialization("Could not reserve enough space for object heap"); 1430 return JNI_ENOMEM; 1431 } 1432 1433 // It is important to do this in a way such that concurrent readers can't 1434 // temporarily think somethings in the heap. (I've actually seen this 1435 // happen in asserts: DLD.) 1436 _reserved.set_word_size(0); 1437 _reserved.set_start((HeapWord*)heap_rs.base()); 1438 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); 1439 1440 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; 1441 1442 _num_humongous_regions = 0; 1443 1444 // Create the gen rem set (and barrier set) for the entire reserved region. 1445 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 1446 set_barrier_set(rem_set()->bs()); 1447 if (barrier_set()->is_a(BarrierSet::ModRef)) { 1448 _mr_bs = (ModRefBarrierSet*)_barrier_set; 1449 } else { 1450 vm_exit_during_initialization("G1 requires a mod ref bs."); 1451 return JNI_ENOMEM; 1452 } 1453 1454 // Also create a G1 rem set. 1455 if (G1UseHRIntoRS) { 1456 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { 1457 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); 1458 } else { 1459 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); 1460 return JNI_ENOMEM; 1461 } 1462 } else { 1463 _g1_rem_set = new StupidG1RemSet(this); 1464 } 1465 1466 // Carve out the G1 part of the heap. 1467 1468 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 1469 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 1470 g1_rs.size()/HeapWordSize); 1471 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); 1472 1473 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); 1474 1475 _g1_storage.initialize(g1_rs, 0); 1476 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 1477 _g1_max_committed = _g1_committed; 1478 _hrs = new HeapRegionSeq(_expansion_regions); 1479 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); 1480 guarantee(_cur_alloc_region == NULL, "from constructor"); 1481 1482 _bot_shared = new G1BlockOffsetSharedArray(_reserved, 1483 heap_word_size(init_byte_size)); 1484 1485 _g1h = this; 1486 1487 // Create the ConcurrentMark data structure and thread. 1488 // (Must do this late, so that "max_regions" is defined.) 1489 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); 1490 _cmThread = _cm->cmThread(); 1491 1492 // ...and the concurrent zero-fill thread, if necessary. 1493 if (G1ConcZeroFill) { 1494 _czft = new ConcurrentZFThread(); 1495 } 1496 1497 1498 1499 // Allocate the popular regions; take them off free lists. 1500 size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes; 1501 expand(pop_byte_size); 1502 _popular_object_boundary = 1503 _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords); 1504 for (int i = 0; i < G1NumPopularRegions; i++) { 1505 HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords); 1506 // assert(hr != NULL && hr->bottom() < _popular_object_boundary, 1507 // "Should be enough, and all should be below boundary."); 1508 hr->set_popular(true); 1509 } 1510 assert(_cur_pop_hr_index == 0, "Start allocating at the first region."); 1511 1512 // Initialize the from_card cache structure of HeapRegionRemSet. 1513 HeapRegionRemSet::init_heap(max_regions()); 1514 1515 // Now expand into the rest of the initial heap size. 1516 expand(init_byte_size - pop_byte_size); 1517 1518 // Perform any initialization actions delegated to the policy. 1519 g1_policy()->init(); 1520 1521 g1_policy()->note_start_of_mark_thread(); 1522 1523 _refine_cte_cl = 1524 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), 1525 g1_rem_set(), 1526 concurrent_g1_refine()); 1527 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); 1528 1529 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, 1530 SATB_Q_FL_lock, 1531 0, 1532 Shared_SATB_Q_lock); 1533 if (G1RSBarrierUseQueue) { 1534 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 1535 DirtyCardQ_FL_lock, 1536 G1DirtyCardQueueMax, 1537 Shared_DirtyCardQ_lock); 1538 } 1539 if (G1DeferredRSUpdate) { 1540 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 1541 DirtyCardQ_FL_lock, 1542 0, 1543 Shared_DirtyCardQ_lock, 1544 &JavaThread::dirty_card_queue_set()); 1545 } 1546 // In case we're keeping closure specialization stats, initialize those 1547 // counts and that mechanism. 1548 SpecializationStats::clear(); 1549 1550 _gc_alloc_region_list = NULL; 1551 1552 // Do later initialization work for concurrent refinement. 1553 _cg1r->init(); 1554 1555 const char* group_names[] = { "CR", "ZF", "CM", "CL" }; 1556 GCOverheadReporter::initGCOverheadReporter(4, group_names); 1557 1558 return JNI_OK; 1559 } 1560 1561 void G1CollectedHeap::ref_processing_init() { 1562 SharedHeap::ref_processing_init(); 1563 MemRegion mr = reserved_region(); 1564 _ref_processor = ReferenceProcessor::create_ref_processor( 1565 mr, // span 1566 false, // Reference discovery is not atomic 1567 // (though it shouldn't matter here.) 1568 true, // mt_discovery 1569 NULL, // is alive closure: need to fill this in for efficiency 1570 ParallelGCThreads, 1571 ParallelRefProcEnabled, 1572 true); // Setting next fields of discovered 1573 // lists requires a barrier. 1574 } 1575 1576 size_t G1CollectedHeap::capacity() const { 1577 return _g1_committed.byte_size(); 1578 } 1579 1580 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, 1581 int worker_i) { 1582 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1583 int n_completed_buffers = 0; 1584 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { 1585 n_completed_buffers++; 1586 } 1587 g1_policy()->record_update_rs_processed_buffers(worker_i, 1588 (double) n_completed_buffers); 1589 dcqs.clear_n_completed_buffers(); 1590 // Finish up the queue... 1591 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, 1592 g1_rem_set()); 1593 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); 1594 } 1595 1596 1597 // Computes the sum of the storage used by the various regions. 1598 1599 size_t G1CollectedHeap::used() const { 1600 assert(Heap_lock->owner() != NULL, 1601 "Should be owned on this thread's behalf."); 1602 size_t result = _summary_bytes_used; 1603 if (_cur_alloc_region != NULL) 1604 result += _cur_alloc_region->used(); 1605 return result; 1606 } 1607 1608 class SumUsedClosure: public HeapRegionClosure { 1609 size_t _used; 1610 public: 1611 SumUsedClosure() : _used(0) {} 1612 bool doHeapRegion(HeapRegion* r) { 1613 if (!r->continuesHumongous()) { 1614 _used += r->used(); 1615 } 1616 return false; 1617 } 1618 size_t result() { return _used; } 1619 }; 1620 1621 size_t G1CollectedHeap::recalculate_used() const { 1622 SumUsedClosure blk; 1623 _hrs->iterate(&blk); 1624 return blk.result(); 1625 } 1626 1627 #ifndef PRODUCT 1628 class SumUsedRegionsClosure: public HeapRegionClosure { 1629 size_t _num; 1630 public: 1631 // _num is set to 1 to account for the popular region 1632 SumUsedRegionsClosure() : _num(G1NumPopularRegions) {} 1633 bool doHeapRegion(HeapRegion* r) { 1634 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { 1635 _num += 1; 1636 } 1637 return false; 1638 } 1639 size_t result() { return _num; } 1640 }; 1641 1642 size_t G1CollectedHeap::recalculate_used_regions() const { 1643 SumUsedRegionsClosure blk; 1644 _hrs->iterate(&blk); 1645 return blk.result(); 1646 } 1647 #endif // PRODUCT 1648 1649 size_t G1CollectedHeap::unsafe_max_alloc() { 1650 if (_free_regions > 0) return HeapRegion::GrainBytes; 1651 // otherwise, is there space in the current allocation region? 1652 1653 // We need to store the current allocation region in a local variable 1654 // here. The problem is that this method doesn't take any locks and 1655 // there may be other threads which overwrite the current allocation 1656 // region field. attempt_allocation(), for example, sets it to NULL 1657 // and this can happen *after* the NULL check here but before the call 1658 // to free(), resulting in a SIGSEGV. Note that this doesn't appear 1659 // to be a problem in the optimized build, since the two loads of the 1660 // current allocation region field are optimized away. 1661 HeapRegion* car = _cur_alloc_region; 1662 1663 // FIXME: should iterate over all regions? 1664 if (car == NULL) { 1665 return 0; 1666 } 1667 return car->free(); 1668 } 1669 1670 void G1CollectedHeap::collect(GCCause::Cause cause) { 1671 // The caller doesn't have the Heap_lock 1672 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); 1673 MutexLocker ml(Heap_lock); 1674 collect_locked(cause); 1675 } 1676 1677 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { 1678 assert(Thread::current()->is_VM_thread(), "Precondition#1"); 1679 assert(Heap_lock->is_locked(), "Precondition#2"); 1680 GCCauseSetter gcs(this, cause); 1681 switch (cause) { 1682 case GCCause::_heap_inspection: 1683 case GCCause::_heap_dump: { 1684 HandleMark hm; 1685 do_full_collection(false); // don't clear all soft refs 1686 break; 1687 } 1688 default: // XXX FIX ME 1689 ShouldNotReachHere(); // Unexpected use of this function 1690 } 1691 } 1692 1693 1694 void G1CollectedHeap::collect_locked(GCCause::Cause cause) { 1695 // Don't want to do a GC until cleanup is completed. 1696 wait_for_cleanup_complete(); 1697 1698 // Read the GC count while holding the Heap_lock 1699 int gc_count_before = SharedHeap::heap()->total_collections(); 1700 { 1701 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 1702 VM_G1CollectFull op(gc_count_before, cause); 1703 VMThread::execute(&op); 1704 } 1705 } 1706 1707 bool G1CollectedHeap::is_in(const void* p) const { 1708 if (_g1_committed.contains(p)) { 1709 HeapRegion* hr = _hrs->addr_to_region(p); 1710 return hr->is_in(p); 1711 } else { 1712 return _perm_gen->as_gen()->is_in(p); 1713 } 1714 } 1715 1716 // Iteration functions. 1717 1718 // Iterates an OopClosure over all ref-containing fields of objects 1719 // within a HeapRegion. 1720 1721 class IterateOopClosureRegionClosure: public HeapRegionClosure { 1722 MemRegion _mr; 1723 OopClosure* _cl; 1724 public: 1725 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) 1726 : _mr(mr), _cl(cl) {} 1727 bool doHeapRegion(HeapRegion* r) { 1728 if (! r->continuesHumongous()) { 1729 r->oop_iterate(_cl); 1730 } 1731 return false; 1732 } 1733 }; 1734 1735 void G1CollectedHeap::oop_iterate(OopClosure* cl) { 1736 IterateOopClosureRegionClosure blk(_g1_committed, cl); 1737 _hrs->iterate(&blk); 1738 } 1739 1740 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { 1741 IterateOopClosureRegionClosure blk(mr, cl); 1742 _hrs->iterate(&blk); 1743 } 1744 1745 // Iterates an ObjectClosure over all objects within a HeapRegion. 1746 1747 class IterateObjectClosureRegionClosure: public HeapRegionClosure { 1748 ObjectClosure* _cl; 1749 public: 1750 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1751 bool doHeapRegion(HeapRegion* r) { 1752 if (! r->continuesHumongous()) { 1753 r->object_iterate(_cl); 1754 } 1755 return false; 1756 } 1757 }; 1758 1759 void G1CollectedHeap::object_iterate(ObjectClosure* cl) { 1760 IterateObjectClosureRegionClosure blk(cl); 1761 _hrs->iterate(&blk); 1762 } 1763 1764 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { 1765 // FIXME: is this right? 1766 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); 1767 } 1768 1769 // Calls a SpaceClosure on a HeapRegion. 1770 1771 class SpaceClosureRegionClosure: public HeapRegionClosure { 1772 SpaceClosure* _cl; 1773 public: 1774 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} 1775 bool doHeapRegion(HeapRegion* r) { 1776 _cl->do_space(r); 1777 return false; 1778 } 1779 }; 1780 1781 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { 1782 SpaceClosureRegionClosure blk(cl); 1783 _hrs->iterate(&blk); 1784 } 1785 1786 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { 1787 _hrs->iterate(cl); 1788 } 1789 1790 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, 1791 HeapRegionClosure* cl) { 1792 _hrs->iterate_from(r, cl); 1793 } 1794 1795 void 1796 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { 1797 _hrs->iterate_from(idx, cl); 1798 } 1799 1800 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } 1801 1802 void 1803 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 1804 int worker, 1805 jint claim_value) { 1806 const size_t regions = n_regions(); 1807 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); 1808 // try to spread out the starting points of the workers 1809 const size_t start_index = regions / worker_num * (size_t) worker; 1810 1811 // each worker will actually look at all regions 1812 for (size_t count = 0; count < regions; ++count) { 1813 const size_t index = (start_index + count) % regions; 1814 assert(0 <= index && index < regions, "sanity"); 1815 HeapRegion* r = region_at(index); 1816 // we'll ignore "continues humongous" regions (we'll process them 1817 // when we come across their corresponding "start humongous" 1818 // region) and regions already claimed 1819 if (r->claim_value() == claim_value || r->continuesHumongous()) { 1820 continue; 1821 } 1822 // OK, try to claim it 1823 if (r->claimHeapRegion(claim_value)) { 1824 // success! 1825 assert(!r->continuesHumongous(), "sanity"); 1826 if (r->startsHumongous()) { 1827 // If the region is "starts humongous" we'll iterate over its 1828 // "continues humongous" first; in fact we'll do them 1829 // first. The order is important. In on case, calling the 1830 // closure on the "starts humongous" region might de-allocate 1831 // and clear all its "continues humongous" regions and, as a 1832 // result, we might end up processing them twice. So, we'll do 1833 // them first (notice: most closures will ignore them anyway) and 1834 // then we'll do the "starts humongous" region. 1835 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { 1836 HeapRegion* chr = region_at(ch_index); 1837 1838 // if the region has already been claimed or it's not 1839 // "continues humongous" we're done 1840 if (chr->claim_value() == claim_value || 1841 !chr->continuesHumongous()) { 1842 break; 1843 } 1844 1845 // Noone should have claimed it directly. We can given 1846 // that we claimed its "starts humongous" region. 1847 assert(chr->claim_value() != claim_value, "sanity"); 1848 assert(chr->humongous_start_region() == r, "sanity"); 1849 1850 if (chr->claimHeapRegion(claim_value)) { 1851 // we should always be able to claim it; noone else should 1852 // be trying to claim this region 1853 1854 bool res2 = cl->doHeapRegion(chr); 1855 assert(!res2, "Should not abort"); 1856 1857 // Right now, this holds (i.e., no closure that actually 1858 // does something with "continues humongous" regions 1859 // clears them). We might have to weaken it in the future, 1860 // but let's leave these two asserts here for extra safety. 1861 assert(chr->continuesHumongous(), "should still be the case"); 1862 assert(chr->humongous_start_region() == r, "sanity"); 1863 } else { 1864 guarantee(false, "we should not reach here"); 1865 } 1866 } 1867 } 1868 1869 assert(!r->continuesHumongous(), "sanity"); 1870 bool res = cl->doHeapRegion(r); 1871 assert(!res, "Should not abort"); 1872 } 1873 } 1874 } 1875 1876 class ResetClaimValuesClosure: public HeapRegionClosure { 1877 public: 1878 bool doHeapRegion(HeapRegion* r) { 1879 r->set_claim_value(HeapRegion::InitialClaimValue); 1880 return false; 1881 } 1882 }; 1883 1884 void 1885 G1CollectedHeap::reset_heap_region_claim_values() { 1886 ResetClaimValuesClosure blk; 1887 heap_region_iterate(&blk); 1888 } 1889 1890 #ifdef ASSERT 1891 // This checks whether all regions in the heap have the correct claim 1892 // value. I also piggy-backed on this a check to ensure that the 1893 // humongous_start_region() information on "continues humongous" 1894 // regions is correct. 1895 1896 class CheckClaimValuesClosure : public HeapRegionClosure { 1897 private: 1898 jint _claim_value; 1899 size_t _failures; 1900 HeapRegion* _sh_region; 1901 public: 1902 CheckClaimValuesClosure(jint claim_value) : 1903 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 1904 bool doHeapRegion(HeapRegion* r) { 1905 if (r->claim_value() != _claim_value) { 1906 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 1907 "claim value = %d, should be %d", 1908 r->bottom(), r->end(), r->claim_value(), 1909 _claim_value); 1910 ++_failures; 1911 } 1912 if (!r->isHumongous()) { 1913 _sh_region = NULL; 1914 } else if (r->startsHumongous()) { 1915 _sh_region = r; 1916 } else if (r->continuesHumongous()) { 1917 if (r->humongous_start_region() != _sh_region) { 1918 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 1919 "HS = "PTR_FORMAT", should be "PTR_FORMAT, 1920 r->bottom(), r->end(), 1921 r->humongous_start_region(), 1922 _sh_region); 1923 ++_failures; 1924 } 1925 } 1926 return false; 1927 } 1928 size_t failures() { 1929 return _failures; 1930 } 1931 }; 1932 1933 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 1934 CheckClaimValuesClosure cl(claim_value); 1935 heap_region_iterate(&cl); 1936 return cl.failures() == 0; 1937 } 1938 #endif // ASSERT 1939 1940 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 1941 HeapRegion* r = g1_policy()->collection_set(); 1942 while (r != NULL) { 1943 HeapRegion* next = r->next_in_collection_set(); 1944 if (cl->doHeapRegion(r)) { 1945 cl->incomplete(); 1946 return; 1947 } 1948 r = next; 1949 } 1950 } 1951 1952 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, 1953 HeapRegionClosure *cl) { 1954 assert(r->in_collection_set(), 1955 "Start region must be a member of the collection set."); 1956 HeapRegion* cur = r; 1957 while (cur != NULL) { 1958 HeapRegion* next = cur->next_in_collection_set(); 1959 if (cl->doHeapRegion(cur) && false) { 1960 cl->incomplete(); 1961 return; 1962 } 1963 cur = next; 1964 } 1965 cur = g1_policy()->collection_set(); 1966 while (cur != r) { 1967 HeapRegion* next = cur->next_in_collection_set(); 1968 if (cl->doHeapRegion(cur) && false) { 1969 cl->incomplete(); 1970 return; 1971 } 1972 cur = next; 1973 } 1974 } 1975 1976 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 1977 return _hrs->length() > 0 ? _hrs->at(0) : NULL; 1978 } 1979 1980 1981 Space* G1CollectedHeap::space_containing(const void* addr) const { 1982 Space* res = heap_region_containing(addr); 1983 if (res == NULL) 1984 res = perm_gen()->space_containing(addr); 1985 return res; 1986 } 1987 1988 HeapWord* G1CollectedHeap::block_start(const void* addr) const { 1989 Space* sp = space_containing(addr); 1990 if (sp != NULL) { 1991 return sp->block_start(addr); 1992 } 1993 return NULL; 1994 } 1995 1996 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { 1997 Space* sp = space_containing(addr); 1998 assert(sp != NULL, "block_size of address outside of heap"); 1999 return sp->block_size(addr); 2000 } 2001 2002 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { 2003 Space* sp = space_containing(addr); 2004 return sp->block_is_obj(addr); 2005 } 2006 2007 bool G1CollectedHeap::supports_tlab_allocation() const { 2008 return true; 2009 } 2010 2011 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { 2012 return HeapRegion::GrainBytes; 2013 } 2014 2015 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { 2016 // Return the remaining space in the cur alloc region, but not less than 2017 // the min TLAB size. 2018 // Also, no more than half the region size, since we can't allow tlabs to 2019 // grow big enough to accomodate humongous objects. 2020 2021 // We need to story it locally, since it might change between when we 2022 // test for NULL and when we use it later. 2023 ContiguousSpace* cur_alloc_space = _cur_alloc_region; 2024 if (cur_alloc_space == NULL) { 2025 return HeapRegion::GrainBytes/2; 2026 } else { 2027 return MAX2(MIN2(cur_alloc_space->free(), 2028 (size_t)(HeapRegion::GrainBytes/2)), 2029 (size_t)MinTLABSize); 2030 } 2031 } 2032 2033 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { 2034 bool dummy; 2035 return G1CollectedHeap::mem_allocate(size, false, true, &dummy); 2036 } 2037 2038 bool G1CollectedHeap::allocs_are_zero_filled() { 2039 return false; 2040 } 2041 2042 size_t G1CollectedHeap::large_typearray_limit() { 2043 // FIXME 2044 return HeapRegion::GrainBytes/HeapWordSize; 2045 } 2046 2047 size_t G1CollectedHeap::max_capacity() const { 2048 return _g1_committed.byte_size(); 2049 } 2050 2051 jlong G1CollectedHeap::millis_since_last_gc() { 2052 // assert(false, "NYI"); 2053 return 0; 2054 } 2055 2056 2057 void G1CollectedHeap::prepare_for_verify() { 2058 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 2059 ensure_parsability(false); 2060 } 2061 g1_rem_set()->prepare_for_verify(); 2062 } 2063 2064 class VerifyLivenessOopClosure: public OopClosure { 2065 G1CollectedHeap* g1h; 2066 public: 2067 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { 2068 g1h = _g1h; 2069 } 2070 void do_oop(narrowOop *p) { 2071 guarantee(false, "NYI"); 2072 } 2073 void do_oop(oop *p) { 2074 oop obj = *p; 2075 assert(obj == NULL || !g1h->is_obj_dead(obj), 2076 "Dead object referenced by a not dead object"); 2077 } 2078 }; 2079 2080 class VerifyObjsInRegionClosure: public ObjectClosure { 2081 G1CollectedHeap* _g1h; 2082 size_t _live_bytes; 2083 HeapRegion *_hr; 2084 public: 2085 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { 2086 _g1h = G1CollectedHeap::heap(); 2087 } 2088 void do_object(oop o) { 2089 VerifyLivenessOopClosure isLive(_g1h); 2090 assert(o != NULL, "Huh?"); 2091 if (!_g1h->is_obj_dead(o)) { 2092 o->oop_iterate(&isLive); 2093 if (!_hr->obj_allocated_since_prev_marking(o)) 2094 _live_bytes += (o->size() * HeapWordSize); 2095 } 2096 } 2097 size_t live_bytes() { return _live_bytes; } 2098 }; 2099 2100 class PrintObjsInRegionClosure : public ObjectClosure { 2101 HeapRegion *_hr; 2102 G1CollectedHeap *_g1; 2103 public: 2104 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { 2105 _g1 = G1CollectedHeap::heap(); 2106 }; 2107 2108 void do_object(oop o) { 2109 if (o != NULL) { 2110 HeapWord *start = (HeapWord *) o; 2111 size_t word_sz = o->size(); 2112 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT 2113 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", 2114 (void*) o, word_sz, 2115 _g1->isMarkedPrev(o), 2116 _g1->isMarkedNext(o), 2117 _hr->obj_allocated_since_prev_marking(o)); 2118 HeapWord *end = start + word_sz; 2119 HeapWord *cur; 2120 int *val; 2121 for (cur = start; cur < end; cur++) { 2122 val = (int *) cur; 2123 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); 2124 } 2125 } 2126 } 2127 }; 2128 2129 class VerifyRegionClosure: public HeapRegionClosure { 2130 public: 2131 bool _allow_dirty; 2132 bool _par; 2133 VerifyRegionClosure(bool allow_dirty, bool par = false) 2134 : _allow_dirty(allow_dirty), _par(par) {} 2135 bool doHeapRegion(HeapRegion* r) { 2136 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, 2137 "Should be unclaimed at verify points."); 2138 if (r->isHumongous()) { 2139 if (r->startsHumongous()) { 2140 // Verify the single H object. 2141 oop(r->bottom())->verify(); 2142 size_t word_sz = oop(r->bottom())->size(); 2143 guarantee(r->top() == r->bottom() + word_sz, 2144 "Only one object in a humongous region"); 2145 } 2146 } else { 2147 VerifyObjsInRegionClosure not_dead_yet_cl(r); 2148 r->verify(_allow_dirty); 2149 r->object_iterate(¬_dead_yet_cl); 2150 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), 2151 "More live objects than counted in last complete marking."); 2152 } 2153 return false; 2154 } 2155 }; 2156 2157 class VerifyRootsClosure: public OopsInGenClosure { 2158 private: 2159 G1CollectedHeap* _g1h; 2160 bool _failures; 2161 2162 public: 2163 VerifyRootsClosure() : 2164 _g1h(G1CollectedHeap::heap()), _failures(false) { } 2165 2166 bool failures() { return _failures; } 2167 2168 void do_oop(narrowOop* p) { 2169 guarantee(false, "NYI"); 2170 } 2171 2172 void do_oop(oop* p) { 2173 oop obj = *p; 2174 if (obj != NULL) { 2175 if (_g1h->is_obj_dead(obj)) { 2176 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " 2177 "points to dead obj "PTR_FORMAT, p, (void*) obj); 2178 obj->print_on(gclog_or_tty); 2179 _failures = true; 2180 } 2181 } 2182 } 2183 }; 2184 2185 // This is the task used for parallel heap verification. 2186 2187 class G1ParVerifyTask: public AbstractGangTask { 2188 private: 2189 G1CollectedHeap* _g1h; 2190 bool _allow_dirty; 2191 2192 public: 2193 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : 2194 AbstractGangTask("Parallel verify task"), 2195 _g1h(g1h), _allow_dirty(allow_dirty) { } 2196 2197 void work(int worker_i) { 2198 VerifyRegionClosure blk(_allow_dirty, true); 2199 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, 2200 HeapRegion::ParVerifyClaimValue); 2201 } 2202 }; 2203 2204 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { 2205 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 2206 if (!silent) { gclog_or_tty->print("roots "); } 2207 VerifyRootsClosure rootsCl; 2208 process_strong_roots(false, 2209 SharedHeap::SO_AllClasses, 2210 &rootsCl, 2211 &rootsCl); 2212 rem_set()->invalidate(perm_gen()->used_region(), false); 2213 if (!silent) { gclog_or_tty->print("heapRegions "); } 2214 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { 2215 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2216 "sanity check"); 2217 2218 G1ParVerifyTask task(this, allow_dirty); 2219 int n_workers = workers()->total_workers(); 2220 set_par_threads(n_workers); 2221 workers()->run_task(&task); 2222 set_par_threads(0); 2223 2224 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), 2225 "sanity check"); 2226 2227 reset_heap_region_claim_values(); 2228 2229 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2230 "sanity check"); 2231 } else { 2232 VerifyRegionClosure blk(allow_dirty); 2233 _hrs->iterate(&blk); 2234 } 2235 if (!silent) gclog_or_tty->print("remset "); 2236 rem_set()->verify(); 2237 guarantee(!rootsCl.failures(), "should not have had failures"); 2238 } else { 2239 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); 2240 } 2241 } 2242 2243 class PrintRegionClosure: public HeapRegionClosure { 2244 outputStream* _st; 2245 public: 2246 PrintRegionClosure(outputStream* st) : _st(st) {} 2247 bool doHeapRegion(HeapRegion* r) { 2248 r->print_on(_st); 2249 return false; 2250 } 2251 }; 2252 2253 void G1CollectedHeap::print() const { print_on(gclog_or_tty); } 2254 2255 void G1CollectedHeap::print_on(outputStream* st) const { 2256 PrintRegionClosure blk(st); 2257 _hrs->iterate(&blk); 2258 } 2259 2260 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { 2261 if (ParallelGCThreads > 0) { 2262 workers()->print_worker_threads(); 2263 } 2264 st->print("\"G1 concurrent mark GC Thread\" "); 2265 _cmThread->print(); 2266 st->cr(); 2267 st->print("\"G1 concurrent refinement GC Thread\" "); 2268 _cg1r->cg1rThread()->print_on(st); 2269 st->cr(); 2270 st->print("\"G1 zero-fill GC Thread\" "); 2271 _czft->print_on(st); 2272 st->cr(); 2273 } 2274 2275 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { 2276 if (ParallelGCThreads > 0) { 2277 workers()->threads_do(tc); 2278 } 2279 tc->do_thread(_cmThread); 2280 tc->do_thread(_cg1r->cg1rThread()); 2281 tc->do_thread(_czft); 2282 } 2283 2284 void G1CollectedHeap::print_tracing_info() const { 2285 concurrent_g1_refine()->print_final_card_counts(); 2286 2287 // We'll overload this to mean "trace GC pause statistics." 2288 if (TraceGen0Time || TraceGen1Time) { 2289 // The "G1CollectorPolicy" is keeping track of these stats, so delegate 2290 // to that. 2291 g1_policy()->print_tracing_info(); 2292 } 2293 if (SummarizeG1RSStats) { 2294 g1_rem_set()->print_summary_info(); 2295 } 2296 if (SummarizeG1ConcMark) { 2297 concurrent_mark()->print_summary_info(); 2298 } 2299 if (SummarizeG1ZFStats) { 2300 ConcurrentZFThread::print_summary_info(); 2301 } 2302 if (G1SummarizePopularity) { 2303 print_popularity_summary_info(); 2304 } 2305 g1_policy()->print_yg_surv_rate_info(); 2306 2307 GCOverheadReporter::printGCOverhead(); 2308 2309 SpecializationStats::print(); 2310 } 2311 2312 2313 int G1CollectedHeap::addr_to_arena_id(void* addr) const { 2314 HeapRegion* hr = heap_region_containing(addr); 2315 if (hr == NULL) { 2316 return 0; 2317 } else { 2318 return 1; 2319 } 2320 } 2321 2322 G1CollectedHeap* G1CollectedHeap::heap() { 2323 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, 2324 "not a garbage-first heap"); 2325 return _g1h; 2326 } 2327 2328 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 2329 if (PrintHeapAtGC){ 2330 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); 2331 Universe::print(); 2332 } 2333 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 2334 // Call allocation profiler 2335 AllocationProfiler::iterate_since_last_gc(); 2336 // Fill TLAB's and such 2337 ensure_parsability(true); 2338 } 2339 2340 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { 2341 // FIXME: what is this about? 2342 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 2343 // is set. 2344 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 2345 "derived pointer present")); 2346 2347 if (PrintHeapAtGC){ 2348 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); 2349 Universe::print(); 2350 gclog_or_tty->print("} "); 2351 } 2352 } 2353 2354 void G1CollectedHeap::do_collection_pause() { 2355 // Read the GC count while holding the Heap_lock 2356 // we need to do this _before_ wait_for_cleanup_complete(), to 2357 // ensure that we do not give up the heap lock and potentially 2358 // pick up the wrong count 2359 int gc_count_before = SharedHeap::heap()->total_collections(); 2360 2361 // Don't want to do a GC pause while cleanup is being completed! 2362 wait_for_cleanup_complete(); 2363 2364 g1_policy()->record_stop_world_start(); 2365 { 2366 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 2367 VM_G1IncCollectionPause op(gc_count_before); 2368 VMThread::execute(&op); 2369 } 2370 } 2371 2372 void 2373 G1CollectedHeap::doConcurrentMark() { 2374 if (G1ConcMark) { 2375 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2376 if (!_cmThread->in_progress()) { 2377 _cmThread->set_started(); 2378 CGC_lock->notify(); 2379 } 2380 } 2381 } 2382 2383 class VerifyMarkedObjsClosure: public ObjectClosure { 2384 G1CollectedHeap* _g1h; 2385 public: 2386 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} 2387 void do_object(oop obj) { 2388 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, 2389 "markandsweep mark should agree with concurrent deadness"); 2390 } 2391 }; 2392 2393 void 2394 G1CollectedHeap::checkConcurrentMark() { 2395 VerifyMarkedObjsClosure verifycl(this); 2396 // MutexLockerEx x(getMarkBitMapLock(), 2397 // Mutex::_no_safepoint_check_flag); 2398 object_iterate(&verifycl); 2399 } 2400 2401 void G1CollectedHeap::do_sync_mark() { 2402 _cm->checkpointRootsInitial(); 2403 _cm->markFromRoots(); 2404 _cm->checkpointRootsFinal(false); 2405 } 2406 2407 // <NEW PREDICTION> 2408 2409 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, 2410 bool young) { 2411 return _g1_policy->predict_region_elapsed_time_ms(hr, young); 2412 } 2413 2414 void G1CollectedHeap::check_if_region_is_too_expensive(double 2415 predicted_time_ms) { 2416 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); 2417 } 2418 2419 size_t G1CollectedHeap::pending_card_num() { 2420 size_t extra_cards = 0; 2421 JavaThread *curr = Threads::first(); 2422 while (curr != NULL) { 2423 DirtyCardQueue& dcq = curr->dirty_card_queue(); 2424 extra_cards += dcq.size(); 2425 curr = curr->next(); 2426 } 2427 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2428 size_t buffer_size = dcqs.buffer_size(); 2429 size_t buffer_num = dcqs.completed_buffers_num(); 2430 return buffer_size * buffer_num + extra_cards; 2431 } 2432 2433 size_t G1CollectedHeap::max_pending_card_num() { 2434 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2435 size_t buffer_size = dcqs.buffer_size(); 2436 size_t buffer_num = dcqs.completed_buffers_num(); 2437 int thread_num = Threads::number_of_threads(); 2438 return (buffer_num + thread_num) * buffer_size; 2439 } 2440 2441 size_t G1CollectedHeap::cards_scanned() { 2442 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); 2443 return g1_rset->cardsScanned(); 2444 } 2445 2446 void 2447 G1CollectedHeap::setup_surviving_young_words() { 2448 guarantee( _surviving_young_words == NULL, "pre-condition" ); 2449 size_t array_length = g1_policy()->young_cset_length(); 2450 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); 2451 if (_surviving_young_words == NULL) { 2452 vm_exit_out_of_memory(sizeof(size_t) * array_length, 2453 "Not enough space for young surv words summary."); 2454 } 2455 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); 2456 for (size_t i = 0; i < array_length; ++i) { 2457 guarantee( _surviving_young_words[i] == 0, "invariant" ); 2458 } 2459 } 2460 2461 void 2462 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { 2463 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2464 size_t array_length = g1_policy()->young_cset_length(); 2465 for (size_t i = 0; i < array_length; ++i) 2466 _surviving_young_words[i] += surv_young_words[i]; 2467 } 2468 2469 void 2470 G1CollectedHeap::cleanup_surviving_young_words() { 2471 guarantee( _surviving_young_words != NULL, "pre-condition" ); 2472 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); 2473 _surviving_young_words = NULL; 2474 } 2475 2476 // </NEW PREDICTION> 2477 2478 void 2479 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) { 2480 char verbose_str[128]; 2481 sprintf(verbose_str, "GC pause "); 2482 if (popular_region != NULL) 2483 strcat(verbose_str, "(popular)"); 2484 else if (g1_policy()->in_young_gc_mode()) { 2485 if (g1_policy()->full_young_gcs()) 2486 strcat(verbose_str, "(young)"); 2487 else 2488 strcat(verbose_str, "(partial)"); 2489 } 2490 bool reset_should_initiate_conc_mark = false; 2491 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) { 2492 // we currently do not allow an initial mark phase to be piggy-backed 2493 // on a popular pause 2494 reset_should_initiate_conc_mark = true; 2495 g1_policy()->unset_should_initiate_conc_mark(); 2496 } 2497 if (g1_policy()->should_initiate_conc_mark()) 2498 strcat(verbose_str, " (initial-mark)"); 2499 2500 GCCauseSetter x(this, (popular_region == NULL ? 2501 GCCause::_g1_inc_collection_pause : 2502 GCCause::_g1_pop_region_collection_pause)); 2503 2504 // if PrintGCDetails is on, we'll print long statistics information 2505 // in the collector policy code, so let's not print this as the output 2506 // is messy if we do. 2507 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); 2508 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 2509 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); 2510 2511 ResourceMark rm; 2512 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 2513 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); 2514 guarantee(!is_gc_active(), "collection is not reentrant"); 2515 assert(regions_accounted_for(), "Region leakage!"); 2516 2517 increment_gc_time_stamp(); 2518 2519 if (g1_policy()->in_young_gc_mode()) { 2520 assert(check_young_list_well_formed(), 2521 "young list should be well formed"); 2522 } 2523 2524 if (GC_locker::is_active()) { 2525 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 2526 } 2527 2528 bool abandoned = false; 2529 { // Call to jvmpi::post_class_unload_events must occur outside of active GC 2530 IsGCActiveMark x; 2531 2532 gc_prologue(false); 2533 increment_total_collections(); 2534 2535 #if G1_REM_SET_LOGGING 2536 gclog_or_tty->print_cr("\nJust chose CS, heap:"); 2537 print(); 2538 #endif 2539 2540 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 2541 HandleMark hm; // Discard invalid handles created during verification 2542 prepare_for_verify(); 2543 gclog_or_tty->print(" VerifyBeforeGC:"); 2544 Universe::verify(false); 2545 } 2546 2547 COMPILER2_PRESENT(DerivedPointerTable::clear()); 2548 2549 // We want to turn off ref discovery, if necessary, and turn it back on 2550 // on again later if we do. 2551 bool was_enabled = ref_processor()->discovery_enabled(); 2552 if (was_enabled) ref_processor()->disable_discovery(); 2553 2554 // Forget the current alloc region (we might even choose it to be part 2555 // of the collection set!). 2556 abandon_cur_alloc_region(); 2557 2558 // The elapsed time induced by the start time below deliberately elides 2559 // the possible verification above. 2560 double start_time_sec = os::elapsedTime(); 2561 GCOverheadReporter::recordSTWStart(start_time_sec); 2562 size_t start_used_bytes = used(); 2563 if (!G1ConcMark) { 2564 do_sync_mark(); 2565 } 2566 2567 g1_policy()->record_collection_pause_start(start_time_sec, 2568 start_used_bytes); 2569 2570 guarantee(_in_cset_fast_test == NULL, "invariant"); 2571 guarantee(_in_cset_fast_test_base == NULL, "invariant"); 2572 _in_cset_fast_test_length = max_regions(); 2573 _in_cset_fast_test_base = 2574 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); 2575 memset(_in_cset_fast_test_base, false, 2576 _in_cset_fast_test_length * sizeof(bool)); 2577 // We're biasing _in_cset_fast_test to avoid subtracting the 2578 // beginning of the heap every time we want to index; basically 2579 // it's the same with what we do with the card table. 2580 _in_cset_fast_test = _in_cset_fast_test_base - 2581 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); 2582 2583 #if SCAN_ONLY_VERBOSE 2584 _young_list->print(); 2585 #endif // SCAN_ONLY_VERBOSE 2586 2587 if (g1_policy()->should_initiate_conc_mark()) { 2588 concurrent_mark()->checkpointRootsInitialPre(); 2589 } 2590 save_marks(); 2591 2592 // We must do this before any possible evacuation that should propagate 2593 // marks, including evacuation of popular objects in a popular pause. 2594 if (mark_in_progress()) { 2595 double start_time_sec = os::elapsedTime(); 2596 2597 _cm->drainAllSATBBuffers(); 2598 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; 2599 g1_policy()->record_satb_drain_time(finish_mark_ms); 2600 2601 } 2602 // Record the number of elements currently on the mark stack, so we 2603 // only iterate over these. (Since evacuation may add to the mark 2604 // stack, doing more exposes race conditions.) If no mark is in 2605 // progress, this will be zero. 2606 _cm->set_oops_do_bound(); 2607 2608 assert(regions_accounted_for(), "Region leakage."); 2609 2610 bool abandoned = false; 2611 2612 if (mark_in_progress()) 2613 concurrent_mark()->newCSet(); 2614 2615 // Now choose the CS. 2616 if (popular_region == NULL) { 2617 g1_policy()->choose_collection_set(); 2618 } else { 2619 // We may be evacuating a single region (for popularity). 2620 g1_policy()->record_popular_pause_preamble_start(); 2621 popularity_pause_preamble(popular_region); 2622 g1_policy()->record_popular_pause_preamble_end(); 2623 abandoned = (g1_policy()->collection_set() == NULL); 2624 // Now we allow more regions to be added (we have to collect 2625 // all popular regions). 2626 if (!abandoned) { 2627 g1_policy()->choose_collection_set(popular_region); 2628 } 2629 } 2630 // We may abandon a pause if we find no region that will fit in the MMU 2631 // pause. 2632 abandoned = (g1_policy()->collection_set() == NULL); 2633 2634 // Nothing to do if we were unable to choose a collection set. 2635 if (!abandoned) { 2636 #if G1_REM_SET_LOGGING 2637 gclog_or_tty->print_cr("\nAfter pause, heap:"); 2638 print(); 2639 #endif 2640 2641 setup_surviving_young_words(); 2642 2643 // Set up the gc allocation regions. 2644 get_gc_alloc_regions(); 2645 2646 // Actually do the work... 2647 evacuate_collection_set(); 2648 free_collection_set(g1_policy()->collection_set()); 2649 g1_policy()->clear_collection_set(); 2650 2651 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); 2652 // this is more for peace of mind; we're nulling them here and 2653 // we're expecting them to be null at the beginning of the next GC 2654 _in_cset_fast_test = NULL; 2655 _in_cset_fast_test_base = NULL; 2656 2657 if (popular_region != NULL) { 2658 // We have to wait until now, because we don't want the region to 2659 // be rescheduled for pop-evac during RS update. 2660 popular_region->set_popular_pending(false); 2661 } 2662 2663 release_gc_alloc_regions(false /* totally */); 2664 2665 cleanup_surviving_young_words(); 2666 2667 if (g1_policy()->in_young_gc_mode()) { 2668 _young_list->reset_sampled_info(); 2669 assert(check_young_list_empty(true), 2670 "young list should be empty"); 2671 2672 #if SCAN_ONLY_VERBOSE 2673 _young_list->print(); 2674 #endif // SCAN_ONLY_VERBOSE 2675 2676 g1_policy()->record_survivor_regions(_young_list->survivor_length(), 2677 _young_list->first_survivor_region(), 2678 _young_list->last_survivor_region()); 2679 _young_list->reset_auxilary_lists(); 2680 } 2681 } else { 2682 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 2683 } 2684 2685 if (evacuation_failed()) { 2686 _summary_bytes_used = recalculate_used(); 2687 } else { 2688 // The "used" of the the collection set have already been subtracted 2689 // when they were freed. Add in the bytes evacuated. 2690 _summary_bytes_used += g1_policy()->bytes_in_to_space(); 2691 } 2692 2693 if (g1_policy()->in_young_gc_mode() && 2694 g1_policy()->should_initiate_conc_mark()) { 2695 concurrent_mark()->checkpointRootsInitialPost(); 2696 set_marking_started(); 2697 doConcurrentMark(); 2698 } 2699 2700 #if SCAN_ONLY_VERBOSE 2701 _young_list->print(); 2702 #endif // SCAN_ONLY_VERBOSE 2703 2704 double end_time_sec = os::elapsedTime(); 2705 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; 2706 g1_policy()->record_pause_time_ms(pause_time_ms); 2707 GCOverheadReporter::recordSTWEnd(end_time_sec); 2708 g1_policy()->record_collection_pause_end(popular_region != NULL, 2709 abandoned); 2710 2711 assert(regions_accounted_for(), "Region leakage."); 2712 2713 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 2714 HandleMark hm; // Discard invalid handles created during verification 2715 gclog_or_tty->print(" VerifyAfterGC:"); 2716 Universe::verify(false); 2717 } 2718 2719 if (was_enabled) ref_processor()->enable_discovery(); 2720 2721 { 2722 size_t expand_bytes = g1_policy()->expansion_amount(); 2723 if (expand_bytes > 0) { 2724 size_t bytes_before = capacity(); 2725 expand(expand_bytes); 2726 } 2727 } 2728 2729 if (mark_in_progress()) { 2730 concurrent_mark()->update_g1_committed(); 2731 } 2732 2733 #ifdef TRACESPINNING 2734 ParallelTaskTerminator::print_termination_counts(); 2735 #endif 2736 2737 gc_epilogue(false); 2738 } 2739 2740 assert(verify_region_lists(), "Bad region lists."); 2741 2742 if (reset_should_initiate_conc_mark) 2743 g1_policy()->set_should_initiate_conc_mark(); 2744 2745 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { 2746 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); 2747 print_tracing_info(); 2748 vm_exit(-1); 2749 } 2750 } 2751 2752 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { 2753 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); 2754 // make sure we don't call set_gc_alloc_region() multiple times on 2755 // the same region 2756 assert(r == NULL || !r->is_gc_alloc_region(), 2757 "shouldn't already be a GC alloc region"); 2758 HeapWord* original_top = NULL; 2759 if (r != NULL) 2760 original_top = r->top(); 2761 2762 // We will want to record the used space in r as being there before gc. 2763 // One we install it as a GC alloc region it's eligible for allocation. 2764 // So record it now and use it later. 2765 size_t r_used = 0; 2766 if (r != NULL) { 2767 r_used = r->used(); 2768 2769 if (ParallelGCThreads > 0) { 2770 // need to take the lock to guard against two threads calling 2771 // get_gc_alloc_region concurrently (very unlikely but...) 2772 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2773 r->save_marks(); 2774 } 2775 } 2776 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; 2777 _gc_alloc_regions[purpose] = r; 2778 if (old_alloc_region != NULL) { 2779 // Replace aliases too. 2780 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2781 if (_gc_alloc_regions[ap] == old_alloc_region) { 2782 _gc_alloc_regions[ap] = r; 2783 } 2784 } 2785 } 2786 if (r != NULL) { 2787 push_gc_alloc_region(r); 2788 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { 2789 // We are using a region as a GC alloc region after it has been used 2790 // as a mutator allocation region during the current marking cycle. 2791 // The mutator-allocated objects are currently implicitly marked, but 2792 // when we move hr->next_top_at_mark_start() forward at the the end 2793 // of the GC pause, they won't be. We therefore mark all objects in 2794 // the "gap". We do this object-by-object, since marking densely 2795 // does not currently work right with marking bitmap iteration. This 2796 // means we rely on TLAB filling at the start of pauses, and no 2797 // "resuscitation" of filled TLAB's. If we want to do this, we need 2798 // to fix the marking bitmap iteration. 2799 HeapWord* curhw = r->next_top_at_mark_start(); 2800 HeapWord* t = original_top; 2801 2802 while (curhw < t) { 2803 oop cur = (oop)curhw; 2804 // We'll assume parallel for generality. This is rare code. 2805 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? 2806 curhw = curhw + cur->size(); 2807 } 2808 assert(curhw == t, "Should have parsed correctly."); 2809 } 2810 if (G1PolicyVerbose > 1) { 2811 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " 2812 "for survivors:", r->bottom(), original_top, r->end()); 2813 r->print(); 2814 } 2815 g1_policy()->record_before_bytes(r_used); 2816 } 2817 } 2818 2819 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { 2820 assert(Thread::current()->is_VM_thread() || 2821 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); 2822 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), 2823 "Precondition."); 2824 hr->set_is_gc_alloc_region(true); 2825 hr->set_next_gc_alloc_region(_gc_alloc_region_list); 2826 _gc_alloc_region_list = hr; 2827 } 2828 2829 #ifdef G1_DEBUG 2830 class FindGCAllocRegion: public HeapRegionClosure { 2831 public: 2832 bool doHeapRegion(HeapRegion* r) { 2833 if (r->is_gc_alloc_region()) { 2834 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", 2835 r->hrs_index(), r->bottom()); 2836 } 2837 return false; 2838 } 2839 }; 2840 #endif // G1_DEBUG 2841 2842 void G1CollectedHeap::forget_alloc_region_list() { 2843 assert(Thread::current()->is_VM_thread(), "Precondition"); 2844 while (_gc_alloc_region_list != NULL) { 2845 HeapRegion* r = _gc_alloc_region_list; 2846 assert(r->is_gc_alloc_region(), "Invariant."); 2847 _gc_alloc_region_list = r->next_gc_alloc_region(); 2848 r->set_next_gc_alloc_region(NULL); 2849 r->set_is_gc_alloc_region(false); 2850 if (r->is_survivor()) { 2851 if (r->is_empty()) { 2852 r->set_not_young(); 2853 } else { 2854 _young_list->add_survivor_region(r); 2855 } 2856 } 2857 if (r->is_empty()) { 2858 ++_free_regions; 2859 } 2860 } 2861 #ifdef G1_DEBUG 2862 FindGCAllocRegion fa; 2863 heap_region_iterate(&fa); 2864 #endif // G1_DEBUG 2865 } 2866 2867 2868 bool G1CollectedHeap::check_gc_alloc_regions() { 2869 // TODO: allocation regions check 2870 return true; 2871 } 2872 2873 void G1CollectedHeap::get_gc_alloc_regions() { 2874 // First, let's check that the GC alloc region list is empty (it should) 2875 assert(_gc_alloc_region_list == NULL, "invariant"); 2876 2877 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2878 assert(_gc_alloc_regions[ap] == NULL, "invariant"); 2879 2880 // Create new GC alloc regions. 2881 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; 2882 _retained_gc_alloc_regions[ap] = NULL; 2883 2884 if (alloc_region != NULL) { 2885 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); 2886 2887 // let's make sure that the GC alloc region is not tagged as such 2888 // outside a GC operation 2889 assert(!alloc_region->is_gc_alloc_region(), "sanity"); 2890 2891 if (alloc_region->in_collection_set() || 2892 alloc_region->top() == alloc_region->end() || 2893 alloc_region->top() == alloc_region->bottom()) { 2894 // we will discard the current GC alloc region if it's in the 2895 // collection set (it can happen!), if it's already full (no 2896 // point in using it), or if it's empty (this means that it 2897 // was emptied during a cleanup and it should be on the free 2898 // list now). 2899 2900 alloc_region = NULL; 2901 } 2902 } 2903 2904 if (alloc_region == NULL) { 2905 // we will get a new GC alloc region 2906 alloc_region = newAllocRegionWithExpansion(ap, 0); 2907 } 2908 2909 if (alloc_region != NULL) { 2910 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); 2911 set_gc_alloc_region(ap, alloc_region); 2912 } 2913 2914 assert(_gc_alloc_regions[ap] == NULL || 2915 _gc_alloc_regions[ap]->is_gc_alloc_region(), 2916 "the GC alloc region should be tagged as such"); 2917 assert(_gc_alloc_regions[ap] == NULL || 2918 _gc_alloc_regions[ap] == _gc_alloc_region_list, 2919 "the GC alloc region should be the same as the GC alloc list head"); 2920 } 2921 // Set alternative regions for allocation purposes that have reached 2922 // their limit. 2923 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2924 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); 2925 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { 2926 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; 2927 } 2928 } 2929 assert(check_gc_alloc_regions(), "alloc regions messed up"); 2930 } 2931 2932 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { 2933 // We keep a separate list of all regions that have been alloc regions in 2934 // the current collection pause. Forget that now. This method will 2935 // untag the GC alloc regions and tear down the GC alloc region 2936 // list. It's desirable that no regions are tagged as GC alloc 2937 // outside GCs. 2938 forget_alloc_region_list(); 2939 2940 // The current alloc regions contain objs that have survived 2941 // collection. Make them no longer GC alloc regions. 2942 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2943 HeapRegion* r = _gc_alloc_regions[ap]; 2944 _retained_gc_alloc_regions[ap] = NULL; 2945 2946 if (r != NULL) { 2947 // we retain nothing on _gc_alloc_regions between GCs 2948 set_gc_alloc_region(ap, NULL); 2949 _gc_alloc_region_counts[ap] = 0; 2950 2951 if (r->is_empty()) { 2952 // we didn't actually allocate anything in it; let's just put 2953 // it on the free list 2954 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 2955 r->set_zero_fill_complete(); 2956 put_free_region_on_list_locked(r); 2957 } else if (_retain_gc_alloc_region[ap] && !totally) { 2958 // retain it so that we can use it at the beginning of the next GC 2959 _retained_gc_alloc_regions[ap] = r; 2960 } 2961 } 2962 } 2963 } 2964 2965 #ifndef PRODUCT 2966 // Useful for debugging 2967 2968 void G1CollectedHeap::print_gc_alloc_regions() { 2969 gclog_or_tty->print_cr("GC alloc regions"); 2970 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 2971 HeapRegion* r = _gc_alloc_regions[ap]; 2972 if (r == NULL) { 2973 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); 2974 } else { 2975 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, 2976 ap, r->bottom(), r->used()); 2977 } 2978 } 2979 } 2980 #endif // PRODUCT 2981 2982 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 2983 _drain_in_progress = false; 2984 set_evac_failure_closure(cl); 2985 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 2986 } 2987 2988 void G1CollectedHeap::finalize_for_evac_failure() { 2989 assert(_evac_failure_scan_stack != NULL && 2990 _evac_failure_scan_stack->length() == 0, 2991 "Postcondition"); 2992 assert(!_drain_in_progress, "Postcondition"); 2993 // Don't have to delete, since the scan stack is a resource object. 2994 _evac_failure_scan_stack = NULL; 2995 } 2996 2997 2998 2999 // *** Sequential G1 Evacuation 3000 3001 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { 3002 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3003 // let the caller handle alloc failure 3004 if (alloc_region == NULL) return NULL; 3005 assert(isHumongous(word_size) || !alloc_region->isHumongous(), 3006 "Either the object is humongous or the region isn't"); 3007 HeapWord* block = alloc_region->allocate(word_size); 3008 if (block == NULL) { 3009 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); 3010 } 3011 return block; 3012 } 3013 3014 class G1IsAliveClosure: public BoolObjectClosure { 3015 G1CollectedHeap* _g1; 3016 public: 3017 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3018 void do_object(oop p) { assert(false, "Do not call."); } 3019 bool do_object_b(oop p) { 3020 // It is reachable if it is outside the collection set, or is inside 3021 // and forwarded. 3022 3023 #ifdef G1_DEBUG 3024 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", 3025 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), 3026 !_g1->obj_in_cs(p) || p->is_forwarded()); 3027 #endif // G1_DEBUG 3028 3029 return !_g1->obj_in_cs(p) || p->is_forwarded(); 3030 } 3031 }; 3032 3033 class G1KeepAliveClosure: public OopClosure { 3034 G1CollectedHeap* _g1; 3035 public: 3036 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3037 void do_oop(narrowOop* p) { 3038 guarantee(false, "NYI"); 3039 } 3040 void do_oop(oop* p) { 3041 oop obj = *p; 3042 #ifdef G1_DEBUG 3043 if (PrintGC && Verbose) { 3044 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, 3045 p, (void*) obj, (void*) *p); 3046 } 3047 #endif // G1_DEBUG 3048 3049 if (_g1->obj_in_cs(obj)) { 3050 assert( obj->is_forwarded(), "invariant" ); 3051 *p = obj->forwardee(); 3052 3053 #ifdef G1_DEBUG 3054 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, 3055 (void*) obj, (void*) *p); 3056 #endif // G1_DEBUG 3057 } 3058 } 3059 }; 3060 3061 class UpdateRSetImmediate : public OopsInHeapRegionClosure { 3062 private: 3063 G1CollectedHeap* _g1; 3064 G1RemSet* _g1_rem_set; 3065 public: 3066 UpdateRSetImmediate(G1CollectedHeap* g1) : 3067 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} 3068 3069 void do_oop(narrowOop* p) { 3070 guarantee(false, "NYI"); 3071 } 3072 void do_oop(oop* p) { 3073 assert(_from->is_in_reserved(p), "paranoia"); 3074 if (*p != NULL && !_from->is_survivor()) { 3075 _g1_rem_set->par_write_ref(_from, p, 0); 3076 } 3077 } 3078 }; 3079 3080 class UpdateRSetDeferred : public OopsInHeapRegionClosure { 3081 private: 3082 G1CollectedHeap* _g1; 3083 DirtyCardQueue *_dcq; 3084 CardTableModRefBS* _ct_bs; 3085 3086 public: 3087 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : 3088 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} 3089 3090 void do_oop(narrowOop* p) { 3091 guarantee(false, "NYI"); 3092 } 3093 void do_oop(oop* p) { 3094 assert(_from->is_in_reserved(p), "paranoia"); 3095 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) { 3096 size_t card_index = _ct_bs->index_for(p); 3097 if (_ct_bs->mark_card_deferred(card_index)) { 3098 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); 3099 } 3100 } 3101 } 3102 }; 3103 3104 3105 3106 class RemoveSelfPointerClosure: public ObjectClosure { 3107 private: 3108 G1CollectedHeap* _g1; 3109 ConcurrentMark* _cm; 3110 HeapRegion* _hr; 3111 size_t _prev_marked_bytes; 3112 size_t _next_marked_bytes; 3113 OopsInHeapRegionClosure *_cl; 3114 public: 3115 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : 3116 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), 3117 _next_marked_bytes(0), _cl(cl) {} 3118 3119 size_t prev_marked_bytes() { return _prev_marked_bytes; } 3120 size_t next_marked_bytes() { return _next_marked_bytes; } 3121 3122 // The original idea here was to coalesce evacuated and dead objects. 3123 // However that caused complications with the block offset table (BOT). 3124 // In particular if there were two TLABs, one of them partially refined. 3125 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| 3126 // The BOT entries of the unrefined part of TLAB_2 point to the start 3127 // of TLAB_2. If the last object of the TLAB_1 and the first object 3128 // of TLAB_2 are coalesced, then the cards of the unrefined part 3129 // would point into middle of the filler object. 3130 // 3131 // The current approach is to not coalesce and leave the BOT contents intact. 3132 void do_object(oop obj) { 3133 if (obj->is_forwarded() && obj->forwardee() == obj) { 3134 // The object failed to move. 3135 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); 3136 _cm->markPrev(obj); 3137 assert(_cm->isPrevMarked(obj), "Should be marked!"); 3138 _prev_marked_bytes += (obj->size() * HeapWordSize); 3139 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { 3140 _cm->markAndGrayObjectIfNecessary(obj); 3141 } 3142 obj->set_mark(markOopDesc::prototype()); 3143 // While we were processing RSet buffers during the 3144 // collection, we actually didn't scan any cards on the 3145 // collection set, since we didn't want to update remebered 3146 // sets with entries that point into the collection set, given 3147 // that live objects fromthe collection set are about to move 3148 // and such entries will be stale very soon. This change also 3149 // dealt with a reliability issue which involved scanning a 3150 // card in the collection set and coming across an array that 3151 // was being chunked and looking malformed. The problem is 3152 // that, if evacuation fails, we might have remembered set 3153 // entries missing given that we skipped cards on the 3154 // collection set. So, we'll recreate such entries now. 3155 obj->oop_iterate(_cl); 3156 assert(_cm->isPrevMarked(obj), "Should be marked!"); 3157 } else { 3158 // The object has been either evacuated or is dead. Fill it with a 3159 // dummy object. 3160 MemRegion mr((HeapWord*)obj, obj->size()); 3161 CollectedHeap::fill_with_object(mr); 3162 _cm->clearRangeBothMaps(mr); 3163 } 3164 } 3165 }; 3166 3167 void G1CollectedHeap::remove_self_forwarding_pointers() { 3168 UpdateRSetImmediate immediate_update(_g1h); 3169 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); 3170 UpdateRSetDeferred deferred_update(_g1h, &dcq); 3171 OopsInHeapRegionClosure *cl; 3172 if (G1DeferredRSUpdate) { 3173 cl = &deferred_update; 3174 } else { 3175 cl = &immediate_update; 3176 } 3177 HeapRegion* cur = g1_policy()->collection_set(); 3178 while (cur != NULL) { 3179 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3180 3181 RemoveSelfPointerClosure rspc(_g1h, cl); 3182 if (cur->evacuation_failed()) { 3183 assert(cur->in_collection_set(), "bad CS"); 3184 cl->set_region(cur); 3185 cur->object_iterate(&rspc); 3186 3187 // A number of manipulations to make the TAMS be the current top, 3188 // and the marked bytes be the ones observed in the iteration. 3189 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { 3190 // The comments below are the postconditions achieved by the 3191 // calls. Note especially the last such condition, which says that 3192 // the count of marked bytes has been properly restored. 3193 cur->note_start_of_marking(false); 3194 // _next_top_at_mark_start == top, _next_marked_bytes == 0 3195 cur->add_to_marked_bytes(rspc.prev_marked_bytes()); 3196 // _next_marked_bytes == prev_marked_bytes. 3197 cur->note_end_of_marking(); 3198 // _prev_top_at_mark_start == top(), 3199 // _prev_marked_bytes == prev_marked_bytes 3200 } 3201 // If there is no mark in progress, we modified the _next variables 3202 // above needlessly, but harmlessly. 3203 if (_g1h->mark_in_progress()) { 3204 cur->note_start_of_marking(false); 3205 // _next_top_at_mark_start == top, _next_marked_bytes == 0 3206 // _next_marked_bytes == next_marked_bytes. 3207 } 3208 3209 // Now make sure the region has the right index in the sorted array. 3210 g1_policy()->note_change_in_marked_bytes(cur); 3211 } 3212 cur = cur->next_in_collection_set(); 3213 } 3214 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3215 3216 // Now restore saved marks, if any. 3217 if (_objs_with_preserved_marks != NULL) { 3218 assert(_preserved_marks_of_objs != NULL, "Both or none."); 3219 assert(_objs_with_preserved_marks->length() == 3220 _preserved_marks_of_objs->length(), "Both or none."); 3221 guarantee(_objs_with_preserved_marks->length() == 3222 _preserved_marks_of_objs->length(), "Both or none."); 3223 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { 3224 oop obj = _objs_with_preserved_marks->at(i); 3225 markOop m = _preserved_marks_of_objs->at(i); 3226 obj->set_mark(m); 3227 } 3228 // Delete the preserved marks growable arrays (allocated on the C heap). 3229 delete _objs_with_preserved_marks; 3230 delete _preserved_marks_of_objs; 3231 _objs_with_preserved_marks = NULL; 3232 _preserved_marks_of_objs = NULL; 3233 } 3234 } 3235 3236 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { 3237 _evac_failure_scan_stack->push(obj); 3238 } 3239 3240 void G1CollectedHeap::drain_evac_failure_scan_stack() { 3241 assert(_evac_failure_scan_stack != NULL, "precondition"); 3242 3243 while (_evac_failure_scan_stack->length() > 0) { 3244 oop obj = _evac_failure_scan_stack->pop(); 3245 _evac_failure_closure->set_region(heap_region_containing(obj)); 3246 obj->oop_iterate_backwards(_evac_failure_closure); 3247 } 3248 } 3249 3250 void G1CollectedHeap::handle_evacuation_failure(oop old) { 3251 markOop m = old->mark(); 3252 // forward to self 3253 assert(!old->is_forwarded(), "precondition"); 3254 3255 old->forward_to(old); 3256 handle_evacuation_failure_common(old, m); 3257 } 3258 3259 oop 3260 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, 3261 oop old) { 3262 markOop m = old->mark(); 3263 oop forward_ptr = old->forward_to_atomic(old); 3264 if (forward_ptr == NULL) { 3265 // Forward-to-self succeeded. 3266 if (_evac_failure_closure != cl) { 3267 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); 3268 assert(!_drain_in_progress, 3269 "Should only be true while someone holds the lock."); 3270 // Set the global evac-failure closure to the current thread's. 3271 assert(_evac_failure_closure == NULL, "Or locking has failed."); 3272 set_evac_failure_closure(cl); 3273 // Now do the common part. 3274 handle_evacuation_failure_common(old, m); 3275 // Reset to NULL. 3276 set_evac_failure_closure(NULL); 3277 } else { 3278 // The lock is already held, and this is recursive. 3279 assert(_drain_in_progress, "This should only be the recursive case."); 3280 handle_evacuation_failure_common(old, m); 3281 } 3282 return old; 3283 } else { 3284 // Someone else had a place to copy it. 3285 return forward_ptr; 3286 } 3287 } 3288 3289 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { 3290 set_evacuation_failed(true); 3291 3292 preserve_mark_if_necessary(old, m); 3293 3294 HeapRegion* r = heap_region_containing(old); 3295 if (!r->evacuation_failed()) { 3296 r->set_evacuation_failed(true); 3297 if (G1TraceRegions) { 3298 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " 3299 "["PTR_FORMAT","PTR_FORMAT")\n", 3300 r, r->bottom(), r->end()); 3301 } 3302 } 3303 3304 push_on_evac_failure_scan_stack(old); 3305 3306 if (!_drain_in_progress) { 3307 // prevent recursion in copy_to_survivor_space() 3308 _drain_in_progress = true; 3309 drain_evac_failure_scan_stack(); 3310 _drain_in_progress = false; 3311 } 3312 } 3313 3314 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { 3315 if (m != markOopDesc::prototype()) { 3316 if (_objs_with_preserved_marks == NULL) { 3317 assert(_preserved_marks_of_objs == NULL, "Both or none."); 3318 _objs_with_preserved_marks = 3319 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3320 _preserved_marks_of_objs = 3321 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); 3322 } 3323 _objs_with_preserved_marks->push(obj); 3324 _preserved_marks_of_objs->push(m); 3325 } 3326 } 3327 3328 // *** Parallel G1 Evacuation 3329 3330 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 3331 size_t word_size) { 3332 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3333 // let the caller handle alloc failure 3334 if (alloc_region == NULL) return NULL; 3335 3336 HeapWord* block = alloc_region->par_allocate(word_size); 3337 if (block == NULL) { 3338 MutexLockerEx x(par_alloc_during_gc_lock(), 3339 Mutex::_no_safepoint_check_flag); 3340 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); 3341 } 3342 return block; 3343 } 3344 3345 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, 3346 bool par) { 3347 // Another thread might have obtained alloc_region for the given 3348 // purpose, and might be attempting to allocate in it, and might 3349 // succeed. Therefore, we can't do the "finalization" stuff on the 3350 // region below until we're sure the last allocation has happened. 3351 // We ensure this by allocating the remaining space with a garbage 3352 // object. 3353 if (par) par_allocate_remaining_space(alloc_region); 3354 // Now we can do the post-GC stuff on the region. 3355 alloc_region->note_end_of_copying(); 3356 g1_policy()->record_after_bytes(alloc_region->used()); 3357 } 3358 3359 HeapWord* 3360 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, 3361 HeapRegion* alloc_region, 3362 bool par, 3363 size_t word_size) { 3364 HeapWord* block = NULL; 3365 // In the parallel case, a previous thread to obtain the lock may have 3366 // already assigned a new gc_alloc_region. 3367 if (alloc_region != _gc_alloc_regions[purpose]) { 3368 assert(par, "But should only happen in parallel case."); 3369 alloc_region = _gc_alloc_regions[purpose]; 3370 if (alloc_region == NULL) return NULL; 3371 block = alloc_region->par_allocate(word_size); 3372 if (block != NULL) return block; 3373 // Otherwise, continue; this new region is empty, too. 3374 } 3375 assert(alloc_region != NULL, "We better have an allocation region"); 3376 retire_alloc_region(alloc_region, par); 3377 3378 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { 3379 // Cannot allocate more regions for the given purpose. 3380 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); 3381 // Is there an alternative? 3382 if (purpose != alt_purpose) { 3383 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; 3384 // Has not the alternative region been aliased? 3385 if (alloc_region != alt_region && alt_region != NULL) { 3386 // Try to allocate in the alternative region. 3387 if (par) { 3388 block = alt_region->par_allocate(word_size); 3389 } else { 3390 block = alt_region->allocate(word_size); 3391 } 3392 // Make an alias. 3393 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; 3394 if (block != NULL) { 3395 return block; 3396 } 3397 retire_alloc_region(alt_region, par); 3398 } 3399 // Both the allocation region and the alternative one are full 3400 // and aliased, replace them with a new allocation region. 3401 purpose = alt_purpose; 3402 } else { 3403 set_gc_alloc_region(purpose, NULL); 3404 return NULL; 3405 } 3406 } 3407 3408 // Now allocate a new region for allocation. 3409 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); 3410 3411 // let the caller handle alloc failure 3412 if (alloc_region != NULL) { 3413 3414 assert(check_gc_alloc_regions(), "alloc regions messed up"); 3415 assert(alloc_region->saved_mark_at_top(), 3416 "Mark should have been saved already."); 3417 // We used to assert that the region was zero-filled here, but no 3418 // longer. 3419 3420 // This must be done last: once it's installed, other regions may 3421 // allocate in it (without holding the lock.) 3422 set_gc_alloc_region(purpose, alloc_region); 3423 3424 if (par) { 3425 block = alloc_region->par_allocate(word_size); 3426 } else { 3427 block = alloc_region->allocate(word_size); 3428 } 3429 // Caller handles alloc failure. 3430 } else { 3431 // This sets other apis using the same old alloc region to NULL, also. 3432 set_gc_alloc_region(purpose, NULL); 3433 } 3434 return block; // May be NULL. 3435 } 3436 3437 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { 3438 HeapWord* block = NULL; 3439 size_t free_words; 3440 do { 3441 free_words = r->free()/HeapWordSize; 3442 // If there's too little space, no one can allocate, so we're done. 3443 if (free_words < (size_t)oopDesc::header_size()) return; 3444 // Otherwise, try to claim it. 3445 block = r->par_allocate(free_words); 3446 } while (block == NULL); 3447 fill_with_object(block, free_words); 3448 } 3449 3450 #define use_local_bitmaps 1 3451 #define verify_local_bitmaps 0 3452 3453 #ifndef PRODUCT 3454 3455 class GCLabBitMap; 3456 class GCLabBitMapClosure: public BitMapClosure { 3457 private: 3458 ConcurrentMark* _cm; 3459 GCLabBitMap* _bitmap; 3460 3461 public: 3462 GCLabBitMapClosure(ConcurrentMark* cm, 3463 GCLabBitMap* bitmap) { 3464 _cm = cm; 3465 _bitmap = bitmap; 3466 } 3467 3468 virtual bool do_bit(size_t offset); 3469 }; 3470 3471 #endif // PRODUCT 3472 3473 #define oop_buffer_length 256 3474 3475 class GCLabBitMap: public BitMap { 3476 private: 3477 ConcurrentMark* _cm; 3478 3479 int _shifter; 3480 size_t _bitmap_word_covers_words; 3481 3482 // beginning of the heap 3483 HeapWord* _heap_start; 3484 3485 // this is the actual start of the GCLab 3486 HeapWord* _real_start_word; 3487 3488 // this is the actual end of the GCLab 3489 HeapWord* _real_end_word; 3490 3491 // this is the first word, possibly located before the actual start 3492 // of the GCLab, that corresponds to the first bit of the bitmap 3493 HeapWord* _start_word; 3494 3495 // size of a GCLab in words 3496 size_t _gclab_word_size; 3497 3498 static int shifter() { 3499 return MinObjAlignment - 1; 3500 } 3501 3502 // how many heap words does a single bitmap word corresponds to? 3503 static size_t bitmap_word_covers_words() { 3504 return BitsPerWord << shifter(); 3505 } 3506 3507 static size_t gclab_word_size() { 3508 return ParallelGCG1AllocBufferSize / HeapWordSize; 3509 } 3510 3511 static size_t bitmap_size_in_bits() { 3512 size_t bits_in_bitmap = gclab_word_size() >> shifter(); 3513 // We are going to ensure that the beginning of a word in this 3514 // bitmap also corresponds to the beginning of a word in the 3515 // global marking bitmap. To handle the case where a GCLab 3516 // starts from the middle of the bitmap, we need to add enough 3517 // space (i.e. up to a bitmap word) to ensure that we have 3518 // enough bits in the bitmap. 3519 return bits_in_bitmap + BitsPerWord - 1; 3520 } 3521 public: 3522 GCLabBitMap(HeapWord* heap_start) 3523 : BitMap(bitmap_size_in_bits()), 3524 _cm(G1CollectedHeap::heap()->concurrent_mark()), 3525 _shifter(shifter()), 3526 _bitmap_word_covers_words(bitmap_word_covers_words()), 3527 _heap_start(heap_start), 3528 _gclab_word_size(gclab_word_size()), 3529 _real_start_word(NULL), 3530 _real_end_word(NULL), 3531 _start_word(NULL) 3532 { 3533 guarantee( size_in_words() >= bitmap_size_in_words(), 3534 "just making sure"); 3535 } 3536 3537 inline unsigned heapWordToOffset(HeapWord* addr) { 3538 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; 3539 assert(offset < size(), "offset should be within bounds"); 3540 return offset; 3541 } 3542 3543 inline HeapWord* offsetToHeapWord(size_t offset) { 3544 HeapWord* addr = _start_word + (offset << _shifter); 3545 assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); 3546 return addr; 3547 } 3548 3549 bool fields_well_formed() { 3550 bool ret1 = (_real_start_word == NULL) && 3551 (_real_end_word == NULL) && 3552 (_start_word == NULL); 3553 if (ret1) 3554 return true; 3555 3556 bool ret2 = _real_start_word >= _start_word && 3557 _start_word < _real_end_word && 3558 (_real_start_word + _gclab_word_size) == _real_end_word && 3559 (_start_word + _gclab_word_size + _bitmap_word_covers_words) 3560 > _real_end_word; 3561 return ret2; 3562 } 3563 3564 inline bool mark(HeapWord* addr) { 3565 guarantee(use_local_bitmaps, "invariant"); 3566 assert(fields_well_formed(), "invariant"); 3567 3568 if (addr >= _real_start_word && addr < _real_end_word) { 3569 assert(!isMarked(addr), "should not have already been marked"); 3570 3571 // first mark it on the bitmap 3572 at_put(heapWordToOffset(addr), true); 3573 3574 return true; 3575 } else { 3576 return false; 3577 } 3578 } 3579 3580 inline bool isMarked(HeapWord* addr) { 3581 guarantee(use_local_bitmaps, "invariant"); 3582 assert(fields_well_formed(), "invariant"); 3583 3584 return at(heapWordToOffset(addr)); 3585 } 3586 3587 void set_buffer(HeapWord* start) { 3588 guarantee(use_local_bitmaps, "invariant"); 3589 clear(); 3590 3591 assert(start != NULL, "invariant"); 3592 _real_start_word = start; 3593 _real_end_word = start + _gclab_word_size; 3594 3595 size_t diff = 3596 pointer_delta(start, _heap_start) % _bitmap_word_covers_words; 3597 _start_word = start - diff; 3598 3599 assert(fields_well_formed(), "invariant"); 3600 } 3601 3602 #ifndef PRODUCT 3603 void verify() { 3604 // verify that the marks have been propagated 3605 GCLabBitMapClosure cl(_cm, this); 3606 iterate(&cl); 3607 } 3608 #endif // PRODUCT 3609 3610 void retire() { 3611 guarantee(use_local_bitmaps, "invariant"); 3612 assert(fields_well_formed(), "invariant"); 3613 3614 if (_start_word != NULL) { 3615 CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); 3616 3617 // this means that the bitmap was set up for the GCLab 3618 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); 3619 3620 mark_bitmap->mostly_disjoint_range_union(this, 3621 0, // always start from the start of the bitmap 3622 _start_word, 3623 size_in_words()); 3624 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); 3625 3626 #ifndef PRODUCT 3627 if (use_local_bitmaps && verify_local_bitmaps) 3628 verify(); 3629 #endif // PRODUCT 3630 } else { 3631 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); 3632 } 3633 } 3634 3635 static size_t bitmap_size_in_words() { 3636 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; 3637 } 3638 }; 3639 3640 #ifndef PRODUCT 3641 3642 bool GCLabBitMapClosure::do_bit(size_t offset) { 3643 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 3644 guarantee(_cm->isMarked(oop(addr)), "it should be!"); 3645 return true; 3646 } 3647 3648 #endif // PRODUCT 3649 3650 class G1ParGCAllocBuffer: public ParGCAllocBuffer { 3651 private: 3652 bool _retired; 3653 bool _during_marking; 3654 GCLabBitMap _bitmap; 3655 3656 public: 3657 G1ParGCAllocBuffer() : 3658 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize), 3659 _during_marking(G1CollectedHeap::heap()->mark_in_progress()), 3660 _bitmap(G1CollectedHeap::heap()->reserved_region().start()), 3661 _retired(false) 3662 { } 3663 3664 inline bool mark(HeapWord* addr) { 3665 guarantee(use_local_bitmaps, "invariant"); 3666 assert(_during_marking, "invariant"); 3667 return _bitmap.mark(addr); 3668 } 3669 3670 inline void set_buf(HeapWord* buf) { 3671 if (use_local_bitmaps && _during_marking) 3672 _bitmap.set_buffer(buf); 3673 ParGCAllocBuffer::set_buf(buf); 3674 _retired = false; 3675 } 3676 3677 inline void retire(bool end_of_gc, bool retain) { 3678 if (_retired) 3679 return; 3680 if (use_local_bitmaps && _during_marking) { 3681 _bitmap.retire(); 3682 } 3683 ParGCAllocBuffer::retire(end_of_gc, retain); 3684 _retired = true; 3685 } 3686 }; 3687 3688 3689 class G1ParScanThreadState : public StackObj { 3690 protected: 3691 G1CollectedHeap* _g1h; 3692 RefToScanQueue* _refs; 3693 DirtyCardQueue _dcq; 3694 CardTableModRefBS* _ct_bs; 3695 G1RemSet* _g1_rem; 3696 3697 typedef GrowableArray<oop*> OverflowQueue; 3698 OverflowQueue* _overflowed_refs; 3699 3700 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; 3701 ageTable _age_table; 3702 3703 size_t _alloc_buffer_waste; 3704 size_t _undo_waste; 3705 3706 OopsInHeapRegionClosure* _evac_failure_cl; 3707 G1ParScanHeapEvacClosure* _evac_cl; 3708 G1ParScanPartialArrayClosure* _partial_scan_cl; 3709 3710 int _hash_seed; 3711 int _queue_num; 3712 3713 int _term_attempts; 3714 #if G1_DETAILED_STATS 3715 int _pushes, _pops, _steals, _steal_attempts; 3716 int _overflow_pushes; 3717 #endif 3718 3719 double _start; 3720 double _start_strong_roots; 3721 double _strong_roots_time; 3722 double _start_term; 3723 double _term_time; 3724 3725 // Map from young-age-index (0 == not young, 1 is youngest) to 3726 // surviving words. base is what we get back from the malloc call 3727 size_t* _surviving_young_words_base; 3728 // this points into the array, as we use the first few entries for padding 3729 size_t* _surviving_young_words; 3730 3731 #define PADDING_ELEM_NUM (64 / sizeof(size_t)) 3732 3733 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } 3734 3735 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } 3736 3737 DirtyCardQueue& dirty_card_queue() { return _dcq; } 3738 CardTableModRefBS* ctbs() { return _ct_bs; } 3739 3740 void immediate_rs_update(HeapRegion* from, oop* p, int tid) { 3741 _g1_rem->par_write_ref(from, p, tid); 3742 } 3743 3744 void deferred_rs_update(HeapRegion* from, oop* p, int tid) { 3745 // If the new value of the field points to the same region or 3746 // is the to-space, we don't need to include it in the Rset updates. 3747 if (!from->is_in_reserved(*p) && !from->is_survivor()) { 3748 size_t card_index = ctbs()->index_for(p); 3749 // If the card hasn't been added to the buffer, do it. 3750 if (ctbs()->mark_card_deferred(card_index)) { 3751 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); 3752 } 3753 } 3754 } 3755 3756 public: 3757 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) 3758 : _g1h(g1h), 3759 _refs(g1h->task_queue(queue_num)), 3760 _dcq(&g1h->dirty_card_queue_set()), 3761 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 3762 _g1_rem(g1h->g1_rem_set()), 3763 _hash_seed(17), _queue_num(queue_num), 3764 _term_attempts(0), 3765 _age_table(false), 3766 #if G1_DETAILED_STATS 3767 _pushes(0), _pops(0), _steals(0), 3768 _steal_attempts(0), _overflow_pushes(0), 3769 #endif 3770 _strong_roots_time(0), _term_time(0), 3771 _alloc_buffer_waste(0), _undo_waste(0) 3772 { 3773 // we allocate G1YoungSurvRateNumRegions plus one entries, since 3774 // we "sacrifice" entry 0 to keep track of surviving bytes for 3775 // non-young regions (where the age is -1) 3776 // We also add a few elements at the beginning and at the end in 3777 // an attempt to eliminate cache contention 3778 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); 3779 size_t array_length = PADDING_ELEM_NUM + 3780 real_length + 3781 PADDING_ELEM_NUM; 3782 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); 3783 if (_surviving_young_words_base == NULL) 3784 vm_exit_out_of_memory(array_length * sizeof(size_t), 3785 "Not enough space for young surv histo."); 3786 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 3787 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); 3788 3789 _overflowed_refs = new OverflowQueue(10); 3790 3791 _start = os::elapsedTime(); 3792 } 3793 3794 ~G1ParScanThreadState() { 3795 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); 3796 } 3797 3798 RefToScanQueue* refs() { return _refs; } 3799 OverflowQueue* overflowed_refs() { return _overflowed_refs; } 3800 ageTable* age_table() { return &_age_table; } 3801 3802 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { 3803 return &_alloc_buffers[purpose]; 3804 } 3805 3806 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } 3807 size_t undo_waste() { return _undo_waste; } 3808 3809 void push_on_queue(oop* ref) { 3810 assert(ref != NULL, "invariant"); 3811 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant"); 3812 3813 if (!refs()->push(ref)) { 3814 overflowed_refs()->push(ref); 3815 IF_G1_DETAILED_STATS(note_overflow_push()); 3816 } else { 3817 IF_G1_DETAILED_STATS(note_push()); 3818 } 3819 } 3820 3821 void pop_from_queue(oop*& ref) { 3822 if (!refs()->pop_local(ref)) { 3823 ref = NULL; 3824 } else { 3825 assert(ref != NULL, "invariant"); 3826 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), 3827 "invariant"); 3828 3829 IF_G1_DETAILED_STATS(note_pop()); 3830 } 3831 } 3832 3833 void pop_from_overflow_queue(oop*& ref) { 3834 ref = overflowed_refs()->pop(); 3835 } 3836 3837 int refs_to_scan() { return refs()->size(); } 3838 int overflowed_refs_to_scan() { return overflowed_refs()->length(); } 3839 3840 void update_rs(HeapRegion* from, oop* p, int tid) { 3841 if (G1DeferredRSUpdate) { 3842 deferred_rs_update(from, p, tid); 3843 } else { 3844 immediate_rs_update(from, p, tid); 3845 } 3846 } 3847 3848 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 3849 3850 HeapWord* obj = NULL; 3851 if (word_sz * 100 < 3852 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) * 3853 ParallelGCBufferWastePct) { 3854 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); 3855 add_to_alloc_buffer_waste(alloc_buf->words_remaining()); 3856 alloc_buf->retire(false, false); 3857 3858 HeapWord* buf = 3859 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize); 3860 if (buf == NULL) return NULL; // Let caller handle allocation failure. 3861 // Otherwise. 3862 alloc_buf->set_buf(buf); 3863 3864 obj = alloc_buf->allocate(word_sz); 3865 assert(obj != NULL, "buffer was definitely big enough..."); 3866 } else { 3867 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 3868 } 3869 return obj; 3870 } 3871 3872 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { 3873 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); 3874 if (obj != NULL) return obj; 3875 return allocate_slow(purpose, word_sz); 3876 } 3877 3878 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { 3879 if (alloc_buffer(purpose)->contains(obj)) { 3880 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), 3881 "should contain whole object"); 3882 alloc_buffer(purpose)->undo_allocation(obj, word_sz); 3883 } else { 3884 CollectedHeap::fill_with_object(obj, word_sz); 3885 add_to_undo_waste(word_sz); 3886 } 3887 } 3888 3889 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { 3890 _evac_failure_cl = evac_failure_cl; 3891 } 3892 OopsInHeapRegionClosure* evac_failure_closure() { 3893 return _evac_failure_cl; 3894 } 3895 3896 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { 3897 _evac_cl = evac_cl; 3898 } 3899 3900 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { 3901 _partial_scan_cl = partial_scan_cl; 3902 } 3903 3904 int* hash_seed() { return &_hash_seed; } 3905 int queue_num() { return _queue_num; } 3906 3907 int term_attempts() { return _term_attempts; } 3908 void note_term_attempt() { _term_attempts++; } 3909 3910 #if G1_DETAILED_STATS 3911 int pushes() { return _pushes; } 3912 int pops() { return _pops; } 3913 int steals() { return _steals; } 3914 int steal_attempts() { return _steal_attempts; } 3915 int overflow_pushes() { return _overflow_pushes; } 3916 3917 void note_push() { _pushes++; } 3918 void note_pop() { _pops++; } 3919 void note_steal() { _steals++; } 3920 void note_steal_attempt() { _steal_attempts++; } 3921 void note_overflow_push() { _overflow_pushes++; } 3922 #endif 3923 3924 void start_strong_roots() { 3925 _start_strong_roots = os::elapsedTime(); 3926 } 3927 void end_strong_roots() { 3928 _strong_roots_time += (os::elapsedTime() - _start_strong_roots); 3929 } 3930 double strong_roots_time() { return _strong_roots_time; } 3931 3932 void start_term_time() { 3933 note_term_attempt(); 3934 _start_term = os::elapsedTime(); 3935 } 3936 void end_term_time() { 3937 _term_time += (os::elapsedTime() - _start_term); 3938 } 3939 double term_time() { return _term_time; } 3940 3941 double elapsed() { 3942 return os::elapsedTime() - _start; 3943 } 3944 3945 size_t* surviving_young_words() { 3946 // We add on to hide entry 0 which accumulates surviving words for 3947 // age -1 regions (i.e. non-young ones) 3948 return _surviving_young_words; 3949 } 3950 3951 void retire_alloc_buffers() { 3952 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3953 size_t waste = _alloc_buffers[ap].words_remaining(); 3954 add_to_alloc_buffer_waste(waste); 3955 _alloc_buffers[ap].retire(true, false); 3956 } 3957 } 3958 3959 private: 3960 void deal_with_reference(oop* ref_to_scan) { 3961 if (has_partial_array_mask(ref_to_scan)) { 3962 _partial_scan_cl->do_oop_nv(ref_to_scan); 3963 } else { 3964 // Note: we can use "raw" versions of "region_containing" because 3965 // "obj_to_scan" is definitely in the heap, and is not in a 3966 // humongous region. 3967 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); 3968 _evac_cl->set_region(r); 3969 _evac_cl->do_oop_nv(ref_to_scan); 3970 } 3971 } 3972 3973 public: 3974 void trim_queue() { 3975 // I've replicated the loop twice, first to drain the overflow 3976 // queue, second to drain the task queue. This is better than 3977 // having a single loop, which checks both conditions and, inside 3978 // it, either pops the overflow queue or the task queue, as each 3979 // loop is tighter. Also, the decision to drain the overflow queue 3980 // first is not arbitrary, as the overflow queue is not visible 3981 // to the other workers, whereas the task queue is. So, we want to 3982 // drain the "invisible" entries first, while allowing the other 3983 // workers to potentially steal the "visible" entries. 3984 3985 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { 3986 while (overflowed_refs_to_scan() > 0) { 3987 oop *ref_to_scan = NULL; 3988 pop_from_overflow_queue(ref_to_scan); 3989 assert(ref_to_scan != NULL, "invariant"); 3990 // We shouldn't have pushed it on the queue if it was not 3991 // pointing into the CSet. 3992 assert(ref_to_scan != NULL, "sanity"); 3993 assert(has_partial_array_mask(ref_to_scan) || 3994 _g1h->obj_in_cs(*ref_to_scan), "sanity"); 3995 3996 deal_with_reference(ref_to_scan); 3997 } 3998 3999 while (refs_to_scan() > 0) { 4000 oop *ref_to_scan = NULL; 4001 pop_from_queue(ref_to_scan); 4002 4003 if (ref_to_scan != NULL) { 4004 // We shouldn't have pushed it on the queue if it was not 4005 // pointing into the CSet. 4006 assert(has_partial_array_mask(ref_to_scan) || 4007 _g1h->obj_in_cs(*ref_to_scan), "sanity"); 4008 4009 deal_with_reference(ref_to_scan); 4010 } 4011 } 4012 } 4013 } 4014 }; 4015 4016 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : 4017 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 4018 _par_scan_state(par_scan_state) { } 4019 4020 // This closure is applied to the fields of the objects that have just been copied. 4021 // Should probably be made inline and moved in g1OopClosures.inline.hpp. 4022 void G1ParScanClosure::do_oop_nv(oop* p) { 4023 oop obj = *p; 4024 4025 if (obj != NULL) { 4026 if (_g1->in_cset_fast_test(obj)) { 4027 // We're not going to even bother checking whether the object is 4028 // already forwarded or not, as this usually causes an immediate 4029 // stall. We'll try to prefetch the object (for write, given that 4030 // we might need to install the forwarding reference) and we'll 4031 // get back to it when pop it from the queue 4032 Prefetch::write(obj->mark_addr(), 0); 4033 Prefetch::read(obj->mark_addr(), (HeapWordSize*2)); 4034 4035 // slightly paranoid test; I'm trying to catch potential 4036 // problems before we go into push_on_queue to know where the 4037 // problem is coming from 4038 assert(obj == *p, "the value of *p should not have changed"); 4039 _par_scan_state->push_on_queue(p); 4040 } else { 4041 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4042 } 4043 } 4044 } 4045 4046 void G1ParCopyHelper::mark_forwardee(oop* p) { 4047 // This is called _after_ do_oop_work has been called, hence after 4048 // the object has been relocated to its new location and *p points 4049 // to its new location. 4050 4051 oop thisOop = *p; 4052 if (thisOop != NULL) { 4053 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), 4054 "shouldn't still be in the CSet if evacuation didn't fail."); 4055 HeapWord* addr = (HeapWord*)thisOop; 4056 if (_g1->is_in_g1_reserved(addr)) 4057 _cm->grayRoot(oop(addr)); 4058 } 4059 } 4060 4061 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { 4062 size_t word_sz = old->size(); 4063 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 4064 // +1 to make the -1 indexes valid... 4065 int young_index = from_region->young_index_in_cset()+1; 4066 assert( (from_region->is_young() && young_index > 0) || 4067 (!from_region->is_young() && young_index == 0), "invariant" ); 4068 G1CollectorPolicy* g1p = _g1->g1_policy(); 4069 markOop m = old->mark(); 4070 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 4071 : m->age(); 4072 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 4073 word_sz); 4074 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); 4075 oop obj = oop(obj_ptr); 4076 4077 if (obj_ptr == NULL) { 4078 // This will either forward-to-self, or detect that someone else has 4079 // installed a forwarding pointer. 4080 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); 4081 return _g1->handle_evacuation_failure_par(cl, old); 4082 } 4083 4084 // We're going to allocate linearly, so might as well prefetch ahead. 4085 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); 4086 4087 oop forward_ptr = old->forward_to_atomic(obj); 4088 if (forward_ptr == NULL) { 4089 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 4090 if (g1p->track_object_age(alloc_purpose)) { 4091 // We could simply do obj->incr_age(). However, this causes a 4092 // performance issue. obj->incr_age() will first check whether 4093 // the object has a displaced mark by checking its mark word; 4094 // getting the mark word from the new location of the object 4095 // stalls. So, given that we already have the mark word and we 4096 // are about to install it anyway, it's better to increase the 4097 // age on the mark word, when the object does not have a 4098 // displaced mark word. We're not expecting many objects to have 4099 // a displaced marked word, so that case is not optimized 4100 // further (it could be...) and we simply call obj->incr_age(). 4101 4102 if (m->has_displaced_mark_helper()) { 4103 // in this case, we have to install the mark word first, 4104 // otherwise obj looks to be forwarded (the old mark word, 4105 // which contains the forward pointer, was copied) 4106 obj->set_mark(m); 4107 obj->incr_age(); 4108 } else { 4109 m = m->incr_age(); 4110 obj->set_mark(m); 4111 } 4112 _par_scan_state->age_table()->add(obj, word_sz); 4113 } else { 4114 obj->set_mark(m); 4115 } 4116 4117 // preserve "next" mark bit 4118 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { 4119 if (!use_local_bitmaps || 4120 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { 4121 // if we couldn't mark it on the local bitmap (this happens when 4122 // the object was not allocated in the GCLab), we have to bite 4123 // the bullet and do the standard parallel mark 4124 _cm->markAndGrayObjectIfNecessary(obj); 4125 } 4126 #if 1 4127 if (_g1->isMarkedNext(old)) { 4128 _cm->nextMarkBitMap()->parClear((HeapWord*)old); 4129 } 4130 #endif 4131 } 4132 4133 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 4134 surv_young_words[young_index] += word_sz; 4135 4136 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4137 arrayOop(old)->set_length(0); 4138 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 4139 } else { 4140 // No point in using the slower heap_region_containing() method, 4141 // given that we know obj is in the heap. 4142 _scanner->set_region(_g1->heap_region_containing_raw(obj)); 4143 obj->oop_iterate_backwards(_scanner); 4144 } 4145 } else { 4146 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4147 obj = forward_ptr; 4148 } 4149 return obj; 4150 } 4151 4152 template<bool do_gen_barrier, G1Barrier barrier, 4153 bool do_mark_forwardee, bool skip_cset_test> 4154 void G1ParCopyClosure<do_gen_barrier, barrier, 4155 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) { 4156 oop obj = *p; 4157 assert(barrier != G1BarrierRS || obj != NULL, 4158 "Precondition: G1BarrierRS implies obj is nonNull"); 4159 4160 // The only time we skip the cset test is when we're scanning 4161 // references popped from the queue. And we only push on the queue 4162 // references that we know point into the cset, so no point in 4163 // checking again. But we'll leave an assert here for peace of mind. 4164 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); 4165 4166 // here the null check is implicit in the cset_fast_test() test 4167 if (skip_cset_test || _g1->in_cset_fast_test(obj)) { 4168 #if G1_REM_SET_LOGGING 4169 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " 4170 "into CS.", p, (void*) obj); 4171 #endif 4172 if (obj->is_forwarded()) { 4173 *p = obj->forwardee(); 4174 } else { 4175 *p = copy_to_survivor_space(obj); 4176 } 4177 // When scanning the RS, we only care about objs in CS. 4178 if (barrier == G1BarrierRS) { 4179 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4180 } 4181 } 4182 4183 // When scanning moved objs, must look at all oops. 4184 if (barrier == G1BarrierEvac && obj != NULL) { 4185 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4186 } 4187 4188 if (do_gen_barrier && obj != NULL) { 4189 par_do_barrier(p); 4190 } 4191 } 4192 4193 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); 4194 4195 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk( 4196 oop obj, int start, int end) { 4197 // process our set of indices (include header in first chunk) 4198 assert(start < end, "invariant"); 4199 T* const base = (T*)objArrayOop(obj)->base(); 4200 T* const start_addr = (start == 0) ? (T*) obj : base + start; 4201 T* const end_addr = base + end; 4202 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); 4203 _scanner.set_region(_g1->heap_region_containing(obj)); 4204 obj->oop_iterate(&_scanner, mr); 4205 } 4206 4207 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { 4208 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); 4209 assert(has_partial_array_mask(p), "invariant"); 4210 oop old = clear_partial_array_mask(p); 4211 assert(old->is_objArray(), "must be obj array"); 4212 assert(old->is_forwarded(), "must be forwarded"); 4213 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 4214 4215 objArrayOop obj = objArrayOop(old->forwardee()); 4216 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); 4217 // Process ParGCArrayScanChunk elements now 4218 // and push the remainder back onto queue 4219 int start = arrayOop(old)->length(); 4220 int end = obj->length(); 4221 int remainder = end - start; 4222 assert(start <= end, "just checking"); 4223 if (remainder > 2 * ParGCArrayScanChunk) { 4224 // Test above combines last partial chunk with a full chunk 4225 end = start + ParGCArrayScanChunk; 4226 arrayOop(old)->set_length(end); 4227 // Push remainder. 4228 _par_scan_state->push_on_queue(set_partial_array_mask(old)); 4229 } else { 4230 // Restore length so that the heap remains parsable in 4231 // case of evacuation failure. 4232 arrayOop(old)->set_length(end); 4233 } 4234 4235 // process our set of indices (include header in first chunk) 4236 process_array_chunk<oop>(obj, start, end); 4237 } 4238 4239 int G1ScanAndBalanceClosure::_nq = 0; 4240 4241 class G1ParEvacuateFollowersClosure : public VoidClosure { 4242 protected: 4243 G1CollectedHeap* _g1h; 4244 G1ParScanThreadState* _par_scan_state; 4245 RefToScanQueueSet* _queues; 4246 ParallelTaskTerminator* _terminator; 4247 4248 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } 4249 RefToScanQueueSet* queues() { return _queues; } 4250 ParallelTaskTerminator* terminator() { return _terminator; } 4251 4252 public: 4253 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, 4254 G1ParScanThreadState* par_scan_state, 4255 RefToScanQueueSet* queues, 4256 ParallelTaskTerminator* terminator) 4257 : _g1h(g1h), _par_scan_state(par_scan_state), 4258 _queues(queues), _terminator(terminator) {} 4259 4260 void do_void() { 4261 G1ParScanThreadState* pss = par_scan_state(); 4262 while (true) { 4263 oop* ref_to_scan; 4264 pss->trim_queue(); 4265 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); 4266 if (queues()->steal(pss->queue_num(), 4267 pss->hash_seed(), 4268 ref_to_scan)) { 4269 IF_G1_DETAILED_STATS(pss->note_steal()); 4270 4271 // slightly paranoid tests; I'm trying to catch potential 4272 // problems before we go into push_on_queue to know where the 4273 // problem is coming from 4274 assert(ref_to_scan != NULL, "invariant"); 4275 assert(has_partial_array_mask(ref_to_scan) || 4276 _g1h->obj_in_cs(*ref_to_scan), "invariant"); 4277 pss->push_on_queue(ref_to_scan); 4278 continue; 4279 } 4280 pss->start_term_time(); 4281 if (terminator()->offer_termination()) break; 4282 pss->end_term_time(); 4283 } 4284 pss->end_term_time(); 4285 pss->retire_alloc_buffers(); 4286 } 4287 }; 4288 4289 class G1ParTask : public AbstractGangTask { 4290 protected: 4291 G1CollectedHeap* _g1h; 4292 RefToScanQueueSet *_queues; 4293 ParallelTaskTerminator _terminator; 4294 4295 Mutex _stats_lock; 4296 Mutex* stats_lock() { return &_stats_lock; } 4297 4298 size_t getNCards() { 4299 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) 4300 / G1BlockOffsetSharedArray::N_bytes; 4301 } 4302 4303 public: 4304 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) 4305 : AbstractGangTask("G1 collection"), 4306 _g1h(g1h), 4307 _queues(task_queues), 4308 _terminator(workers, _queues), 4309 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) 4310 {} 4311 4312 RefToScanQueueSet* queues() { return _queues; } 4313 4314 RefToScanQueue *work_queue(int i) { 4315 return queues()->queue(i); 4316 } 4317 4318 void work(int i) { 4319 ResourceMark rm; 4320 HandleMark hm; 4321 4322 G1ParScanThreadState pss(_g1h, i); 4323 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 4324 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); 4325 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); 4326 4327 pss.set_evac_closure(&scan_evac_cl); 4328 pss.set_evac_failure_closure(&evac_failure_cl); 4329 pss.set_partial_scan_closure(&partial_scan_cl); 4330 4331 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); 4332 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); 4333 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); 4334 4335 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); 4336 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); 4337 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); 4338 4339 OopsInHeapRegionClosure *scan_root_cl; 4340 OopsInHeapRegionClosure *scan_perm_cl; 4341 OopsInHeapRegionClosure *scan_so_cl; 4342 4343 if (_g1h->g1_policy()->should_initiate_conc_mark()) { 4344 scan_root_cl = &scan_mark_root_cl; 4345 scan_perm_cl = &scan_mark_perm_cl; 4346 scan_so_cl = &scan_mark_heap_rs_cl; 4347 } else { 4348 scan_root_cl = &only_scan_root_cl; 4349 scan_perm_cl = &only_scan_perm_cl; 4350 scan_so_cl = &only_scan_heap_rs_cl; 4351 } 4352 4353 pss.start_strong_roots(); 4354 _g1h->g1_process_strong_roots(/* not collecting perm */ false, 4355 SharedHeap::SO_AllClasses, 4356 scan_root_cl, 4357 &only_scan_heap_rs_cl, 4358 scan_so_cl, 4359 scan_perm_cl, 4360 i); 4361 pss.end_strong_roots(); 4362 { 4363 double start = os::elapsedTime(); 4364 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); 4365 evac.do_void(); 4366 double elapsed_ms = (os::elapsedTime()-start)*1000.0; 4367 double term_ms = pss.term_time()*1000.0; 4368 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); 4369 _g1h->g1_policy()->record_termination_time(i, term_ms); 4370 } 4371 if (G1UseSurvivorSpace) { 4372 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); 4373 } 4374 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); 4375 4376 // Clean up any par-expanded rem sets. 4377 HeapRegionRemSet::par_cleanup(); 4378 4379 MutexLocker x(stats_lock()); 4380 if (ParallelGCVerbose) { 4381 gclog_or_tty->print("Thread %d complete:\n", i); 4382 #if G1_DETAILED_STATS 4383 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", 4384 pss.pushes(), 4385 pss.pops(), 4386 pss.overflow_pushes(), 4387 pss.steals(), 4388 pss.steal_attempts()); 4389 #endif 4390 double elapsed = pss.elapsed(); 4391 double strong_roots = pss.strong_roots_time(); 4392 double term = pss.term_time(); 4393 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" 4394 " Strong roots: %7.2f ms (%6.2f%%)\n" 4395 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", 4396 elapsed * 1000.0, 4397 strong_roots * 1000.0, (strong_roots*100.0/elapsed), 4398 term * 1000.0, (term*100.0/elapsed), 4399 pss.term_attempts()); 4400 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); 4401 gclog_or_tty->print(" Waste: %8dK\n" 4402 " Alloc Buffer: %8dK\n" 4403 " Undo: %8dK\n", 4404 (total_waste * HeapWordSize) / K, 4405 (pss.alloc_buffer_waste() * HeapWordSize) / K, 4406 (pss.undo_waste() * HeapWordSize) / K); 4407 } 4408 4409 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); 4410 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); 4411 } 4412 }; 4413 4414 // *** Common G1 Evacuation Stuff 4415 4416 class G1CountClosure: public OopsInHeapRegionClosure { 4417 public: 4418 int n; 4419 G1CountClosure() : n(0) {} 4420 void do_oop(narrowOop* p) { 4421 guarantee(false, "NYI"); 4422 } 4423 void do_oop(oop* p) { 4424 oop obj = *p; 4425 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), 4426 "Rem set closure called on non-rem-set pointer."); 4427 n++; 4428 } 4429 }; 4430 4431 static G1CountClosure count_closure; 4432 4433 void 4434 G1CollectedHeap:: 4435 g1_process_strong_roots(bool collecting_perm_gen, 4436 SharedHeap::ScanningOption so, 4437 OopClosure* scan_non_heap_roots, 4438 OopsInHeapRegionClosure* scan_rs, 4439 OopsInHeapRegionClosure* scan_so, 4440 OopsInGenClosure* scan_perm, 4441 int worker_i) { 4442 // First scan the strong roots, including the perm gen. 4443 double ext_roots_start = os::elapsedTime(); 4444 double closure_app_time_sec = 0.0; 4445 4446 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 4447 BufferingOopsInGenClosure buf_scan_perm(scan_perm); 4448 buf_scan_perm.set_generation(perm_gen()); 4449 4450 process_strong_roots(collecting_perm_gen, so, 4451 &buf_scan_non_heap_roots, 4452 &buf_scan_perm); 4453 // Finish up any enqueued closure apps. 4454 buf_scan_non_heap_roots.done(); 4455 buf_scan_perm.done(); 4456 double ext_roots_end = os::elapsedTime(); 4457 g1_policy()->reset_obj_copy_time(worker_i); 4458 double obj_copy_time_sec = 4459 buf_scan_non_heap_roots.closure_app_seconds() + 4460 buf_scan_perm.closure_app_seconds(); 4461 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 4462 double ext_root_time_ms = 4463 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; 4464 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); 4465 4466 // Scan strong roots in mark stack. 4467 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { 4468 concurrent_mark()->oops_do(scan_non_heap_roots); 4469 } 4470 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; 4471 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); 4472 4473 // XXX What should this be doing in the parallel case? 4474 g1_policy()->record_collection_pause_end_CH_strong_roots(); 4475 if (G1VerifyRemSet) { 4476 // :::: FIXME :::: 4477 // The stupid remembered set doesn't know how to filter out dead 4478 // objects, which the smart one does, and so when it is created 4479 // and then compared the number of entries in each differs and 4480 // the verification code fails. 4481 guarantee(false, "verification code is broken, see note"); 4482 4483 // Let's make sure that the current rem set agrees with the stupidest 4484 // one possible! 4485 bool refs_enabled = ref_processor()->discovery_enabled(); 4486 if (refs_enabled) ref_processor()->disable_discovery(); 4487 StupidG1RemSet stupid(this); 4488 count_closure.n = 0; 4489 stupid.oops_into_collection_set_do(&count_closure, worker_i); 4490 int stupid_n = count_closure.n; 4491 count_closure.n = 0; 4492 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i); 4493 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ."); 4494 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n); 4495 if (refs_enabled) ref_processor()->enable_discovery(); 4496 } 4497 if (scan_so != NULL) { 4498 scan_scan_only_set(scan_so, worker_i); 4499 } 4500 // Now scan the complement of the collection set. 4501 if (scan_rs != NULL) { 4502 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 4503 } 4504 // Finish with the ref_processor roots. 4505 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 4506 ref_processor()->oops_do(scan_non_heap_roots); 4507 } 4508 g1_policy()->record_collection_pause_end_G1_strong_roots(); 4509 _process_strong_tasks->all_tasks_completed(); 4510 } 4511 4512 void 4513 G1CollectedHeap::scan_scan_only_region(HeapRegion* r, 4514 OopsInHeapRegionClosure* oc, 4515 int worker_i) { 4516 HeapWord* startAddr = r->bottom(); 4517 HeapWord* endAddr = r->used_region().end(); 4518 4519 oc->set_region(r); 4520 4521 HeapWord* p = r->bottom(); 4522 HeapWord* t = r->top(); 4523 guarantee( p == r->next_top_at_mark_start(), "invariant" ); 4524 while (p < t) { 4525 oop obj = oop(p); 4526 p += obj->oop_iterate(oc); 4527 } 4528 } 4529 4530 void 4531 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, 4532 int worker_i) { 4533 double start = os::elapsedTime(); 4534 4535 BufferingOopsInHeapRegionClosure boc(oc); 4536 4537 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); 4538 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); 4539 4540 OopsInHeapRegionClosure *foc; 4541 if (g1_policy()->should_initiate_conc_mark()) 4542 foc = &scan_and_mark; 4543 else 4544 foc = &scan_only; 4545 4546 HeapRegion* hr; 4547 int n = 0; 4548 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { 4549 scan_scan_only_region(hr, foc, worker_i); 4550 ++n; 4551 } 4552 boc.done(); 4553 4554 double closure_app_s = boc.closure_app_seconds(); 4555 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); 4556 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; 4557 g1_policy()->record_scan_only_time(worker_i, ms, n); 4558 } 4559 4560 void 4561 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, 4562 OopClosure* non_root_closure) { 4563 SharedHeap::process_weak_roots(root_closure, non_root_closure); 4564 } 4565 4566 4567 class SaveMarksClosure: public HeapRegionClosure { 4568 public: 4569 bool doHeapRegion(HeapRegion* r) { 4570 r->save_marks(); 4571 return false; 4572 } 4573 }; 4574 4575 void G1CollectedHeap::save_marks() { 4576 if (ParallelGCThreads == 0) { 4577 SaveMarksClosure sm; 4578 heap_region_iterate(&sm); 4579 } 4580 // We do this even in the parallel case 4581 perm_gen()->save_marks(); 4582 } 4583 4584 void G1CollectedHeap::evacuate_collection_set() { 4585 set_evacuation_failed(false); 4586 4587 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 4588 concurrent_g1_refine()->set_use_cache(false); 4589 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); 4590 set_par_threads(n_workers); 4591 G1ParTask g1_par_task(this, n_workers, _task_queues); 4592 4593 init_for_evac_failure(NULL); 4594 4595 change_strong_roots_parity(); // In preparation for parallel strong roots. 4596 rem_set()->prepare_for_younger_refs_iterate(true); 4597 4598 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); 4599 double start_par = os::elapsedTime(); 4600 if (ParallelGCThreads > 0) { 4601 // The individual threads will set their evac-failure closures. 4602 workers()->run_task(&g1_par_task); 4603 } else { 4604 g1_par_task.work(0); 4605 } 4606 4607 double par_time = (os::elapsedTime() - start_par) * 1000.0; 4608 g1_policy()->record_par_time(par_time); 4609 set_par_threads(0); 4610 // Is this the right thing to do here? We don't save marks 4611 // on individual heap regions when we allocate from 4612 // them in parallel, so this seems like the correct place for this. 4613 retire_all_alloc_regions(); 4614 { 4615 G1IsAliveClosure is_alive(this); 4616 G1KeepAliveClosure keep_alive(this); 4617 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 4618 } 4619 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 4620 4621 concurrent_g1_refine()->set_use_cache(true); 4622 4623 finalize_for_evac_failure(); 4624 4625 // Must do this before removing self-forwarding pointers, which clears 4626 // the per-region evac-failure flags. 4627 concurrent_mark()->complete_marking_in_collection_set(); 4628 4629 if (evacuation_failed()) { 4630 remove_self_forwarding_pointers(); 4631 if (PrintGCDetails) { 4632 gclog_or_tty->print(" (evacuation failed)"); 4633 } else if (PrintGC) { 4634 gclog_or_tty->print("--"); 4635 } 4636 } 4637 4638 if (G1DeferredRSUpdate) { 4639 RedirtyLoggedCardTableEntryFastClosure redirty; 4640 dirty_card_queue_set().set_closure(&redirty); 4641 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); 4642 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set()); 4643 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 4644 } 4645 4646 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 4647 } 4648 4649 void G1CollectedHeap::free_region(HeapRegion* hr) { 4650 size_t pre_used = 0; 4651 size_t cleared_h_regions = 0; 4652 size_t freed_regions = 0; 4653 UncleanRegionList local_list; 4654 4655 HeapWord* start = hr->bottom(); 4656 HeapWord* end = hr->prev_top_at_mark_start(); 4657 size_t used_bytes = hr->used(); 4658 size_t live_bytes = hr->max_live_bytes(); 4659 if (used_bytes > 0) { 4660 guarantee( live_bytes <= used_bytes, "invariant" ); 4661 } else { 4662 guarantee( live_bytes == 0, "invariant" ); 4663 } 4664 4665 size_t garbage_bytes = used_bytes - live_bytes; 4666 if (garbage_bytes > 0) 4667 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); 4668 4669 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, 4670 &local_list); 4671 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 4672 &local_list); 4673 } 4674 4675 void 4676 G1CollectedHeap::free_region_work(HeapRegion* hr, 4677 size_t& pre_used, 4678 size_t& cleared_h_regions, 4679 size_t& freed_regions, 4680 UncleanRegionList* list, 4681 bool par) { 4682 assert(!hr->popular(), "should not free popular regions"); 4683 pre_used += hr->used(); 4684 if (hr->isHumongous()) { 4685 assert(hr->startsHumongous(), 4686 "Only the start of a humongous region should be freed."); 4687 int ind = _hrs->find(hr); 4688 assert(ind != -1, "Should have an index."); 4689 // Clear the start region. 4690 hr->hr_clear(par, true /*clear_space*/); 4691 list->insert_before_head(hr); 4692 cleared_h_regions++; 4693 freed_regions++; 4694 // Clear any continued regions. 4695 ind++; 4696 while ((size_t)ind < n_regions()) { 4697 HeapRegion* hrc = _hrs->at(ind); 4698 if (!hrc->continuesHumongous()) break; 4699 // Otherwise, does continue the H region. 4700 assert(hrc->humongous_start_region() == hr, "Huh?"); 4701 hrc->hr_clear(par, true /*clear_space*/); 4702 cleared_h_regions++; 4703 freed_regions++; 4704 list->insert_before_head(hrc); 4705 ind++; 4706 } 4707 } else { 4708 hr->hr_clear(par, true /*clear_space*/); 4709 list->insert_before_head(hr); 4710 freed_regions++; 4711 // If we're using clear2, this should not be enabled. 4712 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); 4713 } 4714 } 4715 4716 void G1CollectedHeap::finish_free_region_work(size_t pre_used, 4717 size_t cleared_h_regions, 4718 size_t freed_regions, 4719 UncleanRegionList* list) { 4720 if (list != NULL && list->sz() > 0) { 4721 prepend_region_list_on_unclean_list(list); 4722 } 4723 // Acquire a lock, if we're parallel, to update possibly-shared 4724 // variables. 4725 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; 4726 { 4727 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); 4728 _summary_bytes_used -= pre_used; 4729 _num_humongous_regions -= (int) cleared_h_regions; 4730 _free_regions += freed_regions; 4731 } 4732 } 4733 4734 4735 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { 4736 while (list != NULL) { 4737 guarantee( list->is_young(), "invariant" ); 4738 4739 HeapWord* bottom = list->bottom(); 4740 HeapWord* end = list->end(); 4741 MemRegion mr(bottom, end); 4742 ct_bs->dirty(mr); 4743 4744 list = list->get_next_young_region(); 4745 } 4746 } 4747 4748 void G1CollectedHeap::cleanUpCardTable() { 4749 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); 4750 double start = os::elapsedTime(); 4751 4752 ct_bs->clear(_g1_committed); 4753 4754 // now, redirty the cards of the scan-only and survivor regions 4755 // (it seemed faster to do it this way, instead of iterating over 4756 // all regions and then clearing / dirtying as approprite) 4757 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); 4758 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); 4759 4760 double elapsed = os::elapsedTime() - start; 4761 g1_policy()->record_clear_ct_time( elapsed * 1000.0); 4762 } 4763 4764 4765 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { 4766 // First do any popular regions. 4767 HeapRegion* hr; 4768 while ((hr = popular_region_to_evac()) != NULL) { 4769 evac_popular_region(hr); 4770 } 4771 // Now do heuristic pauses. 4772 if (g1_policy()->should_do_collection_pause(word_size)) { 4773 do_collection_pause(); 4774 } 4775 } 4776 4777 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { 4778 double young_time_ms = 0.0; 4779 double non_young_time_ms = 0.0; 4780 4781 G1CollectorPolicy* policy = g1_policy(); 4782 4783 double start_sec = os::elapsedTime(); 4784 bool non_young = true; 4785 4786 HeapRegion* cur = cs_head; 4787 int age_bound = -1; 4788 size_t rs_lengths = 0; 4789 4790 while (cur != NULL) { 4791 if (non_young) { 4792 if (cur->is_young()) { 4793 double end_sec = os::elapsedTime(); 4794 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4795 non_young_time_ms += elapsed_ms; 4796 4797 start_sec = os::elapsedTime(); 4798 non_young = false; 4799 } 4800 } else { 4801 if (!cur->is_on_free_list()) { 4802 double end_sec = os::elapsedTime(); 4803 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4804 young_time_ms += elapsed_ms; 4805 4806 start_sec = os::elapsedTime(); 4807 non_young = true; 4808 } 4809 } 4810 4811 rs_lengths += cur->rem_set()->occupied(); 4812 4813 HeapRegion* next = cur->next_in_collection_set(); 4814 assert(cur->in_collection_set(), "bad CS"); 4815 cur->set_next_in_collection_set(NULL); 4816 cur->set_in_collection_set(false); 4817 4818 if (cur->is_young()) { 4819 int index = cur->young_index_in_cset(); 4820 guarantee( index != -1, "invariant" ); 4821 guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); 4822 size_t words_survived = _surviving_young_words[index]; 4823 cur->record_surv_words_in_group(words_survived); 4824 } else { 4825 int index = cur->young_index_in_cset(); 4826 guarantee( index == -1, "invariant" ); 4827 } 4828 4829 assert( (cur->is_young() && cur->young_index_in_cset() > -1) || 4830 (!cur->is_young() && cur->young_index_in_cset() == -1), 4831 "invariant" ); 4832 4833 if (!cur->evacuation_failed()) { 4834 // And the region is empty. 4835 assert(!cur->is_empty(), 4836 "Should not have empty regions in a CS."); 4837 free_region(cur); 4838 } else { 4839 guarantee( !cur->is_scan_only(), "should not be scan only" ); 4840 cur->uninstall_surv_rate_group(); 4841 if (cur->is_young()) 4842 cur->set_young_index_in_cset(-1); 4843 cur->set_not_young(); 4844 cur->set_evacuation_failed(false); 4845 } 4846 cur = next; 4847 } 4848 4849 policy->record_max_rs_lengths(rs_lengths); 4850 policy->cset_regions_freed(); 4851 4852 double end_sec = os::elapsedTime(); 4853 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4854 if (non_young) 4855 non_young_time_ms += elapsed_ms; 4856 else 4857 young_time_ms += elapsed_ms; 4858 4859 policy->record_young_free_cset_time_ms(young_time_ms); 4860 policy->record_non_young_free_cset_time_ms(non_young_time_ms); 4861 } 4862 4863 HeapRegion* 4864 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { 4865 assert(ZF_mon->owned_by_self(), "Precondition"); 4866 HeapRegion* res = pop_unclean_region_list_locked(); 4867 if (res != NULL) { 4868 assert(!res->continuesHumongous() && 4869 res->zero_fill_state() != HeapRegion::Allocated, 4870 "Only free regions on unclean list."); 4871 if (zero_filled) { 4872 res->ensure_zero_filled_locked(); 4873 res->set_zero_fill_allocated(); 4874 } 4875 } 4876 return res; 4877 } 4878 4879 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { 4880 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); 4881 return alloc_region_from_unclean_list_locked(zero_filled); 4882 } 4883 4884 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { 4885 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4886 put_region_on_unclean_list_locked(r); 4887 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4888 } 4889 4890 void G1CollectedHeap::set_unclean_regions_coming(bool b) { 4891 MutexLockerEx x(Cleanup_mon); 4892 set_unclean_regions_coming_locked(b); 4893 } 4894 4895 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { 4896 assert(Cleanup_mon->owned_by_self(), "Precondition"); 4897 _unclean_regions_coming = b; 4898 // Wake up mutator threads that might be waiting for completeCleanup to 4899 // finish. 4900 if (!b) Cleanup_mon->notify_all(); 4901 } 4902 4903 void G1CollectedHeap::wait_for_cleanup_complete() { 4904 MutexLockerEx x(Cleanup_mon); 4905 wait_for_cleanup_complete_locked(); 4906 } 4907 4908 void G1CollectedHeap::wait_for_cleanup_complete_locked() { 4909 assert(Cleanup_mon->owned_by_self(), "precondition"); 4910 while (_unclean_regions_coming) { 4911 Cleanup_mon->wait(); 4912 } 4913 } 4914 4915 void 4916 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { 4917 assert(ZF_mon->owned_by_self(), "precondition."); 4918 _unclean_region_list.insert_before_head(r); 4919 } 4920 4921 void 4922 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { 4923 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4924 prepend_region_list_on_unclean_list_locked(list); 4925 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4926 } 4927 4928 void 4929 G1CollectedHeap:: 4930 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { 4931 assert(ZF_mon->owned_by_self(), "precondition."); 4932 _unclean_region_list.prepend_list(list); 4933 } 4934 4935 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { 4936 assert(ZF_mon->owned_by_self(), "precondition."); 4937 HeapRegion* res = _unclean_region_list.pop(); 4938 if (res != NULL) { 4939 // Inform ZF thread that there's a new unclean head. 4940 if (_unclean_region_list.hd() != NULL && should_zf()) 4941 ZF_mon->notify_all(); 4942 } 4943 return res; 4944 } 4945 4946 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { 4947 assert(ZF_mon->owned_by_self(), "precondition."); 4948 return _unclean_region_list.hd(); 4949 } 4950 4951 4952 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { 4953 assert(ZF_mon->owned_by_self(), "Precondition"); 4954 HeapRegion* r = peek_unclean_region_list_locked(); 4955 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { 4956 // Result of below must be equal to "r", since we hold the lock. 4957 (void)pop_unclean_region_list_locked(); 4958 put_free_region_on_list_locked(r); 4959 return true; 4960 } else { 4961 return false; 4962 } 4963 } 4964 4965 bool G1CollectedHeap::move_cleaned_region_to_free_list() { 4966 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4967 return move_cleaned_region_to_free_list_locked(); 4968 } 4969 4970 4971 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { 4972 assert(ZF_mon->owned_by_self(), "precondition."); 4973 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4974 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, 4975 "Regions on free list must be zero filled"); 4976 assert(!r->isHumongous(), "Must not be humongous."); 4977 assert(r->is_empty(), "Better be empty"); 4978 assert(!r->is_on_free_list(), 4979 "Better not already be on free list"); 4980 assert(!r->is_on_unclean_list(), 4981 "Better not already be on unclean list"); 4982 r->set_on_free_list(true); 4983 r->set_next_on_free_list(_free_region_list); 4984 _free_region_list = r; 4985 _free_region_list_size++; 4986 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4987 } 4988 4989 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { 4990 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4991 put_free_region_on_list_locked(r); 4992 } 4993 4994 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { 4995 assert(ZF_mon->owned_by_self(), "precondition."); 4996 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4997 HeapRegion* res = _free_region_list; 4998 if (res != NULL) { 4999 _free_region_list = res->next_from_free_list(); 5000 _free_region_list_size--; 5001 res->set_on_free_list(false); 5002 res->set_next_on_free_list(NULL); 5003 assert(_free_region_list_size == free_region_list_length(), "Inv"); 5004 } 5005 return res; 5006 } 5007 5008 5009 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { 5010 // By self, or on behalf of self. 5011 assert(Heap_lock->is_locked(), "Precondition"); 5012 HeapRegion* res = NULL; 5013 bool first = true; 5014 while (res == NULL) { 5015 if (zero_filled || !first) { 5016 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5017 res = pop_free_region_list_locked(); 5018 if (res != NULL) { 5019 assert(!res->zero_fill_is_allocated(), 5020 "No allocated regions on free list."); 5021 res->set_zero_fill_allocated(); 5022 } else if (!first) { 5023 break; // We tried both, time to return NULL. 5024 } 5025 } 5026 5027 if (res == NULL) { 5028 res = alloc_region_from_unclean_list(zero_filled); 5029 } 5030 assert(res == NULL || 5031 !zero_filled || 5032 res->zero_fill_is_allocated(), 5033 "We must have allocated the region we're returning"); 5034 first = false; 5035 } 5036 return res; 5037 } 5038 5039 void G1CollectedHeap::remove_allocated_regions_from_lists() { 5040 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5041 { 5042 HeapRegion* prev = NULL; 5043 HeapRegion* cur = _unclean_region_list.hd(); 5044 while (cur != NULL) { 5045 HeapRegion* next = cur->next_from_unclean_list(); 5046 if (cur->zero_fill_is_allocated()) { 5047 // Remove from the list. 5048 if (prev == NULL) { 5049 (void)_unclean_region_list.pop(); 5050 } else { 5051 _unclean_region_list.delete_after(prev); 5052 } 5053 cur->set_on_unclean_list(false); 5054 cur->set_next_on_unclean_list(NULL); 5055 } else { 5056 prev = cur; 5057 } 5058 cur = next; 5059 } 5060 assert(_unclean_region_list.sz() == unclean_region_list_length(), 5061 "Inv"); 5062 } 5063 5064 { 5065 HeapRegion* prev = NULL; 5066 HeapRegion* cur = _free_region_list; 5067 while (cur != NULL) { 5068 HeapRegion* next = cur->next_from_free_list(); 5069 if (cur->zero_fill_is_allocated()) { 5070 // Remove from the list. 5071 if (prev == NULL) { 5072 _free_region_list = cur->next_from_free_list(); 5073 } else { 5074 prev->set_next_on_free_list(cur->next_from_free_list()); 5075 } 5076 cur->set_on_free_list(false); 5077 cur->set_next_on_free_list(NULL); 5078 _free_region_list_size--; 5079 } else { 5080 prev = cur; 5081 } 5082 cur = next; 5083 } 5084 assert(_free_region_list_size == free_region_list_length(), "Inv"); 5085 } 5086 } 5087 5088 bool G1CollectedHeap::verify_region_lists() { 5089 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5090 return verify_region_lists_locked(); 5091 } 5092 5093 bool G1CollectedHeap::verify_region_lists_locked() { 5094 HeapRegion* unclean = _unclean_region_list.hd(); 5095 while (unclean != NULL) { 5096 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); 5097 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); 5098 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, 5099 "Everything else is possible."); 5100 unclean = unclean->next_from_unclean_list(); 5101 } 5102 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); 5103 5104 HeapRegion* free_r = _free_region_list; 5105 while (free_r != NULL) { 5106 assert(free_r->is_on_free_list(), "Well, it is!"); 5107 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); 5108 switch (free_r->zero_fill_state()) { 5109 case HeapRegion::NotZeroFilled: 5110 case HeapRegion::ZeroFilling: 5111 guarantee(false, "Should not be on free list."); 5112 break; 5113 default: 5114 // Everything else is possible. 5115 break; 5116 } 5117 free_r = free_r->next_from_free_list(); 5118 } 5119 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); 5120 // If we didn't do an assertion... 5121 return true; 5122 } 5123 5124 size_t G1CollectedHeap::free_region_list_length() { 5125 assert(ZF_mon->owned_by_self(), "precondition."); 5126 size_t len = 0; 5127 HeapRegion* cur = _free_region_list; 5128 while (cur != NULL) { 5129 len++; 5130 cur = cur->next_from_free_list(); 5131 } 5132 return len; 5133 } 5134 5135 size_t G1CollectedHeap::unclean_region_list_length() { 5136 assert(ZF_mon->owned_by_self(), "precondition."); 5137 return _unclean_region_list.length(); 5138 } 5139 5140 size_t G1CollectedHeap::n_regions() { 5141 return _hrs->length(); 5142 } 5143 5144 size_t G1CollectedHeap::max_regions() { 5145 return 5146 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / 5147 HeapRegion::GrainBytes; 5148 } 5149 5150 size_t G1CollectedHeap::free_regions() { 5151 /* Possibly-expensive assert. 5152 assert(_free_regions == count_free_regions(), 5153 "_free_regions is off."); 5154 */ 5155 return _free_regions; 5156 } 5157 5158 bool G1CollectedHeap::should_zf() { 5159 return _free_region_list_size < (size_t) G1ConcZFMaxRegions; 5160 } 5161 5162 class RegionCounter: public HeapRegionClosure { 5163 size_t _n; 5164 public: 5165 RegionCounter() : _n(0) {} 5166 bool doHeapRegion(HeapRegion* r) { 5167 if (r->is_empty() && !r->popular()) { 5168 assert(!r->isHumongous(), "H regions should not be empty."); 5169 _n++; 5170 } 5171 return false; 5172 } 5173 int res() { return (int) _n; } 5174 }; 5175 5176 size_t G1CollectedHeap::count_free_regions() { 5177 RegionCounter rc; 5178 heap_region_iterate(&rc); 5179 size_t n = rc.res(); 5180 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) 5181 n--; 5182 return n; 5183 } 5184 5185 size_t G1CollectedHeap::count_free_regions_list() { 5186 size_t n = 0; 5187 size_t o = 0; 5188 ZF_mon->lock_without_safepoint_check(); 5189 HeapRegion* cur = _free_region_list; 5190 while (cur != NULL) { 5191 cur = cur->next_from_free_list(); 5192 n++; 5193 } 5194 size_t m = unclean_region_list_length(); 5195 ZF_mon->unlock(); 5196 return n + m; 5197 } 5198 5199 bool G1CollectedHeap::should_set_young_locked() { 5200 assert(heap_lock_held_for_gc(), 5201 "the heap lock should already be held by or for this thread"); 5202 return (g1_policy()->in_young_gc_mode() && 5203 g1_policy()->should_add_next_region_to_young_list()); 5204 } 5205 5206 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { 5207 assert(heap_lock_held_for_gc(), 5208 "the heap lock should already be held by or for this thread"); 5209 _young_list->push_region(hr); 5210 g1_policy()->set_region_short_lived(hr); 5211 } 5212 5213 class NoYoungRegionsClosure: public HeapRegionClosure { 5214 private: 5215 bool _success; 5216 public: 5217 NoYoungRegionsClosure() : _success(true) { } 5218 bool doHeapRegion(HeapRegion* r) { 5219 if (r->is_young()) { 5220 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", 5221 r->bottom(), r->end()); 5222 _success = false; 5223 } 5224 return false; 5225 } 5226 bool success() { return _success; } 5227 }; 5228 5229 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, 5230 bool check_sample) { 5231 bool ret = true; 5232 5233 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); 5234 if (!ignore_scan_only_list) { 5235 NoYoungRegionsClosure closure; 5236 heap_region_iterate(&closure); 5237 ret = ret && closure.success(); 5238 } 5239 5240 return ret; 5241 } 5242 5243 void G1CollectedHeap::empty_young_list() { 5244 assert(heap_lock_held_for_gc(), 5245 "the heap lock should already be held by or for this thread"); 5246 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); 5247 5248 _young_list->empty_list(); 5249 } 5250 5251 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { 5252 bool no_allocs = true; 5253 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { 5254 HeapRegion* r = _gc_alloc_regions[ap]; 5255 no_allocs = r == NULL || r->saved_mark_at_top(); 5256 } 5257 return no_allocs; 5258 } 5259 5260 void G1CollectedHeap::retire_all_alloc_regions() { 5261 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 5262 HeapRegion* r = _gc_alloc_regions[ap]; 5263 if (r != NULL) { 5264 // Check for aliases. 5265 bool has_processed_alias = false; 5266 for (int i = 0; i < ap; ++i) { 5267 if (_gc_alloc_regions[i] == r) { 5268 has_processed_alias = true; 5269 break; 5270 } 5271 } 5272 if (!has_processed_alias) { 5273 retire_alloc_region(r, false /* par */); 5274 } 5275 } 5276 } 5277 } 5278 5279 5280 // Done at the start of full GC. 5281 void G1CollectedHeap::tear_down_region_lists() { 5282 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5283 while (pop_unclean_region_list_locked() != NULL) ; 5284 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, 5285 "Postconditions of loop.") 5286 while (pop_free_region_list_locked() != NULL) ; 5287 assert(_free_region_list == NULL, "Postcondition of loop."); 5288 if (_free_region_list_size != 0) { 5289 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); 5290 print(); 5291 } 5292 assert(_free_region_list_size == 0, "Postconditions of loop."); 5293 } 5294 5295 5296 class RegionResetter: public HeapRegionClosure { 5297 G1CollectedHeap* _g1; 5298 int _n; 5299 public: 5300 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} 5301 bool doHeapRegion(HeapRegion* r) { 5302 if (r->continuesHumongous()) return false; 5303 if (r->top() > r->bottom()) { 5304 if (r->top() < r->end()) { 5305 Copy::fill_to_words(r->top(), 5306 pointer_delta(r->end(), r->top())); 5307 } 5308 r->set_zero_fill_allocated(); 5309 } else { 5310 assert(r->is_empty(), "tautology"); 5311 if (r->popular()) { 5312 if (r->zero_fill_state() != HeapRegion::Allocated) { 5313 r->ensure_zero_filled_locked(); 5314 r->set_zero_fill_allocated(); 5315 } 5316 } else { 5317 _n++; 5318 switch (r->zero_fill_state()) { 5319 case HeapRegion::NotZeroFilled: 5320 case HeapRegion::ZeroFilling: 5321 _g1->put_region_on_unclean_list_locked(r); 5322 break; 5323 case HeapRegion::Allocated: 5324 r->set_zero_fill_complete(); 5325 // no break; go on to put on free list. 5326 case HeapRegion::ZeroFilled: 5327 _g1->put_free_region_on_list_locked(r); 5328 break; 5329 } 5330 } 5331 } 5332 return false; 5333 } 5334 5335 int getFreeRegionCount() {return _n;} 5336 }; 5337 5338 // Done at the end of full GC. 5339 void G1CollectedHeap::rebuild_region_lists() { 5340 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5341 // This needs to go at the end of the full GC. 5342 RegionResetter rs; 5343 heap_region_iterate(&rs); 5344 _free_regions = rs.getFreeRegionCount(); 5345 // Tell the ZF thread it may have work to do. 5346 if (should_zf()) ZF_mon->notify_all(); 5347 } 5348 5349 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { 5350 G1CollectedHeap* _g1; 5351 int _n; 5352 public: 5353 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} 5354 bool doHeapRegion(HeapRegion* r) { 5355 if (r->continuesHumongous()) return false; 5356 if (r->top() > r->bottom()) { 5357 // There are assertions in "set_zero_fill_needed()" below that 5358 // require top() == bottom(), so this is technically illegal. 5359 // We'll skirt the law here, by making that true temporarily. 5360 DEBUG_ONLY(HeapWord* save_top = r->top(); 5361 r->set_top(r->bottom())); 5362 r->set_zero_fill_needed(); 5363 DEBUG_ONLY(r->set_top(save_top)); 5364 } 5365 return false; 5366 } 5367 }; 5368 5369 // Done at the start of full GC. 5370 void G1CollectedHeap::set_used_regions_to_need_zero_fill() { 5371 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 5372 // This needs to go at the end of the full GC. 5373 UsedRegionsNeedZeroFillSetter rs; 5374 heap_region_iterate(&rs); 5375 } 5376 5377 class CountObjClosure: public ObjectClosure { 5378 size_t _n; 5379 public: 5380 CountObjClosure() : _n(0) {} 5381 void do_object(oop obj) { _n++; } 5382 size_t n() { return _n; } 5383 }; 5384 5385 size_t G1CollectedHeap::pop_object_used_objs() { 5386 size_t sum_objs = 0; 5387 for (int i = 0; i < G1NumPopularRegions; i++) { 5388 CountObjClosure cl; 5389 _hrs->at(i)->object_iterate(&cl); 5390 sum_objs += cl.n(); 5391 } 5392 return sum_objs; 5393 } 5394 5395 size_t G1CollectedHeap::pop_object_used_bytes() { 5396 size_t sum_bytes = 0; 5397 for (int i = 0; i < G1NumPopularRegions; i++) { 5398 sum_bytes += _hrs->at(i)->used(); 5399 } 5400 return sum_bytes; 5401 } 5402 5403 5404 static int nq = 0; 5405 5406 HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) { 5407 while (_cur_pop_hr_index < G1NumPopularRegions) { 5408 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); 5409 HeapWord* res = cur_pop_region->allocate(word_size); 5410 if (res != NULL) { 5411 // We account for popular objs directly in the used summary: 5412 _summary_bytes_used += (word_size * HeapWordSize); 5413 return res; 5414 } 5415 // Otherwise, try the next region (first making sure that we remember 5416 // the last "top" value as the "next_top_at_mark_start", so that 5417 // objects made popular during markings aren't automatically considered 5418 // live). 5419 cur_pop_region->note_end_of_copying(); 5420 // Otherwise, try the next region. 5421 _cur_pop_hr_index++; 5422 } 5423 // XXX: For now !!! 5424 vm_exit_out_of_memory(word_size, 5425 "Not enough pop obj space (To Be Fixed)"); 5426 return NULL; 5427 } 5428 5429 class HeapRegionList: public CHeapObj { 5430 public: 5431 HeapRegion* hr; 5432 HeapRegionList* next; 5433 }; 5434 5435 void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) { 5436 // This might happen during parallel GC, so protect by this lock. 5437 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 5438 // We don't schedule regions whose evacuations are already pending, or 5439 // are already being evacuated. 5440 if (!r->popular_pending() && !r->in_collection_set()) { 5441 r->set_popular_pending(true); 5442 if (G1TracePopularity) { 5443 gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" " 5444 "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.", 5445 r, r->bottom(), r->end()); 5446 } 5447 HeapRegionList* hrl = new HeapRegionList; 5448 hrl->hr = r; 5449 hrl->next = _popular_regions_to_be_evacuated; 5450 _popular_regions_to_be_evacuated = hrl; 5451 } 5452 } 5453 5454 HeapRegion* G1CollectedHeap::popular_region_to_evac() { 5455 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 5456 HeapRegion* res = NULL; 5457 while (_popular_regions_to_be_evacuated != NULL && res == NULL) { 5458 HeapRegionList* hrl = _popular_regions_to_be_evacuated; 5459 _popular_regions_to_be_evacuated = hrl->next; 5460 res = hrl->hr; 5461 // The G1RSPopLimit may have increased, so recheck here... 5462 if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) { 5463 // Hah: don't need to schedule. 5464 if (G1TracePopularity) { 5465 gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" " 5466 "["PTR_FORMAT", "PTR_FORMAT") " 5467 "for pop-object evacuation (size %d < limit %d)", 5468 res, res->bottom(), res->end(), 5469 res->rem_set()->occupied(), G1RSPopLimit); 5470 } 5471 res->set_popular_pending(false); 5472 res = NULL; 5473 } 5474 // We do not reset res->popular() here; if we did so, it would allow 5475 // the region to be "rescheduled" for popularity evacuation. Instead, 5476 // this is done in the collection pause, with the world stopped. 5477 // So the invariant is that the regions in the list have the popularity 5478 // boolean set, but having the boolean set does not imply membership 5479 // on the list (though there can at most one such pop-pending region 5480 // not on the list at any time). 5481 delete hrl; 5482 } 5483 return res; 5484 } 5485 5486 void G1CollectedHeap::evac_popular_region(HeapRegion* hr) { 5487 while (true) { 5488 // Don't want to do a GC pause while cleanup is being completed! 5489 wait_for_cleanup_complete(); 5490 5491 // Read the GC count while holding the Heap_lock 5492 int gc_count_before = SharedHeap::heap()->total_collections(); 5493 g1_policy()->record_stop_world_start(); 5494 5495 { 5496 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 5497 VM_G1PopRegionCollectionPause op(gc_count_before, hr); 5498 VMThread::execute(&op); 5499 5500 // If the prolog succeeded, we didn't do a GC for this. 5501 if (op.prologue_succeeded()) break; 5502 } 5503 // Otherwise we didn't. We should recheck the size, though, since 5504 // the limit may have increased... 5505 if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) { 5506 hr->set_popular_pending(false); 5507 break; 5508 } 5509 } 5510 } 5511 5512 void G1CollectedHeap::atomic_inc_obj_rc(oop obj) { 5513 Atomic::inc(obj_rc_addr(obj)); 5514 } 5515 5516 class CountRCClosure: public OopsInHeapRegionClosure { 5517 G1CollectedHeap* _g1h; 5518 bool _parallel; 5519 public: 5520 CountRCClosure(G1CollectedHeap* g1h) : 5521 _g1h(g1h), _parallel(ParallelGCThreads > 0) 5522 {} 5523 void do_oop(narrowOop* p) { 5524 guarantee(false, "NYI"); 5525 } 5526 void do_oop(oop* p) { 5527 oop obj = *p; 5528 assert(obj != NULL, "Precondition."); 5529 if (_parallel) { 5530 // We go sticky at the limit to avoid excess contention. 5531 // If we want to track the actual RC's further, we'll need to keep a 5532 // per-thread hash table or something for the popular objects. 5533 if (_g1h->obj_rc(obj) < G1ObjPopLimit) { 5534 _g1h->atomic_inc_obj_rc(obj); 5535 } 5536 } else { 5537 _g1h->inc_obj_rc(obj); 5538 } 5539 } 5540 }; 5541 5542 class EvacPopObjClosure: public ObjectClosure { 5543 G1CollectedHeap* _g1h; 5544 size_t _pop_objs; 5545 size_t _max_rc; 5546 public: 5547 EvacPopObjClosure(G1CollectedHeap* g1h) : 5548 _g1h(g1h), _pop_objs(0), _max_rc(0) {} 5549 5550 void do_object(oop obj) { 5551 size_t rc = _g1h->obj_rc(obj); 5552 _max_rc = MAX2(rc, _max_rc); 5553 if (rc >= (size_t) G1ObjPopLimit) { 5554 _g1h->_pop_obj_rc_at_copy.add((double)rc); 5555 size_t word_sz = obj->size(); 5556 HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz); 5557 oop new_pop_obj = (oop)new_pop_loc; 5558 Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz); 5559 obj->forward_to(new_pop_obj); 5560 G1ScanAndBalanceClosure scan_and_balance(_g1h); 5561 new_pop_obj->oop_iterate_backwards(&scan_and_balance); 5562 // preserve "next" mark bit if marking is in progress. 5563 if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) { 5564 _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj); 5565 } 5566 5567 if (G1TracePopularity) { 5568 gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT 5569 " pop (%d), move to " PTR_FORMAT, 5570 (void*) obj, word_sz, 5571 _g1h->obj_rc(obj), (void*) new_pop_obj); 5572 } 5573 _pop_objs++; 5574 } 5575 } 5576 size_t pop_objs() { return _pop_objs; } 5577 size_t max_rc() { return _max_rc; } 5578 }; 5579 5580 class G1ParCountRCTask : public AbstractGangTask { 5581 G1CollectedHeap* _g1h; 5582 BitMap _bm; 5583 5584 size_t getNCards() { 5585 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) 5586 / G1BlockOffsetSharedArray::N_bytes; 5587 } 5588 CountRCClosure _count_rc_closure; 5589 public: 5590 G1ParCountRCTask(G1CollectedHeap* g1h) : 5591 AbstractGangTask("G1 Par RC Count task"), 5592 _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h) 5593 {} 5594 5595 void work(int i) { 5596 ResourceMark rm; 5597 HandleMark hm; 5598 _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i); 5599 } 5600 }; 5601 5602 void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) { 5603 // We're evacuating a single region (for popularity). 5604 if (G1TracePopularity) { 5605 gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")", 5606 popular_region->bottom(), popular_region->end()); 5607 } 5608 g1_policy()->set_single_region_collection_set(popular_region); 5609 size_t max_rc; 5610 if (!compute_reference_counts_and_evac_popular(popular_region, 5611 &max_rc)) { 5612 // We didn't evacuate any popular objects. 5613 // We increase the RS popularity limit, to prevent this from 5614 // happening in the future. 5615 if (G1RSPopLimit < (1 << 30)) { 5616 G1RSPopLimit *= 2; 5617 } 5618 // For now, interesting enough for a message: 5619 #if 1 5620 gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), " 5621 "failed to find a pop object (max = %d).", 5622 popular_region->bottom(), popular_region->end(), 5623 max_rc); 5624 gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit); 5625 #endif // 0 5626 // Also, we reset the collection set to NULL, to make the rest of 5627 // the collection do nothing. 5628 assert(popular_region->next_in_collection_set() == NULL, 5629 "should be single-region."); 5630 popular_region->set_in_collection_set(false); 5631 popular_region->set_popular_pending(false); 5632 g1_policy()->clear_collection_set(); 5633 } 5634 } 5635 5636 bool G1CollectedHeap:: 5637 compute_reference_counts_and_evac_popular(HeapRegion* popular_region, 5638 size_t* max_rc) { 5639 HeapWord* rc_region_bot; 5640 HeapWord* rc_region_end; 5641 5642 // Set up the reference count region. 5643 HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords); 5644 if (rc_region != NULL) { 5645 rc_region_bot = rc_region->bottom(); 5646 rc_region_end = rc_region->end(); 5647 } else { 5648 rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords); 5649 if (rc_region_bot == NULL) { 5650 vm_exit_out_of_memory(HeapRegion::GrainWords, 5651 "No space for RC region."); 5652 } 5653 rc_region_end = rc_region_bot + HeapRegion::GrainWords; 5654 } 5655 5656 if (G1TracePopularity) 5657 gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")", 5658 rc_region_bot, rc_region_end); 5659 if (rc_region_bot > popular_region->bottom()) { 5660 _rc_region_above = true; 5661 _rc_region_diff = 5662 pointer_delta(rc_region_bot, popular_region->bottom(), 1); 5663 } else { 5664 assert(rc_region_bot < popular_region->bottom(), "Can't be equal."); 5665 _rc_region_above = false; 5666 _rc_region_diff = 5667 pointer_delta(popular_region->bottom(), rc_region_bot, 1); 5668 } 5669 g1_policy()->record_pop_compute_rc_start(); 5670 // Count external references. 5671 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 5672 if (ParallelGCThreads > 0) { 5673 5674 set_par_threads(workers()->total_workers()); 5675 G1ParCountRCTask par_count_rc_task(this); 5676 workers()->run_task(&par_count_rc_task); 5677 set_par_threads(0); 5678 5679 } else { 5680 CountRCClosure count_rc_closure(this); 5681 g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0); 5682 } 5683 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 5684 g1_policy()->record_pop_compute_rc_end(); 5685 5686 // Now evacuate popular objects. 5687 g1_policy()->record_pop_evac_start(); 5688 EvacPopObjClosure evac_pop_obj_cl(this); 5689 popular_region->object_iterate(&evac_pop_obj_cl); 5690 *max_rc = evac_pop_obj_cl.max_rc(); 5691 5692 // Make sure the last "top" value of the current popular region is copied 5693 // as the "next_top_at_mark_start", so that objects made popular during 5694 // markings aren't automatically considered live. 5695 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); 5696 cur_pop_region->note_end_of_copying(); 5697 5698 if (rc_region != NULL) { 5699 free_region(rc_region); 5700 } else { 5701 FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot); 5702 } 5703 g1_policy()->record_pop_evac_end(); 5704 5705 return evac_pop_obj_cl.pop_objs() > 0; 5706 } 5707 5708 class CountPopObjInfoClosure: public HeapRegionClosure { 5709 size_t _objs; 5710 size_t _bytes; 5711 5712 class CountObjClosure: public ObjectClosure { 5713 int _n; 5714 public: 5715 CountObjClosure() : _n(0) {} 5716 void do_object(oop obj) { _n++; } 5717 size_t n() { return _n; } 5718 }; 5719 5720 public: 5721 CountPopObjInfoClosure() : _objs(0), _bytes(0) {} 5722 bool doHeapRegion(HeapRegion* r) { 5723 _bytes += r->used(); 5724 CountObjClosure blk; 5725 r->object_iterate(&blk); 5726 _objs += blk.n(); 5727 return false; 5728 } 5729 size_t objs() { return _objs; } 5730 size_t bytes() { return _bytes; } 5731 }; 5732 5733 5734 void G1CollectedHeap::print_popularity_summary_info() const { 5735 CountPopObjInfoClosure blk; 5736 for (int i = 0; i <= _cur_pop_hr_index; i++) { 5737 blk.doHeapRegion(_hrs->at(i)); 5738 } 5739 gclog_or_tty->print_cr("\nPopular objects: %d objs, %d bytes.", 5740 blk.objs(), blk.bytes()); 5741 gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].", 5742 _pop_obj_rc_at_copy.avg(), 5743 _pop_obj_rc_at_copy.maximum(), 5744 _pop_obj_rc_at_copy.sd()); 5745 } 5746 5747 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { 5748 _refine_cte_cl->set_concurrent(concurrent); 5749 } 5750 5751 #ifndef PRODUCT 5752 5753 class PrintHeapRegionClosure: public HeapRegionClosure { 5754 public: 5755 bool doHeapRegion(HeapRegion *r) { 5756 gclog_or_tty->print("Region: "PTR_FORMAT":", r); 5757 if (r != NULL) { 5758 if (r->is_on_free_list()) 5759 gclog_or_tty->print("Free "); 5760 if (r->is_young()) 5761 gclog_or_tty->print("Young "); 5762 if (r->isHumongous()) 5763 gclog_or_tty->print("Is Humongous "); 5764 r->print(); 5765 } 5766 return false; 5767 } 5768 }; 5769 5770 class SortHeapRegionClosure : public HeapRegionClosure { 5771 size_t young_regions,free_regions, unclean_regions; 5772 size_t hum_regions, count; 5773 size_t unaccounted, cur_unclean, cur_alloc; 5774 size_t total_free; 5775 HeapRegion* cur; 5776 public: 5777 SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), 5778 free_regions(0), unclean_regions(0), 5779 hum_regions(0), 5780 count(0), unaccounted(0), 5781 cur_alloc(0), total_free(0) 5782 {} 5783 bool doHeapRegion(HeapRegion *r) { 5784 count++; 5785 if (r->is_on_free_list()) free_regions++; 5786 else if (r->is_on_unclean_list()) unclean_regions++; 5787 else if (r->isHumongous()) hum_regions++; 5788 else if (r->is_young()) young_regions++; 5789 else if (r == cur) cur_alloc++; 5790 else unaccounted++; 5791 return false; 5792 } 5793 void print() { 5794 total_free = free_regions + unclean_regions; 5795 gclog_or_tty->print("%d regions\n", count); 5796 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", 5797 total_free, free_regions, unclean_regions); 5798 gclog_or_tty->print("%d humongous %d young\n", 5799 hum_regions, young_regions); 5800 gclog_or_tty->print("%d cur_alloc\n", cur_alloc); 5801 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); 5802 } 5803 }; 5804 5805 void G1CollectedHeap::print_region_counts() { 5806 SortHeapRegionClosure sc(_cur_alloc_region); 5807 PrintHeapRegionClosure cl; 5808 heap_region_iterate(&cl); 5809 heap_region_iterate(&sc); 5810 sc.print(); 5811 print_region_accounting_info(); 5812 }; 5813 5814 bool G1CollectedHeap::regions_accounted_for() { 5815 // TODO: regions accounting for young/survivor/tenured 5816 return true; 5817 } 5818 5819 bool G1CollectedHeap::print_region_accounting_info() { 5820 gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions); 5821 gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", 5822 free_regions(), 5823 count_free_regions(), count_free_regions_list(), 5824 _free_region_list_size, _unclean_region_list.sz()); 5825 gclog_or_tty->print_cr("cur_alloc: %d.", 5826 (_cur_alloc_region == NULL ? 0 : 1)); 5827 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); 5828 5829 // TODO: check regions accounting for young/survivor/tenured 5830 return true; 5831 } 5832 5833 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { 5834 HeapRegion* hr = heap_region_containing(p); 5835 if (hr == NULL) { 5836 return is_in_permanent(p); 5837 } else { 5838 return hr->is_in(p); 5839 } 5840 } 5841 #endif // PRODUCT 5842 5843 void G1CollectedHeap::g1_unimplemented() { 5844 // Unimplemented(); 5845 } 5846 5847 5848 // Local Variables: *** 5849 // c-indentation-style: gnu *** 5850 // End: ***