/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during /
// after the collection --- this is useful for debugging
#define YOUNG_LIST_VERBOSE 0

// CURRENT STATUS
// This file is under construction.  Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock.  This happens in
// mem_allocate_work, which all such allocation functions call.
// (Note that this does not apply to TLAB allocation, which is not part
// of this interface: it is done by clients of this interface.)

// Local to this file.

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};

class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};
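// All of the closures above share the CardTableEntryClosure protocol: a
// DirtyCardQueueSet walks its completed buffers and hands every recorded
// card to do_card_ptr(); returning false asks the caller to stop (e.g. in
// order to yield).  A minimal sketch of how a debugging closure is driven;
// this mirrors the real call sequence in check_ct_logs_at_safepoint()
// further down, and is shown for illustration only:
//
//   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
//   ClearLoggedCardTableEntryClosure clear;
//   dcqs.set_closure(&clear);                       // install the closure
//   dcqs.apply_closure_to_all_completed_buffers();  // drain full buffers
//   dcqs.iterate_closure_all_threads(false);        // then per-thread ones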
YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;

  ++_survivor_length;
}

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (y: %d, surv: %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _length is %d",
                           length, _length);
  }

  return ret;
}

bool YoungList::check_list_empty(bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  return ret;
}
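// The three rs_length_sampling_* methods below form a simple external
// iterator over the young list, accumulating remembered set lengths one
// region at a time.  A sketch of the intended driving loop (the real
// caller lives outside this file; shown here for illustration only):
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();  // samples one region
//   }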
273 if (_curr->in_collection_set()) { 274 // Update the collection set policy information for this region 275 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); 276 } 277 278 _curr = _curr->get_next_young_region(); 279 if (_curr == NULL) { 280 _last_sampled_rs_lengths = _sampled_rs_lengths; 281 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); 282 } 283 } 284 285 void 286 YoungList::reset_auxilary_lists() { 287 guarantee( is_empty(), "young list should be empty" ); 288 assert(check_list_well_formed(), "young list should be well formed"); 289 290 // Add survivor regions to SurvRateGroup. 291 _g1h->g1_policy()->note_start_adding_survivor_regions(); 292 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); 293 294 for (HeapRegion* curr = _survivor_head; 295 curr != NULL; 296 curr = curr->get_next_young_region()) { 297 _g1h->g1_policy()->set_region_survivors(curr); 298 299 // The region is a non-empty survivor so let's add it to 300 // the incremental collection set for the next evacuation 301 // pause. 302 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); 303 } 304 _g1h->g1_policy()->note_stop_adding_survivor_regions(); 305 306 _head = _survivor_head; 307 _length = _survivor_length; 308 if (_survivor_head != NULL) { 309 assert(_survivor_tail != NULL, "cause it shouldn't be"); 310 assert(_survivor_length > 0, "invariant"); 311 _survivor_tail->set_next_young_region(NULL); 312 } 313 314 // Don't clear the survivor list handles until the start of 315 // the next evacuation pause - we need it in order to re-tag 316 // the survivor regions from this evacuation pause as 'young' 317 // at the start of the next. 318 319 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); 320 321 assert(check_list_well_formed(), "young list should be well formed"); 322 } 323 324 void YoungList::print() { 325 HeapRegion* lists[] = {_head, _survivor_head}; 326 const char* names[] = {"YOUNG", "SURVIVOR"}; 327 328 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { 329 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); 330 HeapRegion *curr = lists[list]; 331 if (curr == NULL) 332 gclog_or_tty->print_cr(" empty"); 333 while (curr != NULL) { 334 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " 335 "age: %4d, y: %d, surv: %d", 336 curr->bottom(), curr->end(), 337 curr->top(), 338 curr->prev_top_at_mark_start(), 339 curr->next_top_at_mark_start(), 340 curr->top_at_conc_mark_count(), 341 curr->age_in_surv_rate_group_cond(), 342 curr->is_young(), 343 curr->is_survivor()); 344 curr = curr->get_next_young_region(); 345 } 346 } 347 348 gclog_or_tty->print_cr(""); 349 } 350 351 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) 352 { 353 // Claim the right to put the region on the dirty cards region list 354 // by installing a self pointer. 355 HeapRegion* next = hr->get_next_dirty_cards_region(); 356 if (next == NULL) { 357 HeapRegion* res = (HeapRegion*) 358 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), 359 NULL); 360 if (res == NULL) { 361 HeapRegion* head; 362 do { 363 // Put the region to the dirty cards region list. 364 head = _dirty_cards_region_list; 365 next = (HeapRegion*) 366 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); 367 if (next == head) { 368 assert(hr->get_next_dirty_cards_region() == hr, 369 "hr->get_next_dirty_cards_region() != hr"); 370 if (next == NULL) { 371 // The last region in the list points to itself. 
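// push_dirty_cards_region() below maintains a lock-free stack of regions
// that have dirty cards.  A region first claims membership by CAS-ing a
// self-pointer into its next_dirty_cards_region field (a region that
// points to itself also serves as the list terminator), and only the
// winner of that race links the region onto _dirty_cards_region_list.
// Sketch of the claim step, for illustration only:
//
//   // res == NULL iff this thread won the race to insert hr
//   HeapRegion* res = (HeapRegion*)
//     Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), NULL);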
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}

HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _czft->stop();
  _cmThread->stop();
}


void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.
// Finds a HeapRegion that can be used to allocate a block of the given size.

HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL) {
    if (res->is_empty()) {
      _free_regions--;
    }
    assert(!res->isHumongous() &&
           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
                   res->isHumongous(), zero_filled, res->zero_fill_state()));
    assert(!res->is_on_unclean_list(),
           "Alloc Regions must not be on the unclean list");
    if (G1PrintHeapRegions) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }
  return res;
}

HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
                                                         size_t word_size,
                                                         bool zero_filled) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      alloc_region->set_survivor();
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}
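// Humongous allocation, below: an object is "humongous" when its size
// reaches half a region (_humongous_object_threshold_in_words is set to
// HeapRegion::GrainWords / 2 in the constructor further down), and it is
// allocated in a run of contiguous whole regions.  Illustrative
// restatement of the region count computed in humongousObjAllocate():
//
//   size_t num_regions =
//     round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;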
// If the allocation could fit into the free regions without expansion, try that.
// Otherwise, if we can expand the heap, do so.
// Otherwise, if giving back the expansion regions might help, try with them given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list.  (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;

  // Special case if < one region???

  // Remember the number of regions we could still expand by.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help.  Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}

HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value.  If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      // As we're building (at least the young portion of) the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      if (allocated_young_region != NULL) {
        // We need to ensure that the store to top does not
        // float above the setting of the young type.
        OrderAccess::storestore();
      }
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}
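// mem_allocate() below follows the standard HotSpot slow-path shape:
// attempt the allocation under the Heap_lock; if that fails, submit a
// VM_G1CollectForAllocation operation (which pauses, collects, and retries
// the allocation at a safepoint) and loop.  Skeleton of the loop, for
// illustration only:
//
//   for (int try_count = 1; /* return or throw */; try_count += 1) {
//     { /* take Heap_lock, try attempt_allocation(), record gc count */ }
//     VM_G1CollectForAllocation op(word_size, gc_count_before);
//     VMThread::execute(&op);
//     if (op.prologue_succeeded()) return op.result();
//   }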
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool   is_noref,
                              bool   is_tlab,
                              bool* gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);
      if (result != NULL) {
        // attempt_allocation should have unlocked the heap lock
        assert(is_in(result), "result not in heap");
        return result;
      }
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
    }
  }
}

void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      // As we're building (at least the young portion of) the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}

void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}
class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};


class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  UpdateRSOopClosure _cl;
  int _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};
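// ParRebuildRSTask above parallelizes the remembered set rebuild using the
// heap region "claim value" protocol: each worker iterates over chunks of
// the region sequence, atomically re-tagging regions from
// InitialClaimValue to RebuildRSClaimValue so that no region is scanned
// twice.  The driving sequence, mirroring the code in do_collection()
// below (shown for illustration only):
//
//   ParRebuildRSTask rebuild_rs_task(this);
//   set_par_threads(workers()->total_workers());
//   workers()->run_task(&rebuild_rs_task);
//   set_par_threads(0);
//   reset_heap_region_claim_values();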
"Full GC (System.gc())" : "Full GC", 852 PrintGC, true, gclog_or_tty); 853 854 TraceMemoryManagerStats tms(true /* fullGC */); 855 856 double start = os::elapsedTime(); 857 g1_policy()->record_full_collection_start(); 858 859 gc_prologue(true); 860 increment_total_collections(true /* full gc */); 861 862 size_t g1h_prev_used = used(); 863 assert(used() == recalculate_used(), "Should be equal"); 864 865 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 866 HandleMark hm; // Discard invalid handles created during verification 867 prepare_for_verify(); 868 gclog_or_tty->print(" VerifyBeforeGC:"); 869 Universe::verify(true); 870 } 871 assert(regions_accounted_for(), "Region leakage!"); 872 873 COMPILER2_PRESENT(DerivedPointerTable::clear()); 874 875 // We want to discover references, but not process them yet. 876 // This mode is disabled in 877 // instanceRefKlass::process_discovered_references if the 878 // generation does some collection work, or 879 // instanceRefKlass::enqueue_discovered_references if the 880 // generation returns without doing any work. 881 ref_processor()->disable_discovery(); 882 ref_processor()->abandon_partial_discovery(); 883 ref_processor()->verify_no_references_recorded(); 884 885 // Abandon current iterations of concurrent marking and concurrent 886 // refinement, if any are in progress. 887 concurrent_mark()->abort(); 888 889 // Make sure we'll choose a new allocation region afterwards. 890 abandon_cur_alloc_region(); 891 abandon_gc_alloc_regions(); 892 assert(_cur_alloc_region == NULL, "Invariant."); 893 g1_rem_set()->cleanupHRRS(); 894 tear_down_region_lists(); 895 set_used_regions_to_need_zero_fill(); 896 897 // We may have added regions to the current incremental collection 898 // set between the last GC or pause and now. We need to clear the 899 // incremental collection set and then start rebuilding it afresh 900 // after this full GC. 901 abandon_collection_set(g1_policy()->inc_cset_head()); 902 g1_policy()->clear_incremental_cset(); 903 g1_policy()->stop_incremental_cset_building(); 904 905 if (g1_policy()->in_young_gc_mode()) { 906 empty_young_list(); 907 g1_policy()->set_full_young_gcs(true); 908 } 909 910 // Temporarily make reference _discovery_ single threaded (non-MT). 911 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); 912 913 // Temporarily make refs discovery atomic 914 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); 915 916 // Temporarily clear _is_alive_non_header 917 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); 918 919 ref_processor()->enable_discovery(); 920 ref_processor()->setup_policy(do_clear_all_soft_refs); 921 922 // Do collection work 923 { 924 HandleMark hm; // Discard invalid handles created during gc 925 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); 926 } 927 // Because freeing humongous regions may have added some unclean 928 // regions, it is necessary to tear down again before rebuilding. 
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    MemoryService::track_memory_usage();

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      prepare_for_verify();
      Universe::verify(false);
    }
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards.  Later we will rebuild remembered
    // sets. We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary.
    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    // Start a new incremental collection set for the next pause
    assert(g1_policy()->collection_set() == NULL, "must be");
    g1_policy()->start_incremental_cset_building();

    // Clear the _cset_fast_test bitmap in anticipation of adding
    // regions to the incremental collection set for the next
    // evacuation pause.
    clear_cset_fast_test();

    double end = os::elapsedTime();
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Discard all rset updates
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    // At this point there should be no regions in the
    // entire heap tagged as young.
    assert( check_young_list_empty(true /* check_heap */),
            "young list should be empty at this point");
  }

  // Update the number of full collections that have been completed.
  increment_full_collections_completed(false /* outer */);

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true,                /* explicit_gc */
                clear_all_soft_refs,
                0                    /* word_size */);
}

// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // This is enforced in arguments.cpp.
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
         "otherwise the code below doesn't make sense");

  // We don't have floating point command-line arguments.
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  const size_t max_heap_size = collector_policy()->max_heap_byte_size();
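  // Worked example of the sizing computation below, with illustrative
  // numbers only: if MinHeapFreeRatio is 40, the maximum used percentage
  // is 1.0 - 0.40 = 0.60; with used_after_gc = 600M this gives
  // minimum_desired_capacity = 600M / 0.60 = 1000M, so a capacity below
  // 1000M triggers an expansion (MaxHeapFreeRatio bounds shrinking
  // symmetrically via maximum_desired_capacity).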
  // We have to be careful here as these two calculations can overflow
  // 32-bit size_t's.
  double used_after_gc_d = (double) used_after_gc;
  double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;

  // Let's make sure that they are both under the max heap size, which
  // by default will make them fit into a size_t.
  double desired_capacity_upper_bound = (double) max_heap_size;
  minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
                                    desired_capacity_upper_bound);
  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
                                    desired_capacity_upper_bound);

  // We can now safely turn them into size_t's.
  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;

  // This assert only makes sense here, before we adjust them
  // with respect to the min and max heap size.
  assert(minimum_desired_capacity <= maximum_desired_capacity,
         err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
                 "maximum_desired_capacity = "SIZE_FORMAT,
                 minimum_desired_capacity, maximum_desired_capacity));

  // Should not be greater than the heap max size. No need to adjust
  // it with respect to the heap min size as it's a lower bound (i.e.,
  // we'll try to make the capacity larger than it, not smaller).
  minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  // Should not be less than the heap min size. No need to adjust it
  // with respect to the heap max size as it's an upper bound (i.e.,
  // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);

  if (PrintGC && Verbose) {
    const double free_percentage =
      (double) free_after_gc / (double) capacity_after_gc;
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  capacity: %6.1fK"
                           "  minimum_desired_capacity: %6.1fK"
                           "  maximum_desired_capacity: %6.1fK",
                           (double) capacity_after_gc / (double) K,
                           (double) minimum_desired_capacity / (double) K,
                           (double) maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_after_gc: %6.1fK"
                           "  used_after_gc: %6.1fK",
                           (double) free_after_gc / (double) K,
                           (double) used_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity_after_gc < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  expanding:"
                             "  max_heap_size: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK",
                             (double) max_heap_size / (double) K,
                             (double) minimum_desired_capacity / (double) K,
                             (double) expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity_after_gc > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  shrinking:"
                             "  min_heap_size: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK"
                             "  shrink_bytes: %6.1fK",
                             (double) min_heap_size / (double) K,
                             (double) maximum_desired_capacity / (double) K,
                             (double) shrink_bytes / (double) K);
    }
  }
}


HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
// Attempts to expand the heap sufficiently to support an allocation of the
// given "word_size".  If successful, performs the allocation and returns the
// address of the allocated block, or else NULL.

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}

void
G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
                                                   size_t& pre_used,
                                                   size_t& cleared_h,
                                                   size_t& freed_regions,
                                                   UncleanRegionList* list,
                                                   bool par) {
  assert(!hr->continuesHumongous(), "should have filtered these out");
  size_t res = 0;
  if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
      !hr->is_young()) {
    if (G1PolicyVerbose > 0)
      gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
                             " during cleanup", hr, hr->used());
    free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
  }
}
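// expand() below grows the committed space one region at a time: each
// iteration commits HeapRegion::GrainBytes of storage, extends
// _g1_committed, wraps the new range in a freshly constructed HeapRegion,
// inserts it into the HeapRegionSeq, and files it on the free list (if
// already zeroed) or the unclean list (if it still needs zero-filling).
// The request is first aligned up, as the first lines of the function
// show; illustrative restatement only:
//
//   expand_bytes = MAX2(expand_bytes, (size_t)K);       // at least 1K
//   size_t aligned = ReservedSpace::page_align_size_up(expand_bytes);
//   aligned = align_size_up(aligned, HeapRegion::GrainBytes);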
// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions(true /* totally */);
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}
// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _into_cset_dirty_card_queue_set(false),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL) ,
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _full_collections_completed(0),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _dirty_cards_region_list(NULL) {
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }

  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap]          = NULL;
    _gc_alloc_region_counts[ap]    = 0;
    _retained_gc_alloc_regions[ap] = NULL;
    // by default, we do not retain a GC alloc region for each ap;
    // we'll override this, when appropriate, below
    _retain_gc_alloc_region[ap]    = false;
  }

  // We will try to remember the last half-full tenured region we
  // allocated to at the end of a collection so that we can re-use it
  // during the next collection.
  _retain_gc_alloc_region[GCAllocForTenured] = true;

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
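// initialize() below reserves the whole heap (G1 proper plus perm gen) in
// a single ReservedSpace.  When compressed oops are in use it tries up to
// three preferred base addresses, from the cheapest oop decoding to the
// most general.  Illustrative restatement of the fallback order only:
//
//   addr = Universe::preferred_heap_base(total, Universe::UnscaledNarrowOop);
//   // if that reservation fails:
//   addr = Universe::preferred_heap_base(total, Universe::ZeroBasedNarrowOop);
//   // and finally (addr will be NULL, i.e. reserve anywhere):
//   addr = Universe::preferred_heap_base(total, Universe::HeapBasedNarrowOop);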
jint G1CollectedHeap::initialize() {
  CollectedHeap::pre_initialize();
  os::enable_vtime();

  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

  _cg1r = new ConcurrentG1Refine();

  // Reserve the maximum.
  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  // Includes the perm-gen.

  const size_t total_reserved = max_byte_size + pgs->max_size();
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                        HeapRegion::GrainBytes,
                        false /*ism*/, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
                             false /*ism*/, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
                               false /*ism*/, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (I've actually seen this
  // happen in asserts: DLD.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;

  _num_humongous_regions = 0;

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  set_barrier_set(rem_set()->bs());
  if (barrier_set()->is_a(BarrierSet::ModRef)) {
    _mr_bs = (ModRefBarrierSet*)_barrier_set;
  } else {
    vm_exit_during_initialization("G1 requires a mod ref bs.");
    return JNI_ENOMEM;
  }

  // Also create a G1 rem set.
  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  } else {
    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
    return JNI_ENOMEM;
  }
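  // Below, two guarantees (see bug 6843694) check that the chosen region
  // count and region size keep index values inside their compact typedefs:
  // a region index must fit in RegionIdx_t and a card index within a
  // region must fit in CardIdx_t.  Both bounds have the same shape; for an
  // index type T the largest value the code allows is (sketch only):
  //
  //   ((size_t)1 << (sizeof(T) * BitsPerByte - 1)) - 1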
  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
                           g1_rs.size()/HeapWordSize);
  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);

  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());

  _g1_storage.initialize(g1_rs, 0);
  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  _g1_max_committed = _g1_committed;
  _hrs = new HeapRegionSeq(_expansion_regions);
  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  guarantee(_cur_alloc_region == NULL, "from constructor");

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
            "too many cards per region");

  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                             heap_word_size(init_byte_size));

  _g1h = this;

  _in_cset_fast_test_length = max_regions();
  _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);

  // We're biasing _in_cset_fast_test to avoid subtracting the
  // beginning of the heap every time we want to index; basically
  // it's the same as what we do with the card table.
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

  // Clear the _cset_fast_test bitmap in anticipation of adding
  // regions to the incremental collection set for the first
  // evacuation pause.
  clear_cset_fast_test();
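  // With the biasing above, the in-collection-set test for an arbitrary
  // heap address is a single array load indexed by region number, exactly
  // like a card table lookup.  A minimal sketch, assuming only the fields
  // set up above (illustration only; the real accessor is defined
  // elsewhere):
  //
  //   bool in_cset_fast_test(HeapWord* addr) {
  //     size_t index = ((size_t) addr) >> HeapRegion::LogOfHRGrainBytes;
  //     return _in_cset_fast_test[index];
  //   }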
  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  _cmThread = _cm->cmThread();

  // ...and the concurrent zero-fill thread, if necessary.
  if (G1ConcZeroFill) {
    _czft = new ConcurrentZFThread();
  }

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the initial heap size.
  expand(init_byte_size);

  // Perform any initialization actions delegated to the policy.
  g1_policy()->init();

  g1_policy()->note_start_of_mark_thread();

  _refine_cte_cl =
    new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                    g1_rem_set(),
                                    concurrent_g1_refine());
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);

  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               G1SATBProcessCompletedThreshold,
                                               Shared_SATB_Q_lock);

  JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                DirtyCardQ_FL_lock,
                                                concurrent_g1_refine()->yellow_zone(),
                                                concurrent_g1_refine()->red_zone(),
                                                Shared_DirtyCardQ_lock);

  if (G1DeferredRSUpdate) {
    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                      DirtyCardQ_FL_lock,
                                      -1, // never trigger processing
                                      -1, // no limit on length
                                      Shared_DirtyCardQ_lock,
                                      &JavaThread::dirty_card_queue_set());
  }

  // Initialize the card queue set used to hold cards containing
  // references into the collection set.
  _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
                                             DirtyCardQ_FL_lock,
                                             -1, // never trigger processing
                                             -1, // no limit on length
                                             Shared_DirtyCardQ_lock,
                                             &JavaThread::dirty_card_queue_set());

  // In case we're keeping closure specialization stats, initialize those
  // counts and that mechanism.
  SpecializationStats::clear();

  _gc_alloc_region_list = NULL;

  // Do later initialization work for concurrent refinement.
  _cg1r->init();

  return JNI_OK;
}

void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
                                         mr,    // span
                                         false, // Reference discovery is not atomic
                                                // (though it shouldn't matter here.)
                                         true,  // mt_discovery
                                         NULL,  // is alive closure: need to fill this in for efficiency
                                         ParallelGCThreads,
                                         ParallelRefProcEnabled,
                                         true); // Setting next fields of discovered
                                                // lists requires a barrier.
}

size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}

void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
1668 1669 size_t G1CollectedHeap::used() const { 1670 assert(Heap_lock->owner() != NULL, 1671 "Should be owned on this thread's behalf."); 1672 size_t result = _summary_bytes_used; 1673 // Read only once in case it is set to NULL concurrently 1674 HeapRegion* hr = _cur_alloc_region; 1675 if (hr != NULL) 1676 result += hr->used(); 1677 return result; 1678 } 1679 1680 size_t G1CollectedHeap::used_unlocked() const { 1681 size_t result = _summary_bytes_used; 1682 return result; 1683 } 1684 1685 class SumUsedClosure: public HeapRegionClosure { 1686 size_t _used; 1687 public: 1688 SumUsedClosure() : _used(0) {} 1689 bool doHeapRegion(HeapRegion* r) { 1690 if (!r->continuesHumongous()) { 1691 _used += r->used(); 1692 } 1693 return false; 1694 } 1695 size_t result() { return _used; } 1696 }; 1697 1698 size_t G1CollectedHeap::recalculate_used() const { 1699 SumUsedClosure blk; 1700 _hrs->iterate(&blk); 1701 return blk.result(); 1702 } 1703 1704 #ifndef PRODUCT 1705 class SumUsedRegionsClosure: public HeapRegionClosure { 1706 size_t _num; 1707 public: 1708 SumUsedRegionsClosure() : _num(0) {} 1709 bool doHeapRegion(HeapRegion* r) { 1710 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { 1711 _num += 1; 1712 } 1713 return false; 1714 } 1715 size_t result() { return _num; } 1716 }; 1717 1718 size_t G1CollectedHeap::recalculate_used_regions() const { 1719 SumUsedRegionsClosure blk; 1720 _hrs->iterate(&blk); 1721 return blk.result(); 1722 } 1723 #endif // PRODUCT 1724 1725 size_t G1CollectedHeap::unsafe_max_alloc() { 1726 if (_free_regions > 0) return HeapRegion::GrainBytes; 1727 // otherwise, is there space in the current allocation region? 1728 1729 // We need to store the current allocation region in a local variable 1730 // here. The problem is that this method doesn't take any locks and 1731 // there may be other threads which overwrite the current allocation 1732 // region field. attempt_allocation(), for example, sets it to NULL 1733 // and this can happen *after* the NULL check here but before the call 1734 // to free(), resulting in a SIGSEGV. Note that this doesn't appear 1735 // to be a problem in the optimized build, since the two loads of the 1736 // current allocation region field are optimized away. 1737 HeapRegion* car = _cur_alloc_region; 1738 1739 // FIXME: should iterate over all regions? 1740 if (car == NULL) { 1741 return 0; 1742 } 1743 return car->free(); 1744 } 1745 1746 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 1747 return 1748 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || 1749 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); 1750 } 1751 1752 void G1CollectedHeap::increment_full_collections_completed(bool outer) { 1753 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); 1754 1755 // We have already incremented _total_full_collections at the start 1756 // of the GC, so total_full_collections() represents how many full 1757 // collections have been started. 1758 unsigned int full_collections_started = total_full_collections(); 1759 1760 // Given that this method is called at the end of a Full GC or of a 1761 // concurrent cycle, and those can be nested (i.e., a Full GC can 1762 // interrupt a concurrent cycle), the number of full collections 1763 // completed should be either one (in the case where there was no 1764 // nesting) or two (when a Full GC interrupted a concurrent cycle) 1765 // behind the number of full collections started. 
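// A worked example of the arithmetic checked by the asserts below:
// say _full_collections_completed == 4 when a concurrent cycle starts
// (total_full_collections() becomes 5). If a Full GC then interrupts
// the cycle (total_full_collections() becomes 6), the Full GC's inner
// call into this method sees started == completed + 2 and bumps
// completed to 5; the interrupted cycle's outer call later sees
// started == completed + 1 and bumps it to 6.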
1766
1767 // This is the case for the inner caller, i.e. a Full GC.
1768 assert(outer ||
1769 (full_collections_started == _full_collections_completed + 1) ||
1770 (full_collections_started == _full_collections_completed + 2),
1771 err_msg("for inner caller: full_collections_started = %u "
1772 "is inconsistent with _full_collections_completed = %u",
1773 full_collections_started, _full_collections_completed));
1774
1775 // This is the case for the outer caller, i.e. the concurrent cycle.
1776 assert(!outer ||
1777 (full_collections_started == _full_collections_completed + 1),
1778 err_msg("for outer caller: full_collections_started = %u "
1779 "is inconsistent with _full_collections_completed = %u",
1780 full_collections_started, _full_collections_completed));
1781
1782 _full_collections_completed += 1;
1783
1784 // We need to clear the "in_progress" flag in the CM thread before
1785 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
1786 // is set) so that if a waiter requests another System.gc() it doesn't
1787 // incorrectly see that a marking cycle is still in progress.
1788 if (outer) {
1789 _cmThread->clear_in_progress();
1790 }
1791
1792 // This notify_all() will ensure that a thread that called
1793 // System.gc() (with ExplicitGCInvokesConcurrent set or not) and
1794 // is waiting for a full GC to finish will be woken up. It is
1795 // waiting in VM_G1IncCollectionPause::doit_epilogue().
1796 FullGCCount_lock->notify_all();
1797 }
1798
1799 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
1800 assert(Thread::current()->is_VM_thread(), "Precondition#1");
1801 assert(Heap_lock->is_locked(), "Precondition#2");
1802 GCCauseSetter gcs(this, cause);
1803 switch (cause) {
1804 case GCCause::_heap_inspection:
1805 case GCCause::_heap_dump: {
1806 HandleMark hm;
1807 do_full_collection(false); // don't clear all soft refs
1808 break;
1809 }
1810 default: // XXX FIX ME
1811 ShouldNotReachHere(); // Unexpected use of this function
1812 }
1813 }
1814
1815 void G1CollectedHeap::collect(GCCause::Cause cause) {
1816 // The caller doesn't have the Heap_lock
1817 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
1818
1819 unsigned int gc_count_before;
1820 unsigned int full_gc_count_before;
1821 {
1822 MutexLocker ml(Heap_lock);
1823 // Read the GC count while holding the Heap_lock
1824 gc_count_before = SharedHeap::heap()->total_collections();
1825 full_gc_count_before = SharedHeap::heap()->total_full_collections();
1826
1827 // Don't want to do a GC until cleanup is completed.
1828 wait_for_cleanup_complete();
1829
1830 // We give up the heap lock; VMThread::execute gets it back below
1831 }
1832
1833 if (should_do_concurrent_full_gc(cause)) {
1834 // Schedule an initial-mark evacuation pause that will start a
1835 // concurrent cycle.
1836 VM_G1IncCollectionPause op(gc_count_before,
1837 true, /* should_initiate_conc_mark */
1838 g1_policy()->max_pause_time_ms(),
1839 cause);
1840 VMThread::execute(&op);
1841 } else {
1842 if (cause == GCCause::_gc_locker
1843 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
1844
1845 // Schedule a standard evacuation pause.
1846 VM_G1IncCollectionPause op(gc_count_before,
1847 false, /* should_initiate_conc_mark */
1848 g1_policy()->max_pause_time_ms(),
1849 cause);
1850 VMThread::execute(&op);
1851 } else {
1852 // Schedule a Full GC.
1853 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); 1854 VMThread::execute(&op); 1855 } 1856 } 1857 } 1858 1859 bool G1CollectedHeap::is_in(const void* p) const { 1860 if (_g1_committed.contains(p)) { 1861 HeapRegion* hr = _hrs->addr_to_region(p); 1862 return hr->is_in(p); 1863 } else { 1864 return _perm_gen->as_gen()->is_in(p); 1865 } 1866 } 1867 1868 // Iteration functions. 1869 1870 // Iterates an OopClosure over all ref-containing fields of objects 1871 // within a HeapRegion. 1872 1873 class IterateOopClosureRegionClosure: public HeapRegionClosure { 1874 MemRegion _mr; 1875 OopClosure* _cl; 1876 public: 1877 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) 1878 : _mr(mr), _cl(cl) {} 1879 bool doHeapRegion(HeapRegion* r) { 1880 if (! r->continuesHumongous()) { 1881 r->oop_iterate(_cl); 1882 } 1883 return false; 1884 } 1885 }; 1886 1887 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { 1888 IterateOopClosureRegionClosure blk(_g1_committed, cl); 1889 _hrs->iterate(&blk); 1890 if (do_perm) { 1891 perm_gen()->oop_iterate(cl); 1892 } 1893 } 1894 1895 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { 1896 IterateOopClosureRegionClosure blk(mr, cl); 1897 _hrs->iterate(&blk); 1898 if (do_perm) { 1899 perm_gen()->oop_iterate(cl); 1900 } 1901 } 1902 1903 // Iterates an ObjectClosure over all objects within a HeapRegion. 1904 1905 class IterateObjectClosureRegionClosure: public HeapRegionClosure { 1906 ObjectClosure* _cl; 1907 public: 1908 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1909 bool doHeapRegion(HeapRegion* r) { 1910 if (! r->continuesHumongous()) { 1911 r->object_iterate(_cl); 1912 } 1913 return false; 1914 } 1915 }; 1916 1917 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { 1918 IterateObjectClosureRegionClosure blk(cl); 1919 _hrs->iterate(&blk); 1920 if (do_perm) { 1921 perm_gen()->object_iterate(cl); 1922 } 1923 } 1924 1925 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { 1926 // FIXME: is this right? 1927 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); 1928 } 1929 1930 // Calls a SpaceClosure on a HeapRegion. 1931 1932 class SpaceClosureRegionClosure: public HeapRegionClosure { 1933 SpaceClosure* _cl; 1934 public: 1935 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} 1936 bool doHeapRegion(HeapRegion* r) { 1937 _cl->do_space(r); 1938 return false; 1939 } 1940 }; 1941 1942 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { 1943 SpaceClosureRegionClosure blk(cl); 1944 _hrs->iterate(&blk); 1945 } 1946 1947 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { 1948 _hrs->iterate(cl); 1949 } 1950 1951 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, 1952 HeapRegionClosure* cl) { 1953 _hrs->iterate_from(r, cl); 1954 } 1955 1956 void 1957 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { 1958 _hrs->iterate_from(idx, cl); 1959 } 1960 1961 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } 1962 1963 void 1964 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 1965 int worker, 1966 jint claim_value) { 1967 const size_t regions = n_regions(); 1968 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? 
ParallelGCThreads : 1);
1969 // try to spread out the starting points of the workers
1970 const size_t start_index = regions / worker_num * (size_t) worker;
1971
1972 // each worker will actually look at all regions
1973 for (size_t count = 0; count < regions; ++count) {
1974 const size_t index = (start_index + count) % regions;
1975 assert(0 <= index && index < regions, "sanity");
1976 HeapRegion* r = region_at(index);
1977 // we'll ignore "continues humongous" regions (we'll process them
1978 // when we come across their corresponding "start humongous"
1979 // region) and regions already claimed
1980 if (r->claim_value() == claim_value || r->continuesHumongous()) {
1981 continue;
1982 }
1983 // OK, try to claim it
1984 if (r->claimHeapRegion(claim_value)) {
1985 // success!
1986 assert(!r->continuesHumongous(), "sanity");
1987 if (r->startsHumongous()) {
1988 // If the region is "starts humongous" we'll iterate over its
1989 // "continues humongous" regions first; in fact we'll do them
1990 // first. The order is important: in one case, calling the
1991 // closure on the "starts humongous" region might de-allocate
1992 // and clear all its "continues humongous" regions and, as a
1993 // result, we might end up processing them twice. So, we'll do
1994 // them first (notice: most closures will ignore them anyway) and
1995 // then we'll do the "starts humongous" region.
1996 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
1997 HeapRegion* chr = region_at(ch_index);
1998
1999 // if the region has already been claimed or it's not
2000 // "continues humongous" we're done
2001 if (chr->claim_value() == claim_value ||
2002 !chr->continuesHumongous()) {
2003 break;
2004 }
2005
2006 // No one should have claimed it directly, given
2007 // that we claimed its "starts humongous" region.
2008 assert(chr->claim_value() != claim_value, "sanity");
2009 assert(chr->humongous_start_region() == r, "sanity");
2010
2011 if (chr->claimHeapRegion(claim_value)) {
2012 // we should always be able to claim it; no one else should
2013 // be trying to claim this region
2014
2015 bool res2 = cl->doHeapRegion(chr);
2016 assert(!res2, "Should not abort");
2017
2018 // Right now, this holds (i.e., no closure that actually
2019 // does something with "continues humongous" regions
2020 // clears them). We might have to weaken it in the future,
2021 // but let's leave these two asserts here for extra safety.
2022 assert(chr->continuesHumongous(), "should still be the case");
2023 assert(chr->humongous_start_region() == r, "sanity");
2024 } else {
2025 guarantee(false, "we should not reach here");
2026 }
2027 }
2028 }
2029
2030 assert(!r->continuesHumongous(), "sanity");
2031 bool res = cl->doHeapRegion(r);
2032 assert(!res, "Should not abort");
2033 }
2034 }
2035 }
2036
2037 class ResetClaimValuesClosure: public HeapRegionClosure {
2038 public:
2039 bool doHeapRegion(HeapRegion* r) {
2040 r->set_claim_value(HeapRegion::InitialClaimValue);
2041 return false;
2042 }
2043 };
2044
2045 void
2046 G1CollectedHeap::reset_heap_region_claim_values() {
2047 ResetClaimValuesClosure blk;
2048 heap_region_iterate(&blk);
2049 }
2050
2051 #ifdef ASSERT
2052 // This checks whether all regions in the heap have the correct claim
2053 // value. It also piggy-backs a check to ensure that the
2054 // humongous_start_region() information on "continues humongous"
2055 // regions is correct.
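// A sketch of the intended usage (the real call sites are in verify()
// below): bracket a parallel phase with
//
//   assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
//          "sanity check");
//   ... run the parallel task, claiming with ParVerifyClaimValue ...
//   assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
//          "sanity check");
//   reset_heap_region_claim_values();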
2056 2057 class CheckClaimValuesClosure : public HeapRegionClosure { 2058 private: 2059 jint _claim_value; 2060 size_t _failures; 2061 HeapRegion* _sh_region; 2062 public: 2063 CheckClaimValuesClosure(jint claim_value) : 2064 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 2065 bool doHeapRegion(HeapRegion* r) { 2066 if (r->claim_value() != _claim_value) { 2067 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2068 "claim value = %d, should be %d", 2069 r->bottom(), r->end(), r->claim_value(), 2070 _claim_value); 2071 ++_failures; 2072 } 2073 if (!r->isHumongous()) { 2074 _sh_region = NULL; 2075 } else if (r->startsHumongous()) { 2076 _sh_region = r; 2077 } else if (r->continuesHumongous()) { 2078 if (r->humongous_start_region() != _sh_region) { 2079 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2080 "HS = "PTR_FORMAT", should be "PTR_FORMAT, 2081 r->bottom(), r->end(), 2082 r->humongous_start_region(), 2083 _sh_region); 2084 ++_failures; 2085 } 2086 } 2087 return false; 2088 } 2089 size_t failures() { 2090 return _failures; 2091 } 2092 }; 2093 2094 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 2095 CheckClaimValuesClosure cl(claim_value); 2096 heap_region_iterate(&cl); 2097 return cl.failures() == 0; 2098 } 2099 #endif // ASSERT 2100 2101 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 2102 HeapRegion* r = g1_policy()->collection_set(); 2103 while (r != NULL) { 2104 HeapRegion* next = r->next_in_collection_set(); 2105 if (cl->doHeapRegion(r)) { 2106 cl->incomplete(); 2107 return; 2108 } 2109 r = next; 2110 } 2111 } 2112 2113 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, 2114 HeapRegionClosure *cl) { 2115 if (r == NULL) { 2116 // The CSet is empty so there's nothing to do. 2117 return; 2118 } 2119 2120 assert(r->in_collection_set(), 2121 "Start region must be a member of the collection set."); 2122 HeapRegion* cur = r; 2123 while (cur != NULL) { 2124 HeapRegion* next = cur->next_in_collection_set(); 2125 if (cl->doHeapRegion(cur) && false) { 2126 cl->incomplete(); 2127 return; 2128 } 2129 cur = next; 2130 } 2131 cur = g1_policy()->collection_set(); 2132 while (cur != r) { 2133 HeapRegion* next = cur->next_in_collection_set(); 2134 if (cl->doHeapRegion(cur) && false) { 2135 cl->incomplete(); 2136 return; 2137 } 2138 cur = next; 2139 } 2140 } 2141 2142 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 2143 return _hrs->length() > 0 ? 
_hrs->at(0) : NULL;
2144 }
2145
2146
2147 Space* G1CollectedHeap::space_containing(const void* addr) const {
2148 Space* res = heap_region_containing(addr);
2149 if (res == NULL)
2150 res = perm_gen()->space_containing(addr);
2151 return res;
2152 }
2153
2154 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2155 Space* sp = space_containing(addr);
2156 if (sp != NULL) {
2157 return sp->block_start(addr);
2158 }
2159 return NULL;
2160 }
2161
2162 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2163 Space* sp = space_containing(addr);
2164 assert(sp != NULL, "block_size of address outside of heap");
2165 return sp->block_size(addr);
2166 }
2167
2168 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2169 Space* sp = space_containing(addr);
2170 return sp->block_is_obj(addr);
2171 }
2172
2173 bool G1CollectedHeap::supports_tlab_allocation() const {
2174 return true;
2175 }
2176
2177 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2178 return HeapRegion::GrainBytes;
2179 }
2180
2181 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2182 // Return the remaining space in the cur alloc region, but not less than
2183 // the min TLAB size.
2184
2185 // Also, this value can be at most the humongous object threshold,
2186 // since we can't allow tlabs to grow big enough to accommodate
2187 // humongous objects.
2188
2189 // We need to store the cur alloc region locally, since it might change
2190 // between when we test for NULL and when we use it later.
2191 ContiguousSpace* cur_alloc_space = _cur_alloc_region;
2192 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2193
2194 if (cur_alloc_space == NULL) {
2195 return max_tlab_size;
2196 } else {
2197 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
2198 max_tlab_size);
2199 }
2200 }
2201
2202 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
2203 assert(!isHumongous(word_size),
2204 err_msg("a TLAB should not be of humongous size, "
2205 "word_size = "SIZE_FORMAT, word_size));
2206 bool dummy;
2207 return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
2208 }
2209
2210 bool G1CollectedHeap::allocs_are_zero_filled() {
2211 return false;
2212 }
2213
2214 size_t G1CollectedHeap::large_typearray_limit() {
2215 // FIXME
2216 return HeapRegion::GrainBytes/HeapWordSize;
2217 }
2218
2219 size_t G1CollectedHeap::max_capacity() const {
2220 return g1_reserved_obj_bytes();
2221 }
2222
2223 jlong G1CollectedHeap::millis_since_last_gc() {
2224 // assert(false, "NYI");
2225 return 0;
2226 }
2227
2228
2229 void G1CollectedHeap::prepare_for_verify() {
2230 if (SafepointSynchronize::is_at_safepoint() || !
UseTLAB) { 2231 ensure_parsability(false); 2232 } 2233 g1_rem_set()->prepare_for_verify(); 2234 } 2235 2236 class VerifyLivenessOopClosure: public OopClosure { 2237 G1CollectedHeap* g1h; 2238 public: 2239 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { 2240 g1h = _g1h; 2241 } 2242 void do_oop(narrowOop *p) { do_oop_work(p); } 2243 void do_oop( oop *p) { do_oop_work(p); } 2244 2245 template <class T> void do_oop_work(T *p) { 2246 oop obj = oopDesc::load_decode_heap_oop(p); 2247 guarantee(obj == NULL || !g1h->is_obj_dead(obj), 2248 "Dead object referenced by a not dead object"); 2249 } 2250 }; 2251 2252 class VerifyObjsInRegionClosure: public ObjectClosure { 2253 private: 2254 G1CollectedHeap* _g1h; 2255 size_t _live_bytes; 2256 HeapRegion *_hr; 2257 bool _use_prev_marking; 2258 public: 2259 // use_prev_marking == true -> use "prev" marking information, 2260 // use_prev_marking == false -> use "next" marking information 2261 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) 2262 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { 2263 _g1h = G1CollectedHeap::heap(); 2264 } 2265 void do_object(oop o) { 2266 VerifyLivenessOopClosure isLive(_g1h); 2267 assert(o != NULL, "Huh?"); 2268 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { 2269 o->oop_iterate(&isLive); 2270 if (!_hr->obj_allocated_since_prev_marking(o)) { 2271 size_t obj_size = o->size(); // Make sure we don't overflow 2272 _live_bytes += (obj_size * HeapWordSize); 2273 } 2274 } 2275 } 2276 size_t live_bytes() { return _live_bytes; } 2277 }; 2278 2279 class PrintObjsInRegionClosure : public ObjectClosure { 2280 HeapRegion *_hr; 2281 G1CollectedHeap *_g1; 2282 public: 2283 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { 2284 _g1 = G1CollectedHeap::heap(); 2285 }; 2286 2287 void do_object(oop o) { 2288 if (o != NULL) { 2289 HeapWord *start = (HeapWord *) o; 2290 size_t word_sz = o->size(); 2291 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT 2292 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", 2293 (void*) o, word_sz, 2294 _g1->isMarkedPrev(o), 2295 _g1->isMarkedNext(o), 2296 _hr->obj_allocated_since_prev_marking(o)); 2297 HeapWord *end = start + word_sz; 2298 HeapWord *cur; 2299 int *val; 2300 for (cur = start; cur < end; cur++) { 2301 val = (int *) cur; 2302 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); 2303 } 2304 } 2305 } 2306 }; 2307 2308 class VerifyRegionClosure: public HeapRegionClosure { 2309 private: 2310 bool _allow_dirty; 2311 bool _par; 2312 bool _use_prev_marking; 2313 bool _failures; 2314 public: 2315 // use_prev_marking == true -> use "prev" marking information, 2316 // use_prev_marking == false -> use "next" marking information 2317 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) 2318 : _allow_dirty(allow_dirty), 2319 _par(par), 2320 _use_prev_marking(use_prev_marking), 2321 _failures(false) {} 2322 2323 bool failures() { 2324 return _failures; 2325 } 2326 2327 bool doHeapRegion(HeapRegion* r) { 2328 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, 2329 "Should be unclaimed at verify points."); 2330 if (!r->continuesHumongous()) { 2331 bool failures = false; 2332 r->verify(_allow_dirty, _use_prev_marking, &failures); 2333 if (failures) { 2334 _failures = true; 2335 } else { 2336 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); 2337 r->object_iterate(¬_dead_yet_cl); 2338 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { 2339 
gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
2340 "max_live_bytes "SIZE_FORMAT" "
2341 "< calculated "SIZE_FORMAT,
2342 r->bottom(), r->end(),
2343 r->max_live_bytes(),
2344 not_dead_yet_cl.live_bytes());
2345 _failures = true;
2346 }
2347 }
2348 }
2349 return false; // never abort the iteration; failures have been recorded in _failures
2350 }
2351 };
2352
2353 class VerifyRootsClosure: public OopsInGenClosure {
2354 private:
2355 G1CollectedHeap* _g1h;
2356 bool _use_prev_marking;
2357 bool _failures;
2358 public:
2359 // use_prev_marking == true -> use "prev" marking information,
2360 // use_prev_marking == false -> use "next" marking information
2361 VerifyRootsClosure(bool use_prev_marking) :
2362 _g1h(G1CollectedHeap::heap()),
2363 _use_prev_marking(use_prev_marking),
2364 _failures(false) { }
2365
2366 bool failures() { return _failures; }
2367
2368 template <class T> void do_oop_nv(T* p) {
2369 T heap_oop = oopDesc::load_heap_oop(p);
2370 if (!oopDesc::is_null(heap_oop)) {
2371 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2372 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
2373 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2374 "points to dead obj "PTR_FORMAT, p, (void*) obj);
2375 obj->print_on(gclog_or_tty);
2376 _failures = true;
2377 }
2378 }
2379 }
2380
2381 void do_oop(oop* p) { do_oop_nv(p); }
2382 void do_oop(narrowOop* p) { do_oop_nv(p); }
2383 };
2384
2385 // This is the task used for parallel heap verification.
2386
2387 class G1ParVerifyTask: public AbstractGangTask {
2388 private:
2389 G1CollectedHeap* _g1h;
2390 bool _allow_dirty;
2391 bool _use_prev_marking;
2392 bool _failures;
2393
2394 public:
2395 // use_prev_marking == true -> use "prev" marking information,
2396 // use_prev_marking == false -> use "next" marking information
2397 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
2398 bool use_prev_marking) :
2399 AbstractGangTask("Parallel verify task"),
2400 _g1h(g1h),
2401 _allow_dirty(allow_dirty),
2402 _use_prev_marking(use_prev_marking),
2403 _failures(false) { }
2404
2405 bool failures() {
2406 return _failures;
2407 }
2408
2409 void work(int worker_i) {
2410 HandleMark hm;
2411 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
2412 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2413 HeapRegion::ParVerifyClaimValue);
2414 if (blk.failures()) {
2415 _failures = true;
2416 }
2417 }
2418 };
2419
2420 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2421 verify(allow_dirty, silent, /* use_prev_marking */ true);
2422 }
2423
2424 void G1CollectedHeap::verify(bool allow_dirty,
2425 bool silent,
2426 bool use_prev_marking) {
2427 if (SafepointSynchronize::is_at_safepoint() || !
UseTLAB) { 2428 if (!silent) { gclog_or_tty->print("roots "); } 2429 VerifyRootsClosure rootsCl(use_prev_marking); 2430 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); 2431 process_strong_roots(true, // activate StrongRootsScope 2432 false, 2433 SharedHeap::SO_AllClasses, 2434 &rootsCl, 2435 &blobsCl, 2436 &rootsCl); 2437 bool failures = rootsCl.failures(); 2438 rem_set()->invalidate(perm_gen()->used_region(), false); 2439 if (!silent) { gclog_or_tty->print("heapRegions "); } 2440 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { 2441 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2442 "sanity check"); 2443 2444 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); 2445 int n_workers = workers()->total_workers(); 2446 set_par_threads(n_workers); 2447 workers()->run_task(&task); 2448 set_par_threads(0); 2449 if (task.failures()) { 2450 failures = true; 2451 } 2452 2453 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), 2454 "sanity check"); 2455 2456 reset_heap_region_claim_values(); 2457 2458 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2459 "sanity check"); 2460 } else { 2461 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); 2462 _hrs->iterate(&blk); 2463 if (blk.failures()) { 2464 failures = true; 2465 } 2466 } 2467 if (!silent) gclog_or_tty->print("remset "); 2468 rem_set()->verify(); 2469 2470 if (failures) { 2471 gclog_or_tty->print_cr("Heap:"); 2472 print_on(gclog_or_tty, true /* extended */); 2473 gclog_or_tty->print_cr(""); 2474 #ifndef PRODUCT 2475 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { 2476 concurrent_mark()->print_reachable("at-verification-failure", 2477 use_prev_marking, false /* all */); 2478 } 2479 #endif 2480 gclog_or_tty->flush(); 2481 } 2482 guarantee(!failures, "there should not have been any failures"); 2483 } else { 2484 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); 2485 } 2486 } 2487 2488 class PrintRegionClosure: public HeapRegionClosure { 2489 outputStream* _st; 2490 public: 2491 PrintRegionClosure(outputStream* st) : _st(st) {} 2492 bool doHeapRegion(HeapRegion* r) { 2493 r->print_on(_st); 2494 return false; 2495 } 2496 }; 2497 2498 void G1CollectedHeap::print() const { print_on(tty); } 2499 2500 void G1CollectedHeap::print_on(outputStream* st) const { 2501 print_on(st, PrintHeapAtGCExtended); 2502 } 2503 2504 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { 2505 st->print(" %-20s", "garbage-first heap"); 2506 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 2507 capacity()/K, used_unlocked()/K); 2508 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 2509 _g1_storage.low_boundary(), 2510 _g1_storage.high(), 2511 _g1_storage.high_boundary()); 2512 st->cr(); 2513 st->print(" region size " SIZE_FORMAT "K, ", 2514 HeapRegion::GrainBytes/K); 2515 size_t young_regions = _young_list->length(); 2516 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", 2517 young_regions, young_regions * HeapRegion::GrainBytes / K); 2518 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); 2519 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", 2520 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); 2521 st->cr(); 2522 perm()->as_gen()->print_on(st); 2523 if (extended) { 2524 st->cr(); 2525 print_on_extended(st); 2526 } 2527 } 2528 2529 void G1CollectedHeap::print_on_extended(outputStream* st) const { 2530 PrintRegionClosure blk(st); 2531 
_hrs->iterate(&blk);
2532 }
2533
2534 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2535 if (G1CollectedHeap::use_parallel_gc_threads()) {
2536 workers()->print_worker_threads_on(st);
2537 }
2538
2539 _cmThread->print_on(st);
2540 st->cr();
2541
2542 _cm->print_worker_threads_on(st);
2543
2544 _cg1r->print_worker_threads_on(st);
2545
2546 _czft->print_on(st);
2547 st->cr();
2548 }
2549
2550 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2551 if (G1CollectedHeap::use_parallel_gc_threads()) {
2552 workers()->threads_do(tc);
2553 }
2554 tc->do_thread(_cmThread);
2555 _cg1r->threads_do(tc);
2556 tc->do_thread(_czft);
2557 }
2558
2559 void G1CollectedHeap::print_tracing_info() const {
2560 // We'll overload this to mean "trace GC pause statistics."
2561 if (TraceGen0Time || TraceGen1Time) {
2562 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
2563 // to that.
2564 g1_policy()->print_tracing_info();
2565 }
2566 if (G1SummarizeRSetStats) {
2567 g1_rem_set()->print_summary_info();
2568 }
2569 if (G1SummarizeConcMark) {
2570 concurrent_mark()->print_summary_info();
2571 }
2572 if (G1SummarizeZFStats) {
2573 ConcurrentZFThread::print_summary_info();
2574 }
2575 g1_policy()->print_yg_surv_rate_info();
2576
2577 SpecializationStats::print();
2578 }
2579
2580
2581 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
2582 HeapRegion* hr = heap_region_containing(addr);
2583 if (hr == NULL) {
2584 return 0;
2585 } else {
2586 return 1;
2587 }
2588 }
2589
2590 G1CollectedHeap* G1CollectedHeap::heap() {
2591 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
2592 "not a garbage-first heap");
2593 return _g1h;
2594 }
2595
2596 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2597 // always_do_update_barrier = false;
2598 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2599 // Call allocation profiler
2600 AllocationProfiler::iterate_since_last_gc();
2601 // Fill TLAB's and such
2602 ensure_parsability(true);
2603 }
2604
2605 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
2606 // FIXME: what is this about?
2607 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2608 // is set.
2609 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
2610 "derived pointer present"));
2611 // always_do_update_barrier = true;
2612 }
2613
2614 void G1CollectedHeap::do_collection_pause() {
2615 assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
2616
2617 // Read the GC count while holding the Heap_lock
2618 // we need to do this _before_ wait_for_cleanup_complete(), to
2619 // ensure that we do not give up the heap lock and potentially
2620 // pick up the wrong count
2621 unsigned int gc_count_before = SharedHeap::heap()->total_collections();
2622
2623 // Don't want to do a GC pause while cleanup is being completed!
2624 wait_for_cleanup_complete();
2625
2626 g1_policy()->record_stop_world_start();
2627 {
2628 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
2629 VM_G1IncCollectionPause op(gc_count_before,
2630 false, /* should_initiate_conc_mark */
2631 g1_policy()->max_pause_time_ms(),
2632 GCCause::_g1_inc_collection_pause);
2633 VMThread::execute(&op);
2634 }
2635 }
2636
2637 void
2638 G1CollectedHeap::doConcurrentMark() {
2639 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2640 if (!_cmThread->in_progress()) {
2641 _cmThread->set_started();
2642 CGC_lock->notify();
2643 }
2644 }
2645
2646 class VerifyMarkedObjsClosure: public ObjectClosure {
2647 G1CollectedHeap* _g1h;
2648 public:
2649 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
2650 void do_object(oop obj) {
2651 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
2652 "markandsweep mark should agree with concurrent deadness");
2653 }
2654 };
2655
2656 void
2657 G1CollectedHeap::checkConcurrentMark() {
2658 VerifyMarkedObjsClosure verifycl(this);
2659 // MutexLockerEx x(getMarkBitMapLock(),
2660 // Mutex::_no_safepoint_check_flag);
2661 object_iterate(&verifycl, false);
2662 }
2663
2664 void G1CollectedHeap::do_sync_mark() {
2665 _cm->checkpointRootsInitial();
2666 _cm->markFromRoots();
2667 _cm->checkpointRootsFinal(false);
2668 }
2669
2670 // <NEW PREDICTION>
2671
2672 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
2673 bool young) {
2674 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
2675 }
2676
2677 void G1CollectedHeap::check_if_region_is_too_expensive(double
2678 predicted_time_ms) {
2679 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
2680 }
2681
2682 size_t G1CollectedHeap::pending_card_num() {
2683 size_t extra_cards = 0;
2684 JavaThread *curr = Threads::first();
2685 while (curr != NULL) {
2686 DirtyCardQueue& dcq = curr->dirty_card_queue();
2687 extra_cards += dcq.size();
2688 curr = curr->next();
2689 }
2690 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2691 size_t buffer_size = dcqs.buffer_size();
2692 size_t buffer_num = dcqs.completed_buffers_num();
2693 return buffer_size * buffer_num + extra_cards;
2694 }
2695
2696 size_t G1CollectedHeap::max_pending_card_num() {
2697 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2698 size_t buffer_size = dcqs.buffer_size();
2699 size_t buffer_num = dcqs.completed_buffers_num();
2700 int thread_num = Threads::number_of_threads();
2701 return (buffer_num + thread_num) * buffer_size;
2702 }
2703
2704 size_t G1CollectedHeap::cards_scanned() {
2705 return g1_rem_set()->cardsScanned();
2706 }
2707
2708 void
2709 G1CollectedHeap::setup_surviving_young_words() {
2710 guarantee( _surviving_young_words == NULL, "pre-condition" );
2711 size_t array_length = g1_policy()->young_cset_length();
2712 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
2713 if (_surviving_young_words == NULL) {
2714 vm_exit_out_of_memory(sizeof(size_t) * array_length,
2715 "Not enough space for young surv words summary.");
2716 }
2717 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
2718 #ifdef ASSERT
2719 for (size_t i = 0; i < array_length; ++i) {
2720 assert( _surviving_young_words[i] == 0, "memset above" );
2721 }
2722 #endif // ASSERT
2723 }
2724
2725 void
2726 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
2727 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2728 size_t array_length =
g1_policy()->young_cset_length(); 2729 for (size_t i = 0; i < array_length; ++i) 2730 _surviving_young_words[i] += surv_young_words[i]; 2731 } 2732 2733 void 2734 G1CollectedHeap::cleanup_surviving_young_words() { 2735 guarantee( _surviving_young_words != NULL, "pre-condition" ); 2736 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); 2737 _surviving_young_words = NULL; 2738 } 2739 2740 // </NEW PREDICTION> 2741 2742 struct PrepareForRSScanningClosure : public HeapRegionClosure { 2743 bool doHeapRegion(HeapRegion *r) { 2744 r->rem_set()->set_iter_claimed(0); 2745 return false; 2746 } 2747 }; 2748 2749 #if TASKQUEUE_STATS 2750 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { 2751 st->print_raw_cr("GC Task Stats"); 2752 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); 2753 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); 2754 } 2755 2756 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { 2757 print_taskqueue_stats_hdr(st); 2758 2759 TaskQueueStats totals; 2760 const int n = workers() != NULL ? workers()->total_workers() : 1; 2761 for (int i = 0; i < n; ++i) { 2762 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); 2763 totals += task_queue(i)->stats; 2764 } 2765 st->print_raw("tot "); totals.print(st); st->cr(); 2766 2767 DEBUG_ONLY(totals.verify()); 2768 } 2769 2770 void G1CollectedHeap::reset_taskqueue_stats() { 2771 const int n = workers() != NULL ? workers()->total_workers() : 1; 2772 for (int i = 0; i < n; ++i) { 2773 task_queue(i)->stats.reset(); 2774 } 2775 } 2776 #endif // TASKQUEUE_STATS 2777 2778 void 2779 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { 2780 if (GC_locker::check_active_before_gc()) { 2781 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 2782 } 2783 2784 if (PrintHeapAtGC) { 2785 Universe::print_heap_before_gc(); 2786 } 2787 2788 { 2789 ResourceMark rm; 2790 2791 // This call will decide whether this pause is an initial-mark 2792 // pause. If it is, during_initial_mark_pause() will return true 2793 // for the duration of this pause. 2794 g1_policy()->decide_on_conc_mark_initiation(); 2795 2796 char verbose_str[128]; 2797 sprintf(verbose_str, "GC pause "); 2798 if (g1_policy()->in_young_gc_mode()) { 2799 if (g1_policy()->full_young_gcs()) 2800 strcat(verbose_str, "(young)"); 2801 else 2802 strcat(verbose_str, "(partial)"); 2803 } 2804 if (g1_policy()->during_initial_mark_pause()) { 2805 strcat(verbose_str, " (initial-mark)"); 2806 // We are about to start a marking cycle, so we increment the 2807 // full collection counter. 2808 increment_total_full_collections(); 2809 } 2810 2811 // if PrintGCDetails is on, we'll print long statistics information 2812 // in the collector policy code, so let's not print this as the output 2813 // is messy if we do. 
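// (With the string assembled above, the pause line that TraceTime
// prints below looks roughly like "[GC pause (young) (initial-mark)
// ...]", depending on which of the branches above were taken.)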
2814 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2815 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2816 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
2817
2818 TraceMemoryManagerStats tms(false /* fullGC */);
2819
2820 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
2821 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
2822 guarantee(!is_gc_active(), "collection is not reentrant");
2823 assert(regions_accounted_for(), "Region leakage!");
2824
2825 increment_gc_time_stamp();
2826
2827 if (g1_policy()->in_young_gc_mode()) {
2828 assert(check_young_list_well_formed(),
2829 "young list should be well formed");
2830 }
2831
2832 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2833 IsGCActiveMark x;
2834
2835 gc_prologue(false);
2836 increment_total_collections(false /* full gc */);
2837
2838 #if G1_REM_SET_LOGGING
2839 gclog_or_tty->print_cr("\nJust chose CS, heap:");
2840 print();
2841 #endif
2842
2843 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
2844 HandleMark hm; // Discard invalid handles created during verification
2845 prepare_for_verify();
2846 gclog_or_tty->print(" VerifyBeforeGC:");
2847 Universe::verify(false);
2848 }
2849
2850 COMPILER2_PRESENT(DerivedPointerTable::clear());
2851
2852 // We want to turn off ref discovery, if necessary, and turn it back
2853 // on again later if we do. XXX Dubious: why is discovery disabled?
2854 bool was_enabled = ref_processor()->discovery_enabled();
2855 if (was_enabled) ref_processor()->disable_discovery();
2856
2857 // Forget the current alloc region (we might even choose it to be part
2858 // of the collection set!).
2859 abandon_cur_alloc_region();
2860
2861 // The elapsed time induced by the start time below deliberately elides
2862 // the possible verification above.
2863 double start_time_sec = os::elapsedTime();
2864 size_t start_used_bytes = used();
2865
2866 #if YOUNG_LIST_VERBOSE
2867 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
2868 _young_list->print();
2869 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
2870 #endif // YOUNG_LIST_VERBOSE
2871
2872 g1_policy()->record_collection_pause_start(start_time_sec,
2873 start_used_bytes);
2874
2875 #if YOUNG_LIST_VERBOSE
2876 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
2877 _young_list->print();
2878 #endif // YOUNG_LIST_VERBOSE
2879
2880 if (g1_policy()->during_initial_mark_pause()) {
2881 concurrent_mark()->checkpointRootsInitialPre();
2882 }
2883 save_marks();
2884
2885 // We must do this before any possible evacuation that should propagate
2886 // marks.
2887 if (mark_in_progress()) {
2888 double start_time_sec = os::elapsedTime();
2889
2890 _cm->drainAllSATBBuffers();
2891 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
2892 g1_policy()->record_satb_drain_time(finish_mark_ms);
2893 }
2894 // Record the number of elements currently on the mark stack, so we
2895 // only iterate over these. (Since evacuation may add to the mark
2896 // stack, doing more exposes race conditions.) If no mark is in
2897 // progress, this will be zero.
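// (set_oops_do_bound() presumably snapshots the current mark-stack
// size as the bound; later iteration over the stack stops there, so
// entries pushed during evacuation are deliberately left alone.)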
2898 _cm->set_oops_do_bound();
2899
2900 assert(regions_accounted_for(), "Region leakage.");
2901
2902 if (mark_in_progress())
2903 concurrent_mark()->newCSet();
2904
2905 #if YOUNG_LIST_VERBOSE
2906 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
2907 _young_list->print();
2908 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
2909 #endif // YOUNG_LIST_VERBOSE
2910
2911 g1_policy()->choose_collection_set(target_pause_time_ms);
2912
2913 // Nothing to do if we were unable to choose a collection set.
2914 #if G1_REM_SET_LOGGING
2915 gclog_or_tty->print_cr("\nAfter pause, heap:");
2916 print();
2917 #endif
2918 PrepareForRSScanningClosure prepare_for_rs_scan;
2919 collection_set_iterate(&prepare_for_rs_scan);
2920
2921 setup_surviving_young_words();
2922
2923 // Set up the gc allocation regions.
2924 get_gc_alloc_regions();
2925
2926 // Actually do the work...
2927 evacuate_collection_set();
2928
2929 free_collection_set(g1_policy()->collection_set());
2930 g1_policy()->clear_collection_set();
2931
2932 cleanup_surviving_young_words();
2933
2934 // Start a new incremental collection set for the next pause.
2935 g1_policy()->start_incremental_cset_building();
2936
2937 // Clear the _cset_fast_test bitmap in anticipation of adding
2938 // regions to the incremental collection set for the next
2939 // evacuation pause.
2940 clear_cset_fast_test();
2941
2942 if (g1_policy()->in_young_gc_mode()) {
2943 _young_list->reset_sampled_info();
2944
2945 // Don't check the whole heap at this point as the
2946 // GC alloc regions from this pause have been tagged
2947 // as survivors and moved on to the survivor list.
2948 // Survivor regions will fail the !is_young() check.
2949 assert(check_young_list_empty(false /* check_heap */),
2950 "young list should be empty");
2951
2952 #if YOUNG_LIST_VERBOSE
2953 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
2954 _young_list->print();
2955 #endif // YOUNG_LIST_VERBOSE
2956
2957 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
2958 _young_list->first_survivor_region(),
2959 _young_list->last_survivor_region());
2960
2961 _young_list->reset_auxilary_lists();
2962 }
2963
2964 if (evacuation_failed()) {
2965 _summary_bytes_used = recalculate_used();
2966 } else {
2967 // The "used" of the collection set regions has already been subtracted
2968 // when they were freed. Add in the bytes evacuated.
2969 _summary_bytes_used += g1_policy()->bytes_in_to_space();
2970 }
2971
2972 if (g1_policy()->in_young_gc_mode() &&
2973 g1_policy()->during_initial_mark_pause()) {
2974 concurrent_mark()->checkpointRootsInitialPost();
2975 set_marking_started();
2976 // CAUTION: after the doConcurrentMark() call below,
2977 // the concurrent marking thread(s) could be running
2978 // concurrently with us. Make sure that anything after
2979 // this point does not assume that we are the only GC thread
2980 // running. Note: of course, the actual marking work will
2981 // not start until the safepoint itself is released in
2982 // ConcurrentGCThread::safepoint_desynchronize().
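// Note that doConcurrentMark() (defined earlier in this file) only
// sets the CM thread's "started" flag and notifies CGC_lock; the
// marking work itself cannot begin until the safepoint is released,
// which is why the caution above matters.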
2983 doConcurrentMark();
2984 }
2985
2986 #if YOUNG_LIST_VERBOSE
2987 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
2988 _young_list->print();
2989 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
2990 #endif // YOUNG_LIST_VERBOSE
2991
2992 double end_time_sec = os::elapsedTime();
2993 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2994 g1_policy()->record_pause_time_ms(pause_time_ms);
2995 g1_policy()->record_collection_pause_end();
2996
2997 assert(regions_accounted_for(), "Region leakage.");
2998
2999 MemoryService::track_memory_usage();
3000
3001 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3002 HandleMark hm; // Discard invalid handles created during verification
3003 gclog_or_tty->print(" VerifyAfterGC:");
3004 prepare_for_verify();
3005 Universe::verify(false);
3006 }
3007
3008 if (was_enabled) ref_processor()->enable_discovery();
3009
3010 {
3011 size_t expand_bytes = g1_policy()->expansion_amount();
3012 if (expand_bytes > 0) {
3013 size_t bytes_before = capacity();
3014 expand(expand_bytes);
3015 }
3016 }
3017
3018 if (mark_in_progress()) {
3019 concurrent_mark()->update_g1_committed();
3020 }
3021
3022 #ifdef TRACESPINNING
3023 ParallelTaskTerminator::print_termination_counts();
3024 #endif
3025
3026 gc_epilogue(false);
3027 }
3028
3029 assert(verify_region_lists(), "Bad region lists.");
3030
3031 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
3032 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
3033 print_tracing_info();
3034 vm_exit(-1);
3035 }
3036 }
3037
3038 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
3039 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3040
3041 if (PrintHeapAtGC) {
3042 Universe::print_heap_after_gc();
3043 }
3044 if (G1SummarizeRSetStats &&
3045 (G1SummarizeRSetStatsPeriod > 0) &&
3046 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3047 g1_rem_set()->print_summary_info();
3048 }
3049 }
3050
3051 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
3052 {
3053 size_t gclab_word_size;
3054 switch (purpose) {
3055 case GCAllocForSurvived:
3056 gclab_word_size = YoungPLABSize;
3057 break;
3058 case GCAllocForTenured:
3059 gclab_word_size = OldPLABSize;
3060 break;
3061 default:
3062 assert(false, "unknown GCAllocPurpose");
3063 gclab_word_size = OldPLABSize;
3064 break;
3065 }
3066 return gclab_word_size;
3067 }
3068
3069
3070 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
3071 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
3072 // make sure we don't call set_gc_alloc_region() multiple times on
3073 // the same region
3074 assert(r == NULL || !r->is_gc_alloc_region(),
3075 "shouldn't already be a GC alloc region");
3076 assert(r == NULL || !r->isHumongous(),
3077 "humongous regions shouldn't be used as GC alloc regions");
3078
3079 HeapWord* original_top = NULL;
3080 if (r != NULL)
3081 original_top = r->top();
3082
3083 // We will want to record the used space in r as being there before the GC.
3084 // Once we install it as a GC alloc region, it's eligible for allocation.
3085 // So record it now and use it later.
3086 size_t r_used = 0;
3087 if (r != NULL) {
3088 r_used = r->used();
3089
3090 if (G1CollectedHeap::use_parallel_gc_threads()) {
3091 // need to take the lock to guard against two threads calling
3092 // get_gc_alloc_region concurrently (very unlikely but...)
3093 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3094 r->save_marks();
3095 }
3096 }
3097 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
3098 _gc_alloc_regions[purpose] = r;
3099 if (old_alloc_region != NULL) {
3100 // Replace aliases too.
3101 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3102 if (_gc_alloc_regions[ap] == old_alloc_region) {
3103 _gc_alloc_regions[ap] = r;
3104 }
3105 }
3106 }
3107 if (r != NULL) {
3108 push_gc_alloc_region(r);
3109 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
3110 // We are using a region as a GC alloc region after it has been used
3111 // as a mutator allocation region during the current marking cycle.
3112 // The mutator-allocated objects are currently implicitly marked, but
3113 // when we move hr->next_top_at_mark_start() forward at the end
3114 // of the GC pause, they won't be. We therefore mark all objects in
3115 // the "gap". We do this object-by-object, since marking densely
3116 // does not currently work right with marking bitmap iteration. This
3117 // means we rely on TLAB filling at the start of pauses, and no
3118 // "resuscitation" of filled TLAB's. If we want to do this, we need
3119 // to fix the marking bitmap iteration.
3120 HeapWord* curhw = r->next_top_at_mark_start();
3121 HeapWord* t = original_top;
3122
3123 while (curhw < t) {
3124 oop cur = (oop)curhw;
3125 // We'll assume parallel for generality. This is rare code.
3126 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
3127 curhw = curhw + cur->size();
3128 }
3129 assert(curhw == t, "Should have parsed correctly.");
3130 }
3131 if (G1PolicyVerbose > 1) {
3132 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
3133 "for survivors:", r->bottom(), original_top, r->end());
3134 r->print();
3135 }
3136 g1_policy()->record_before_bytes(r_used);
3137 }
3138 }
3139
3140 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
3141 assert(Thread::current()->is_VM_thread() ||
3142 par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
3143 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
3144 "Precondition.");
3145 hr->set_is_gc_alloc_region(true);
3146 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
3147 _gc_alloc_region_list = hr;
3148 }
3149
3150 #ifdef G1_DEBUG
3151 class FindGCAllocRegion: public HeapRegionClosure {
3152 public:
3153 bool doHeapRegion(HeapRegion* r) {
3154 if (r->is_gc_alloc_region()) {
3155 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
3156 r->hrs_index(), r->bottom());
3157 }
3158 return false;
3159 }
3160 };
3161 #endif // G1_DEBUG
3162
3163 void G1CollectedHeap::forget_alloc_region_list() {
3164 assert(Thread::current()->is_VM_thread(), "Precondition");
3165 while (_gc_alloc_region_list != NULL) {
3166 HeapRegion* r = _gc_alloc_region_list;
3167 assert(r->is_gc_alloc_region(), "Invariant.");
3168 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
3169 // newly allocated data in order to be able to apply deferred updates
3170 // before the GC is done for verification purposes (i.e. to allow
3171 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
3172 // collection.
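// (A sketch of the intent: ContiguousSpace::set_saved_mark() records
// the region's current top() as the saved mark, so a subsequent
// "oops since save_marks" style walk covers exactly the data that was
// allocated in the region during this GC.)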
3173 r->ContiguousSpace::set_saved_mark();
3174 _gc_alloc_region_list = r->next_gc_alloc_region();
3175 r->set_next_gc_alloc_region(NULL);
3176 r->set_is_gc_alloc_region(false);
3177 if (r->is_survivor()) {
3178 if (r->is_empty()) {
3179 r->set_not_young();
3180 } else {
3181 _young_list->add_survivor_region(r);
3182 }
3183 }
3184 if (r->is_empty()) {
3185 ++_free_regions;
3186 }
3187 }
3188 #ifdef G1_DEBUG
3189 FindGCAllocRegion fa;
3190 heap_region_iterate(&fa);
3191 #endif // G1_DEBUG
3192 }
3193
3194
3195 bool G1CollectedHeap::check_gc_alloc_regions() {
3196 // TODO: allocation regions check
3197 return true;
3198 }
3199
3200 void G1CollectedHeap::get_gc_alloc_regions() {
3201 // First, let's check that the GC alloc region list is empty (it should be).
3202 assert(_gc_alloc_region_list == NULL, "invariant");
3203
3204 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3205 assert(_gc_alloc_regions[ap] == NULL, "invariant");
3206 assert(_gc_alloc_region_counts[ap] == 0, "invariant");
3207
3208 // Create new GC alloc regions.
3209 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
3210 _retained_gc_alloc_regions[ap] = NULL;
3211
3212 if (alloc_region != NULL) {
3213 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
3214
3215 // let's make sure that the GC alloc region is not tagged as such
3216 // outside a GC operation
3217 assert(!alloc_region->is_gc_alloc_region(), "sanity");
3218
3219 if (alloc_region->in_collection_set() ||
3220 alloc_region->top() == alloc_region->end() ||
3221 alloc_region->top() == alloc_region->bottom() ||
3222 alloc_region->isHumongous()) {
3223 // we will discard the current GC alloc region if
3224 // * it's in the collection set (it can happen!),
3225 // * it's already full (no point in using it),
3226 // * it's empty (this means that it was emptied during
3227 // a cleanup and it should be on the free list now), or
3228 // * it's humongous (this means that it was emptied
3229 // during a cleanup and was added to the free list, but
3230 // has been subsequently used to allocate a humongous
3231 // object that may be smaller than the region size).
3232
3233 alloc_region = NULL;
3234 }
3235 }
3236
3237 if (alloc_region == NULL) {
3238 // we will get a new GC alloc region
3239 alloc_region = newAllocRegionWithExpansion(ap, 0);
3240 } else {
3241 // the region was retained from the last collection
3242 ++_gc_alloc_region_counts[ap];
3243 if (G1PrintHeapRegions) {
3244 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
3245 "top "PTR_FORMAT,
3246 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
3247 }
3248 }
3249
3250 if (alloc_region != NULL) {
3251 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
3252 set_gc_alloc_region(ap, alloc_region);
3253 }
3254
3255 assert(_gc_alloc_regions[ap] == NULL ||
3256 _gc_alloc_regions[ap]->is_gc_alloc_region(),
3257 "the GC alloc region should be tagged as such");
3258 assert(_gc_alloc_regions[ap] == NULL ||
3259 _gc_alloc_regions[ap] == _gc_alloc_region_list,
3260 "the GC alloc region should be the same as the GC alloc list head");
3261 }
3262 // Set alternative regions for allocation purposes that have reached
3263 // their limit.
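// For instance (assuming the usual mapping in the policy), if no
// region could be obtained for GCAllocForSurvived, then
// alternative_purpose(GCAllocForSurvived) would route survivor copies
// to the GCAllocForTenured region instead.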
3264 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3265 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); 3266 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { 3267 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; 3268 } 3269 } 3270 assert(check_gc_alloc_regions(), "alloc regions messed up"); 3271 } 3272 3273 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { 3274 // We keep a separate list of all regions that have been alloc regions in 3275 // the current collection pause. Forget that now. This method will 3276 // untag the GC alloc regions and tear down the GC alloc region 3277 // list. It's desirable that no regions are tagged as GC alloc 3278 // outside GCs. 3279 forget_alloc_region_list(); 3280 3281 // The current alloc regions contain objs that have survived 3282 // collection. Make them no longer GC alloc regions. 3283 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3284 HeapRegion* r = _gc_alloc_regions[ap]; 3285 _retained_gc_alloc_regions[ap] = NULL; 3286 _gc_alloc_region_counts[ap] = 0; 3287 3288 if (r != NULL) { 3289 // we retain nothing on _gc_alloc_regions between GCs 3290 set_gc_alloc_region(ap, NULL); 3291 3292 if (r->is_empty()) { 3293 // we didn't actually allocate anything in it; let's just put 3294 // it on the free list 3295 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 3296 r->set_zero_fill_complete(); 3297 put_free_region_on_list_locked(r); 3298 } else if (_retain_gc_alloc_region[ap] && !totally) { 3299 // retain it so that we can use it at the beginning of the next GC 3300 _retained_gc_alloc_regions[ap] = r; 3301 } 3302 } 3303 } 3304 } 3305 3306 #ifndef PRODUCT 3307 // Useful for debugging 3308 3309 void G1CollectedHeap::print_gc_alloc_regions() { 3310 gclog_or_tty->print_cr("GC alloc regions"); 3311 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3312 HeapRegion* r = _gc_alloc_regions[ap]; 3313 if (r == NULL) { 3314 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); 3315 } else { 3316 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, 3317 ap, r->bottom(), r->used()); 3318 } 3319 } 3320 } 3321 #endif // PRODUCT 3322 3323 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 3324 _drain_in_progress = false; 3325 set_evac_failure_closure(cl); 3326 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3327 } 3328 3329 void G1CollectedHeap::finalize_for_evac_failure() { 3330 assert(_evac_failure_scan_stack != NULL && 3331 _evac_failure_scan_stack->length() == 0, 3332 "Postcondition"); 3333 assert(!_drain_in_progress, "Postcondition"); 3334 delete _evac_failure_scan_stack; 3335 _evac_failure_scan_stack = NULL; 3336 } 3337 3338 3339 3340 // *** Sequential G1 Evacuation 3341 3342 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { 3343 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3344 // let the caller handle alloc failure 3345 if (alloc_region == NULL) return NULL; 3346 assert(isHumongous(word_size) || !alloc_region->isHumongous(), 3347 "Either the object is humongous or the region isn't"); 3348 HeapWord* block = alloc_region->allocate(word_size); 3349 if (block == NULL) { 3350 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); 3351 } 3352 return block; 3353 } 3354 3355 class G1IsAliveClosure: public BoolObjectClosure { 3356 G1CollectedHeap* _g1; 3357 public: 3358 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3359 void do_object(oop p) { assert(false, "Do not call."); } 
3360 bool do_object_b(oop p) {
3361 // It is reachable if it is outside the collection set, or is inside
3362 // and forwarded.
3363
3364 #ifdef G1_DEBUG
3365 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
3366 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
3367 !_g1->obj_in_cs(p) || p->is_forwarded());
3368 #endif // G1_DEBUG
3369
3370 return !_g1->obj_in_cs(p) || p->is_forwarded();
3371 }
3372 };
3373
3374 class G1KeepAliveClosure: public OopClosure {
3375 G1CollectedHeap* _g1;
3376 public:
3377 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3378 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3379 void do_oop( oop* p) {
3380 oop obj = *p;
3381 #ifdef G1_DEBUG
3382 if (PrintGC && Verbose) {
3383 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
3384 p, (void*) obj, (void*) *p);
3385 }
3386 #endif // G1_DEBUG
3387
3388 if (_g1->obj_in_cs(obj)) {
3389 assert( obj->is_forwarded(), "invariant" );
3390 *p = obj->forwardee();
3391 #ifdef G1_DEBUG
3392 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
3393 (void*) obj, (void*) *p);
3394 #endif // G1_DEBUG
3395 }
3396 }
3397 };
3398
3399 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3400 private:
3401 G1CollectedHeap* _g1;
3402 DirtyCardQueue *_dcq;
3403 CardTableModRefBS* _ct_bs;
3404
3405 public:
3406 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3407 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3408
3409 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3410 virtual void do_oop( oop* p) { do_oop_work(p); }
3411 template <class T> void do_oop_work(T* p) {
3412 assert(_from->is_in_reserved(p), "paranoia");
3413 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
3414 !_from->is_survivor()) {
3415 size_t card_index = _ct_bs->index_for(p);
3416 if (_ct_bs->mark_card_deferred(card_index)) {
3417 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3418 }
3419 }
3420 }
3421 };
3422
3423 class RemoveSelfPointerClosure: public ObjectClosure {
3424 private:
3425 G1CollectedHeap* _g1;
3426 ConcurrentMark* _cm;
3427 HeapRegion* _hr;
3428 size_t _prev_marked_bytes;
3429 size_t _next_marked_bytes;
3430 OopsInHeapRegionClosure *_cl;
3431 public:
3432 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
3433 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
3434 _next_marked_bytes(0), _cl(cl) {}
3435
3436 size_t prev_marked_bytes() { return _prev_marked_bytes; }
3437 size_t next_marked_bytes() { return _next_marked_bytes; }
3438
3439 // The original idea here was to coalesce evacuated and dead objects.
3440 // However that caused complications with the block offset table (BOT).
3441 // In particular, consider two TLABs, one of them partially refined:
3442 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
3443 // The BOT entries of the unrefined part of TLAB_2 point to the start
3444 // of TLAB_2. If the last object of TLAB_1 and the first object
3445 // of TLAB_2 are coalesced, then the cards of the unrefined part
3446 // would point into the middle of the filler object.
3447 //
3448 // The current approach is to not coalesce and leave the BOT contents intact.
3449 void do_object(oop obj) {
3450 if (obj->is_forwarded() && obj->forwardee() == obj) {
3451 // The object failed to move.
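// Evacuation failure is flagged by installing a self-forwarding
// pointer (see handle_evacuation_failure() and
// handle_evacuation_failure_par() below, which apply forward_to(old)
// to old itself), so in this closure is_forwarded() does not imply
// "moved". A minimal sketch of the predicate this branch keys on,
// using a hypothetical helper name:
//   bool self_forwarded(oop o) {
//     return o->is_forwarded() && o->forwardee() == o;
//   }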
3452 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3453 _cm->markPrev(obj);
3454 assert(_cm->isPrevMarked(obj), "Should be marked!");
3455 _prev_marked_bytes += (obj->size() * HeapWordSize);
3456 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3457 _cm->markAndGrayObjectIfNecessary(obj);
3458 }
3459 obj->set_mark(markOopDesc::prototype());
3460 // While we were processing RSet buffers during the
3461 // collection, we actually didn't scan any cards in the
3462 // collection set, since we didn't want to update remembered
3463 // sets with entries that point into the collection set, given
3464 // that live objects from the collection set are about to move
3465 // and such entries will be stale very soon. This change also
3466 // dealt with a reliability issue which involved scanning a
3467 // card in the collection set and coming across an array that
3468 // was being chunked and looking malformed. The problem is
3469 // that, if evacuation fails, we might have remembered set
3470 // entries missing given that we skipped cards in the
3471 // collection set. So, we'll recreate such entries now.
3472 obj->oop_iterate(_cl);
3473 assert(_cm->isPrevMarked(obj), "Should be marked!");
3474 } else {
3475 // The object has been either evacuated or is dead. Fill it with a
3476 // dummy object.
3477 MemRegion mr((HeapWord*)obj, obj->size());
3478 CollectedHeap::fill_with_object(mr);
3479 _cm->clearRangeBothMaps(mr);
3480 }
3481 }
3482 };
3483
3484 void G1CollectedHeap::remove_self_forwarding_pointers() {
3485 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
3486 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3487 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3488 OopsInHeapRegionClosure *cl;
3489 if (G1DeferredRSUpdate) {
3490 cl = &deferred_update;
3491 } else {
3492 cl = &immediate_update;
3493 }
3494 HeapRegion* cur = g1_policy()->collection_set();
3495 while (cur != NULL) {
3496 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3497
3498 RemoveSelfPointerClosure rspc(_g1h, cl);
3499 if (cur->evacuation_failed()) {
3500 assert(cur->in_collection_set(), "bad CS");
3501 cl->set_region(cur);
3502 cur->object_iterate(&rspc);
3503
3504 // A number of manipulations to make the TAMS be the current top,
3505 // and the marked bytes be the ones observed in the iteration.
3506 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
3507 // The comments below are the postconditions achieved by the
3508 // calls. Note especially the last such condition, which says that
3509 // the count of marked bytes has been properly restored.
3510 cur->note_start_of_marking(false);
3511 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3512 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
3513 // _next_marked_bytes == prev_marked_bytes.
3514 cur->note_end_of_marking();
3515 // _prev_top_at_mark_start == top(),
3516 // _prev_marked_bytes == prev_marked_bytes
3517 }
3518 // If there is no mark in progress, we modified the _next variables
3519 // above needlessly, but harmlessly.
3520 if (_g1h->mark_in_progress()) {
3521 cur->note_start_of_marking(false);
3522 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3523 // _next_marked_bytes == next_marked_bytes.
3524 }
3525
3526 // Now make sure the region has the right index in the sorted array.
3527 g1_policy()->note_change_in_marked_bytes(cur); 3528 } 3529 cur = cur->next_in_collection_set(); 3530 } 3531 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3532 3533 // Now restore saved marks, if any. 3534 if (_objs_with_preserved_marks != NULL) { 3535 assert(_preserved_marks_of_objs != NULL, "Both or none."); 3536 assert(_objs_with_preserved_marks->length() == 3537 _preserved_marks_of_objs->length(), "Both or none."); 3538 guarantee(_objs_with_preserved_marks->length() == 3539 _preserved_marks_of_objs->length(), "Both or none."); 3540 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { 3541 oop obj = _objs_with_preserved_marks->at(i); 3542 markOop m = _preserved_marks_of_objs->at(i); 3543 obj->set_mark(m); 3544 } 3545 // Delete the preserved marks growable arrays (allocated on the C heap). 3546 delete _objs_with_preserved_marks; 3547 delete _preserved_marks_of_objs; 3548 _objs_with_preserved_marks = NULL; 3549 _preserved_marks_of_objs = NULL; 3550 } 3551 } 3552 3553 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { 3554 _evac_failure_scan_stack->push(obj); 3555 } 3556 3557 void G1CollectedHeap::drain_evac_failure_scan_stack() { 3558 assert(_evac_failure_scan_stack != NULL, "precondition"); 3559 3560 while (_evac_failure_scan_stack->length() > 0) { 3561 oop obj = _evac_failure_scan_stack->pop(); 3562 _evac_failure_closure->set_region(heap_region_containing(obj)); 3563 obj->oop_iterate_backwards(_evac_failure_closure); 3564 } 3565 } 3566 3567 void G1CollectedHeap::handle_evacuation_failure(oop old) { 3568 markOop m = old->mark(); 3569 // forward to self 3570 assert(!old->is_forwarded(), "precondition"); 3571 3572 old->forward_to(old); 3573 handle_evacuation_failure_common(old, m); 3574 } 3575 3576 oop 3577 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, 3578 oop old) { 3579 markOop m = old->mark(); 3580 oop forward_ptr = old->forward_to_atomic(old); 3581 if (forward_ptr == NULL) { 3582 // Forward-to-self succeeded. 3583 if (_evac_failure_closure != cl) { 3584 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); 3585 assert(!_drain_in_progress, 3586 "Should only be true while someone holds the lock."); 3587 // Set the global evac-failure closure to the current thread's. 3588 assert(_evac_failure_closure == NULL, "Or locking has failed."); 3589 set_evac_failure_closure(cl); 3590 // Now do the common part. 3591 handle_evacuation_failure_common(old, m); 3592 // Reset to NULL. 3593 set_evac_failure_closure(NULL); 3594 } else { 3595 // The lock is already held, and this is recursive. 3596 assert(_drain_in_progress, "This should only be the recursive case."); 3597 handle_evacuation_failure_common(old, m); 3598 } 3599 return old; 3600 } else { 3601 // Someone else had a place to copy it. 
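// (forward_to_atomic() returns NULL only to the thread whose CAS
// installed the forwarding pointer; a losing thread gets back the
// value the winner installed, so we can hand that competing copy
// straight to our caller.)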
3602 return forward_ptr; 3603 } 3604 } 3605 3606 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { 3607 set_evacuation_failed(true); 3608 3609 preserve_mark_if_necessary(old, m); 3610 3611 HeapRegion* r = heap_region_containing(old); 3612 if (!r->evacuation_failed()) { 3613 r->set_evacuation_failed(true); 3614 if (G1PrintHeapRegions) { 3615 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " 3616 "["PTR_FORMAT","PTR_FORMAT")\n", 3617 r, r->bottom(), r->end()); 3618 } 3619 } 3620 3621 push_on_evac_failure_scan_stack(old); 3622 3623 if (!_drain_in_progress) { 3624 // prevent recursion in copy_to_survivor_space() 3625 _drain_in_progress = true; 3626 drain_evac_failure_scan_stack(); 3627 _drain_in_progress = false; 3628 } 3629 } 3630 3631 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { 3632 if (m != markOopDesc::prototype()) { 3633 if (_objs_with_preserved_marks == NULL) { 3634 assert(_preserved_marks_of_objs == NULL, "Both or none."); 3635 _objs_with_preserved_marks = 3636 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3637 _preserved_marks_of_objs = 3638 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); 3639 } 3640 _objs_with_preserved_marks->push(obj); 3641 _preserved_marks_of_objs->push(m); 3642 } 3643 } 3644 3645 // *** Parallel G1 Evacuation 3646 3647 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 3648 size_t word_size) { 3649 assert(!isHumongous(word_size), 3650 err_msg("we should not be seeing humongous allocation requests " 3651 "during GC, word_size = "SIZE_FORMAT, word_size)); 3652 3653 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3654 // let the caller handle alloc failure 3655 if (alloc_region == NULL) return NULL; 3656 3657 HeapWord* block = alloc_region->par_allocate(word_size); 3658 if (block == NULL) { 3659 MutexLockerEx x(par_alloc_during_gc_lock(), 3660 Mutex::_no_safepoint_check_flag); 3661 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); 3662 } 3663 return block; 3664 } 3665 3666 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, 3667 bool par) { 3668 // Another thread might have obtained alloc_region for the given 3669 // purpose, and might be attempting to allocate in it, and might 3670 // succeed. Therefore, we can't do the "finalization" stuff on the 3671 // region below until we're sure the last allocation has happened. 3672 // We ensure this by allocating the remaining space with a garbage 3673 // object. 3674 if (par) par_allocate_remaining_space(alloc_region); 3675 // Now we can do the post-GC stuff on the region. 3676 alloc_region->note_end_of_copying(); 3677 g1_policy()->record_after_bytes(alloc_region->used()); 3678 } 3679 3680 HeapWord* 3681 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, 3682 HeapRegion* alloc_region, 3683 bool par, 3684 size_t word_size) { 3685 assert(!isHumongous(word_size), 3686 err_msg("we should not be seeing humongous allocation requests " 3687 "during GC, word_size = "SIZE_FORMAT, word_size)); 3688 3689 HeapWord* block = NULL; 3690 // In the parallel case, a previous thread to obtain the lock may have 3691 // already assigned a new gc_alloc_region. 
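// A hedged sketch of the pattern used below, with hypothetical names:
//   lock();
//   if (shared_region != region_we_failed_in) {
//     // Someone already replaced it; just retry in the new region.
//   } else {
//     // Retire the old region and install a fresh one ourselves.
//   }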
3692 if (alloc_region != _gc_alloc_regions[purpose]) {
3693 assert(par, "But should only happen in parallel case.");
3694 alloc_region = _gc_alloc_regions[purpose];
3695 if (alloc_region == NULL) return NULL;
3696 block = alloc_region->par_allocate(word_size);
3697 if (block != NULL) return block;
3698 // Otherwise, continue; this new region is empty, too.
3699 }
3700 assert(alloc_region != NULL, "We better have an allocation region");
3701 retire_alloc_region(alloc_region, par);
3702
3703 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
3704 // Cannot allocate more regions for the given purpose.
3705 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
3706 // Is there an alternative?
3707 if (purpose != alt_purpose) {
3708 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
3709 // Has the alternative region not been aliased yet?
3710 if (alloc_region != alt_region && alt_region != NULL) {
3711 // Try to allocate in the alternative region.
3712 if (par) {
3713 block = alt_region->par_allocate(word_size);
3714 } else {
3715 block = alt_region->allocate(word_size);
3716 }
3717 // Make an alias.
3718 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
3719 if (block != NULL) {
3720 return block;
3721 }
3722 retire_alloc_region(alt_region, par);
3723 }
3724 // Both the allocation region and the alternative one are full
3725 // and aliased; replace them with a new allocation region.
3726 purpose = alt_purpose;
3727 } else {
3728 set_gc_alloc_region(purpose, NULL);
3729 return NULL;
3730 }
3731 }
3732
3733 // Now allocate a new region.
3734 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
3735
3736 // let the caller handle alloc failure
3737 if (alloc_region != NULL) {
3738
3739 assert(check_gc_alloc_regions(), "alloc regions messed up");
3740 assert(alloc_region->saved_mark_at_top(),
3741 "Mark should have been saved already.");
3742 // We used to assert that the region was zero-filled here, but no
3743 // longer.
3744
3745 // This must be done last: once it's installed, other threads may
3746 // allocate in it (without holding the lock.)
3747 set_gc_alloc_region(purpose, alloc_region);
3748
3749 if (par) {
3750 block = alloc_region->par_allocate(word_size);
3751 } else {
3752 block = alloc_region->allocate(word_size);
3753 }
3754 // Caller handles alloc failure.
3755 } else {
3756 // This sets other purposes using the same old alloc region to NULL, also.
3757 set_gc_alloc_region(purpose, NULL);
3758 }
3759 return block; // May be NULL.
3760 }
3761
3762 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
3763 HeapWord* block = NULL;
3764 size_t free_words;
3765 do {
3766 free_words = r->free()/HeapWordSize;
3767 // If there's too little space, no one can allocate, so we're done.
3768 if (free_words < CollectedHeap::min_fill_size()) return;
3769 // Otherwise, try to claim it.
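// (If the claim fails, another thread raced us and bumped top; we
// loop, re-read the now-smaller free space, and try again until the
// remainder is claimed or drops below min_fill_size().)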
3770 block = r->par_allocate(free_words); 3771 } while (block == NULL); 3772 fill_with_object(block, free_words); 3773 } 3774 3775 #ifndef PRODUCT 3776 bool GCLabBitMapClosure::do_bit(size_t offset) { 3777 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 3778 guarantee(_cm->isMarked(oop(addr)), "it should be!"); 3779 return true; 3780 } 3781 #endif // PRODUCT 3782 3783 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) 3784 : _g1h(g1h), 3785 _refs(g1h->task_queue(queue_num)), 3786 _dcq(&g1h->dirty_card_queue_set()), 3787 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 3788 _g1_rem(g1h->g1_rem_set()), 3789 _hash_seed(17), _queue_num(queue_num), 3790 _term_attempts(0), 3791 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 3792 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 3793 _age_table(false), 3794 _strong_roots_time(0), _term_time(0), 3795 _alloc_buffer_waste(0), _undo_waste(0) 3796 { 3797 // we allocate G1YoungSurvRateNumRegions plus one entries, since 3798 // we "sacrifice" entry 0 to keep track of surviving bytes for 3799 // non-young regions (where the age is -1) 3800 // We also add a few elements at the beginning and at the end in 3801 // an attempt to eliminate cache contention 3802 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); 3803 size_t array_length = PADDING_ELEM_NUM + 3804 real_length + 3805 PADDING_ELEM_NUM; 3806 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); 3807 if (_surviving_young_words_base == NULL) 3808 vm_exit_out_of_memory(array_length * sizeof(size_t), 3809 "Not enough space for young surv histo."); 3810 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 3811 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); 3812 3813 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; 3814 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; 3815 3816 _start = os::elapsedTime(); 3817 } 3818 3819 void 3820 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) 3821 { 3822 st->print_raw_cr("GC Termination Stats"); 3823 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" 3824 " ------waste (KiB)------"); 3825 st->print_raw_cr("thr ms ms % ms % attempts" 3826 " total alloc undo"); 3827 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" 3828 " ------- ------- -------"); 3829 } 3830 3831 void 3832 G1ParScanThreadState::print_termination_stats(int i, 3833 outputStream* const st) const 3834 { 3835 const double elapsed_ms = elapsed_time() * 1000.0; 3836 const double s_roots_ms = strong_roots_time() * 1000.0; 3837 const double term_ms = term_time() * 1000.0; 3838 st->print_cr("%3d %9.2f %9.2f %6.2f " 3839 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " 3840 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), 3841 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, 3842 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), 3843 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, 3844 alloc_buffer_waste() * HeapWordSize / K, 3845 undo_waste() * HeapWordSize / K); 3846 } 3847 3848 #ifdef ASSERT 3849 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { 3850 assert(ref != NULL, "invariant"); 3851 assert(UseCompressedOops, "sanity"); 3852 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); 3853 oop p = oopDesc::load_decode_heap_oop(ref); 3854 assert(_g1h->is_in_g1_reserved(p), 3855 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, 
intptr_t(p))); 3856 return true; 3857 } 3858 3859 bool G1ParScanThreadState::verify_ref(oop* ref) const { 3860 assert(ref != NULL, "invariant"); 3861 if (has_partial_array_mask(ref)) { 3862 // Must be in the collection set--it's already been copied. 3863 oop p = clear_partial_array_mask(ref); 3864 assert(_g1h->obj_in_cs(p), 3865 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 3866 } else { 3867 oop p = oopDesc::load_decode_heap_oop(ref); 3868 assert(_g1h->is_in_g1_reserved(p), 3869 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 3870 } 3871 return true; 3872 } 3873 3874 bool G1ParScanThreadState::verify_task(StarTask ref) const { 3875 if (ref.is_narrow()) { 3876 return verify_ref((narrowOop*) ref); 3877 } else { 3878 return verify_ref((oop*) ref); 3879 } 3880 } 3881 #endif // ASSERT 3882 3883 void G1ParScanThreadState::trim_queue() { 3884 StarTask ref; 3885 do { 3886 // Drain the overflow stack first, so other threads can steal. 3887 while (refs()->pop_overflow(ref)) { 3888 deal_with_reference(ref); 3889 } 3890 while (refs()->pop_local(ref)) { 3891 deal_with_reference(ref); 3892 } 3893 } while (!refs()->is_empty()); 3894 } 3895 3896 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : 3897 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 3898 _par_scan_state(par_scan_state) { } 3899 3900 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { 3901 // This is called _after_ do_oop_work has been called, hence after 3902 // the object has been relocated to its new location and *p points 3903 // to its new location. 3904 3905 T heap_oop = oopDesc::load_heap_oop(p); 3906 if (!oopDesc::is_null(heap_oop)) { 3907 oop obj = oopDesc::decode_heap_oop(heap_oop); 3908 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), 3909 "shouldn't still be in the CSet if evacuation didn't fail."); 3910 HeapWord* addr = (HeapWord*)obj; 3911 if (_g1->is_in_g1_reserved(addr)) 3912 _cm->grayRoot(oop(addr)); 3913 } 3914 } 3915 3916 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { 3917 size_t word_sz = old->size(); 3918 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 3919 // +1 to make the -1 indexes valid... 3920 int young_index = from_region->young_index_in_cset()+1; 3921 assert( (from_region->is_young() && young_index > 0) || 3922 (!from_region->is_young() && young_index == 0), "invariant" ); 3923 G1CollectorPolicy* g1p = _g1->g1_policy(); 3924 markOop m = old->mark(); 3925 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 3926 : m->age(); 3927 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 3928 word_sz); 3929 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); 3930 oop obj = oop(obj_ptr); 3931 3932 if (obj_ptr == NULL) { 3933 // This will either forward-to-self, or detect that someone else has 3934 // installed a forwarding pointer. 3935 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); 3936 return _g1->handle_evacuation_failure_par(cl, old); 3937 } 3938 3939 // We're going to allocate linearly, so might as well prefetch ahead. 3940 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); 3941 3942 oop forward_ptr = old->forward_to_atomic(obj); 3943 if (forward_ptr == NULL) { 3944 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 3945 if (g1p->track_object_age(alloc_purpose)) { 3946 // We could simply do obj->incr_age(). However, this causes a 3947 // performance issue. 
obj->incr_age() will first check whether
3948 // the object has a displaced mark by checking its mark word;
3949 // getting the mark word from the new location of the object
3950 // stalls. So, given that we already have the mark word and we
3951 // are about to install it anyway, it's better to increase the
3952 // age on the mark word, when the object does not have a
3953 // displaced mark word. We're not expecting many objects to have
3954 // a displaced mark word, so that case is not optimized
3955 // further (it could be...) and we simply call obj->incr_age().
3956
3957 if (m->has_displaced_mark_helper()) {
3958 // in this case, we have to install the mark word first,
3959 // otherwise obj looks to be forwarded (the old mark word,
3960 // which contains the forward pointer, was copied)
3961 obj->set_mark(m);
3962 obj->incr_age();
3963 } else {
3964 m = m->incr_age();
3965 obj->set_mark(m);
3966 }
3967 _par_scan_state->age_table()->add(obj, word_sz);
3968 } else {
3969 obj->set_mark(m);
3970 }
3971
3972 // preserve "next" mark bit
3973 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
3974 if (!use_local_bitmaps ||
3975 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
3976 // if we couldn't mark it on the local bitmap (this happens when
3977 // the object was not allocated in the GCLab), we have to bite
3978 // the bullet and do the standard parallel mark
3979 _cm->markAndGrayObjectIfNecessary(obj);
3980 }
3981 #if 1
3982 if (_g1->isMarkedNext(old)) {
3983 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
3984 }
3985 #endif
3986 }
3987
3988 size_t* surv_young_words = _par_scan_state->surviving_young_words();
3989 surv_young_words[young_index] += word_sz;
3990
3991 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
3992 arrayOop(old)->set_length(0);
3993 oop* old_p = set_partial_array_mask(old);
3994 _par_scan_state->push_on_queue(old_p);
3995 } else {
3996 // No point in using the slower heap_region_containing() method,
3997 // given that we know obj is in the heap.
3998 _scanner->set_region(_g1->heap_region_containing_raw(obj));
3999 obj->oop_iterate_backwards(_scanner);
4000 }
4001 } else {
4002 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4003 obj = forward_ptr;
4004 }
4005 return obj;
4006 }
4007
4008 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
4009 template <class T>
4010 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
4011 ::do_oop_work(T* p) {
4012 oop obj = oopDesc::load_decode_heap_oop(p);
4013 assert(barrier != G1BarrierRS || obj != NULL,
4014 "Precondition: G1BarrierRS implies obj is nonNull");
4015
4016 // here the null check is implicit in the cset_fast_test() test
4017 if (_g1->in_cset_fast_test(obj)) {
4018 #if G1_REM_SET_LOGGING
4019 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
4020 "into CS.", p, (void*) obj);
4021 #endif
4022 if (obj->is_forwarded()) {
4023 oopDesc::encode_store_heap_oop(p, obj->forwardee());
4024 } else {
4025 oop copy_oop = copy_to_survivor_space(obj);
4026 oopDesc::encode_store_heap_oop(p, copy_oop);
4027 }
4028 // When scanning the RS, we only care about objs in CS.
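// (Note: "barrier" is a non-type template parameter, so an optimizing
// compiler should be able to resolve each of the tests below at
// compile time, leaving either a direct update_rs() call or nothing
// in each instantiated closure.)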
4029 if (barrier == G1BarrierRS) { 4030 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4031 } 4032 } 4033 4034 if (barrier == G1BarrierEvac && obj != NULL) { 4035 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4036 } 4037 4038 if (do_gen_barrier && obj != NULL) { 4039 par_do_barrier(p); 4040 } 4041 } 4042 4043 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); 4044 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); 4045 4046 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { 4047 assert(has_partial_array_mask(p), "invariant"); 4048 oop old = clear_partial_array_mask(p); 4049 assert(old->is_objArray(), "must be obj array"); 4050 assert(old->is_forwarded(), "must be forwarded"); 4051 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 4052 4053 objArrayOop obj = objArrayOop(old->forwardee()); 4054 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); 4055 // Process ParGCArrayScanChunk elements now 4056 // and push the remainder back onto queue 4057 int start = arrayOop(old)->length(); 4058 int end = obj->length(); 4059 int remainder = end - start; 4060 assert(start <= end, "just checking"); 4061 if (remainder > 2 * ParGCArrayScanChunk) { 4062 // Test above combines last partial chunk with a full chunk 4063 end = start + ParGCArrayScanChunk; 4064 arrayOop(old)->set_length(end); 4065 // Push remainder. 4066 oop* old_p = set_partial_array_mask(old); 4067 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); 4068 _par_scan_state->push_on_queue(old_p); 4069 } else { 4070 // Restore length so that the heap remains parsable in 4071 // case of evacuation failure. 4072 arrayOop(old)->set_length(end); 4073 } 4074 _scanner.set_region(_g1->heap_region_containing_raw(obj)); 4075 // process our set of indices (include header in first chunk) 4076 obj->oop_iterate_range(&_scanner, start, end); 4077 } 4078 4079 class G1ParEvacuateFollowersClosure : public VoidClosure { 4080 protected: 4081 G1CollectedHeap* _g1h; 4082 G1ParScanThreadState* _par_scan_state; 4083 RefToScanQueueSet* _queues; 4084 ParallelTaskTerminator* _terminator; 4085 4086 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } 4087 RefToScanQueueSet* queues() { return _queues; } 4088 ParallelTaskTerminator* terminator() { return _terminator; } 4089 4090 public: 4091 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, 4092 G1ParScanThreadState* par_scan_state, 4093 RefToScanQueueSet* queues, 4094 ParallelTaskTerminator* terminator) 4095 : _g1h(g1h), _par_scan_state(par_scan_state), 4096 _queues(queues), _terminator(terminator) {} 4097 4098 void do_void(); 4099 4100 private: 4101 inline bool offer_termination(); 4102 }; 4103 4104 bool G1ParEvacuateFollowersClosure::offer_termination() { 4105 G1ParScanThreadState* const pss = par_scan_state(); 4106 pss->start_term_time(); 4107 const bool res = terminator()->offer_termination(); 4108 pss->end_term_time(); 4109 return res; 4110 } 4111 4112 void G1ParEvacuateFollowersClosure::do_void() { 4113 StarTask stolen_task; 4114 G1ParScanThreadState* const pss = par_scan_state(); 4115 pss->trim_queue(); 4116 4117 do { 4118 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { 4119 assert(pss->verify_task(stolen_task), "sanity"); 4120 if (stolen_task.is_narrow()) { 4121 pss->push_on_queue((narrowOop*) stolen_task); 4122 } else { 4123 pss->push_on_queue((oop*) stolen_task); 4124 } 4125 pss->trim_queue(); 
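// (Re-trim after every successful steal so the stolen subtree is
// fully drained, and its children exposed for stealing by others,
// before this thread offers termination again.)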
4126 } 4127 } while (!offer_termination()); 4128 4129 pss->retire_alloc_buffers(); 4130 } 4131 4132 class G1ParTask : public AbstractGangTask { 4133 protected: 4134 G1CollectedHeap* _g1h; 4135 RefToScanQueueSet *_queues; 4136 ParallelTaskTerminator _terminator; 4137 int _n_workers; 4138 4139 Mutex _stats_lock; 4140 Mutex* stats_lock() { return &_stats_lock; } 4141 4142 size_t getNCards() { 4143 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) 4144 / G1BlockOffsetSharedArray::N_bytes; 4145 } 4146 4147 public: 4148 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) 4149 : AbstractGangTask("G1 collection"), 4150 _g1h(g1h), 4151 _queues(task_queues), 4152 _terminator(workers, _queues), 4153 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), 4154 _n_workers(workers) 4155 {} 4156 4157 RefToScanQueueSet* queues() { return _queues; } 4158 4159 RefToScanQueue *work_queue(int i) { 4160 return queues()->queue(i); 4161 } 4162 4163 void work(int i) { 4164 if (i >= _n_workers) return; // no work needed this round 4165 4166 double start_time_ms = os::elapsedTime() * 1000.0; 4167 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); 4168 4169 ResourceMark rm; 4170 HandleMark hm; 4171 4172 G1ParScanThreadState pss(_g1h, i); 4173 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 4174 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); 4175 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); 4176 4177 pss.set_evac_closure(&scan_evac_cl); 4178 pss.set_evac_failure_closure(&evac_failure_cl); 4179 pss.set_partial_scan_closure(&partial_scan_cl); 4180 4181 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); 4182 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); 4183 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); 4184 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); 4185 4186 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); 4187 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); 4188 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); 4189 4190 OopsInHeapRegionClosure *scan_root_cl; 4191 OopsInHeapRegionClosure *scan_perm_cl; 4192 4193 if (_g1h->g1_policy()->during_initial_mark_pause()) { 4194 scan_root_cl = &scan_mark_root_cl; 4195 scan_perm_cl = &scan_mark_perm_cl; 4196 } else { 4197 scan_root_cl = &only_scan_root_cl; 4198 scan_perm_cl = &only_scan_perm_cl; 4199 } 4200 4201 pss.start_strong_roots(); 4202 _g1h->g1_process_strong_roots(/* not collecting perm */ false, 4203 SharedHeap::SO_AllClasses, 4204 scan_root_cl, 4205 &push_heap_rs_cl, 4206 scan_perm_cl, 4207 i); 4208 pss.end_strong_roots(); 4209 { 4210 double start = os::elapsedTime(); 4211 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); 4212 evac.do_void(); 4213 double elapsed_ms = (os::elapsedTime()-start)*1000.0; 4214 double term_ms = pss.term_time()*1000.0; 4215 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); 4216 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); 4217 } 4218 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); 4219 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); 4220 4221 // Clean up any par-expanded rem sets. 
4222 HeapRegionRemSet::par_cleanup(); 4223 4224 if (ParallelGCVerbose) { 4225 MutexLocker x(stats_lock()); 4226 pss.print_termination_stats(i); 4227 } 4228 4229 assert(pss.refs()->is_empty(), "should be empty"); 4230 double end_time_ms = os::elapsedTime() * 1000.0; 4231 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); 4232 } 4233 }; 4234 4235 // *** Common G1 Evacuation Stuff 4236 4237 // This method is run in a GC worker. 4238 4239 void 4240 G1CollectedHeap:: 4241 g1_process_strong_roots(bool collecting_perm_gen, 4242 SharedHeap::ScanningOption so, 4243 OopClosure* scan_non_heap_roots, 4244 OopsInHeapRegionClosure* scan_rs, 4245 OopsInGenClosure* scan_perm, 4246 int worker_i) { 4247 // First scan the strong roots, including the perm gen. 4248 double ext_roots_start = os::elapsedTime(); 4249 double closure_app_time_sec = 0.0; 4250 4251 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 4252 BufferingOopsInGenClosure buf_scan_perm(scan_perm); 4253 buf_scan_perm.set_generation(perm_gen()); 4254 4255 // Walk the code cache w/o buffering, because StarTask cannot handle 4256 // unaligned oop locations. 4257 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); 4258 4259 process_strong_roots(false, // no scoping; this is parallel code 4260 collecting_perm_gen, so, 4261 &buf_scan_non_heap_roots, 4262 &eager_scan_code_roots, 4263 &buf_scan_perm); 4264 4265 // Finish up any enqueued closure apps. 4266 buf_scan_non_heap_roots.done(); 4267 buf_scan_perm.done(); 4268 double ext_roots_end = os::elapsedTime(); 4269 g1_policy()->reset_obj_copy_time(worker_i); 4270 double obj_copy_time_sec = 4271 buf_scan_non_heap_roots.closure_app_seconds() + 4272 buf_scan_perm.closure_app_seconds(); 4273 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 4274 double ext_root_time_ms = 4275 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; 4276 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); 4277 4278 // Scan strong roots in mark stack. 4279 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { 4280 concurrent_mark()->oops_do(scan_non_heap_roots); 4281 } 4282 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; 4283 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); 4284 4285 // XXX What should this be doing in the parallel case? 4286 g1_policy()->record_collection_pause_end_CH_strong_roots(); 4287 // Now scan the complement of the collection set. 4288 if (scan_rs != NULL) { 4289 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 4290 } 4291 // Finish with the ref_processor roots. 
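// (is_task_claimed() acts as a one-shot ticket per task id: exactly
// one of the parallel workers sees "false" and performs the subtask,
// and the rest skip it; that is what makes these serial phases safe
// in code executed by every worker.)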
4292 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 4293 ref_processor()->oops_do(scan_non_heap_roots); 4294 } 4295 g1_policy()->record_collection_pause_end_G1_strong_roots(); 4296 _process_strong_tasks->all_tasks_completed(); 4297 } 4298 4299 void 4300 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, 4301 OopClosure* non_root_closure) { 4302 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); 4303 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); 4304 } 4305 4306 4307 class SaveMarksClosure: public HeapRegionClosure { 4308 public: 4309 bool doHeapRegion(HeapRegion* r) { 4310 r->save_marks(); 4311 return false; 4312 } 4313 }; 4314 4315 void G1CollectedHeap::save_marks() { 4316 if (!CollectedHeap::use_parallel_gc_threads()) { 4317 SaveMarksClosure sm; 4318 heap_region_iterate(&sm); 4319 } 4320 // We do this even in the parallel case 4321 perm_gen()->save_marks(); 4322 } 4323 4324 void G1CollectedHeap::evacuate_collection_set() { 4325 set_evacuation_failed(false); 4326 4327 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 4328 concurrent_g1_refine()->set_use_cache(false); 4329 concurrent_g1_refine()->clear_hot_cache_claimed_index(); 4330 4331 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); 4332 set_par_threads(n_workers); 4333 G1ParTask g1_par_task(this, n_workers, _task_queues); 4334 4335 init_for_evac_failure(NULL); 4336 4337 rem_set()->prepare_for_younger_refs_iterate(true); 4338 4339 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); 4340 double start_par = os::elapsedTime(); 4341 if (G1CollectedHeap::use_parallel_gc_threads()) { 4342 // The individual threads will set their evac-failure closures. 4343 StrongRootsScope srs(this); 4344 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); 4345 workers()->run_task(&g1_par_task); 4346 } else { 4347 StrongRootsScope srs(this); 4348 g1_par_task.work(0); 4349 } 4350 4351 double par_time = (os::elapsedTime() - start_par) * 1000.0; 4352 g1_policy()->record_par_time(par_time); 4353 set_par_threads(0); 4354 // Is this the right thing to do here? We don't save marks 4355 // on individual heap regions when we allocate from 4356 // them in parallel, so this seems like the correct place for this. 4357 retire_all_alloc_regions(); 4358 { 4359 G1IsAliveClosure is_alive(this); 4360 G1KeepAliveClosure keep_alive(this); 4361 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 4362 } 4363 release_gc_alloc_regions(false /* totally */); 4364 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 4365 4366 concurrent_g1_refine()->clear_hot_cache(); 4367 concurrent_g1_refine()->set_use_cache(true); 4368 4369 finalize_for_evac_failure(); 4370 4371 // Must do this before removing self-forwarding pointers, which clears 4372 // the per-region evac-failure flags. 
4373 concurrent_mark()->complete_marking_in_collection_set(); 4374 4375 if (evacuation_failed()) { 4376 remove_self_forwarding_pointers(); 4377 if (PrintGCDetails) { 4378 gclog_or_tty->print(" (to-space overflow)"); 4379 } else if (PrintGC) { 4380 gclog_or_tty->print("--"); 4381 } 4382 } 4383 4384 if (G1DeferredRSUpdate) { 4385 RedirtyLoggedCardTableEntryFastClosure redirty; 4386 dirty_card_queue_set().set_closure(&redirty); 4387 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); 4388 4389 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); 4390 dcq.merge_bufferlists(&dirty_card_queue_set()); 4391 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 4392 } 4393 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 4394 } 4395 4396 void G1CollectedHeap::free_region(HeapRegion* hr) { 4397 size_t pre_used = 0; 4398 size_t cleared_h_regions = 0; 4399 size_t freed_regions = 0; 4400 UncleanRegionList local_list; 4401 4402 HeapWord* start = hr->bottom(); 4403 HeapWord* end = hr->prev_top_at_mark_start(); 4404 size_t used_bytes = hr->used(); 4405 size_t live_bytes = hr->max_live_bytes(); 4406 if (used_bytes > 0) { 4407 guarantee( live_bytes <= used_bytes, "invariant" ); 4408 } else { 4409 guarantee( live_bytes == 0, "invariant" ); 4410 } 4411 4412 size_t garbage_bytes = used_bytes - live_bytes; 4413 if (garbage_bytes > 0) 4414 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); 4415 4416 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, 4417 &local_list); 4418 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 4419 &local_list); 4420 } 4421 4422 void 4423 G1CollectedHeap::free_region_work(HeapRegion* hr, 4424 size_t& pre_used, 4425 size_t& cleared_h_regions, 4426 size_t& freed_regions, 4427 UncleanRegionList* list, 4428 bool par) { 4429 pre_used += hr->used(); 4430 if (hr->isHumongous()) { 4431 assert(hr->startsHumongous(), 4432 "Only the start of a humongous region should be freed."); 4433 int ind = _hrs->find(hr); 4434 assert(ind != -1, "Should have an index."); 4435 // Clear the start region. 4436 hr->hr_clear(par, true /*clear_space*/); 4437 list->insert_before_head(hr); 4438 cleared_h_regions++; 4439 freed_regions++; 4440 // Clear any continued regions. 4441 ind++; 4442 while ((size_t)ind < n_regions()) { 4443 HeapRegion* hrc = _hrs->at(ind); 4444 if (!hrc->continuesHumongous()) break; 4445 // Otherwise, does continue the H region. 4446 assert(hrc->humongous_start_region() == hr, "Huh?"); 4447 hrc->hr_clear(par, true /*clear_space*/); 4448 cleared_h_regions++; 4449 freed_regions++; 4450 list->insert_before_head(hrc); 4451 ind++; 4452 } 4453 } else { 4454 hr->hr_clear(par, true /*clear_space*/); 4455 list->insert_before_head(hr); 4456 freed_regions++; 4457 // If we're using clear2, this should not be enabled. 4458 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); 4459 } 4460 } 4461 4462 void G1CollectedHeap::finish_free_region_work(size_t pre_used, 4463 size_t cleared_h_regions, 4464 size_t freed_regions, 4465 UncleanRegionList* list) { 4466 if (list != NULL && list->sz() > 0) { 4467 prepend_region_list_on_unclean_list(list); 4468 } 4469 // Acquire a lock, if we're parallel, to update possibly-shared 4470 // variables. 4471 Mutex* lock = (n_par_threads() > 0) ? 
ParGCRareEvent_lock : NULL;
4472 {
4473 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
4474 _summary_bytes_used -= pre_used;
4475 _num_humongous_regions -= (int) cleared_h_regions;
4476 _free_regions += freed_regions;
4477 }
4478 }
4479
4480
4481 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
4482 while (list != NULL) {
4483 guarantee( list->is_young(), "invariant" );
4484
4485 HeapWord* bottom = list->bottom();
4486 HeapWord* end = list->end();
4487 MemRegion mr(bottom, end);
4488 ct_bs->dirty(mr);
4489
4490 list = list->get_next_young_region();
4491 }
4492 }
4493
4494
4495 class G1ParCleanupCTTask : public AbstractGangTask {
4496 CardTableModRefBS* _ct_bs;
4497 G1CollectedHeap* _g1h;
4498 HeapRegion* volatile _su_head;
4499 public:
4500 G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
4501 G1CollectedHeap* g1h,
4502 HeapRegion* survivor_list) :
4503 AbstractGangTask("G1 Par Cleanup CT Task"),
4504 _ct_bs(ct_bs),
4505 _g1h(g1h),
4506 _su_head(survivor_list)
4507 { }
4508
4509 void work(int i) {
4510 HeapRegion* r;
4511 while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
4512 clear_cards(r);
4513 }
4514 // Redirty the cards of the survivor regions.
4515 dirty_list(&this->_su_head);
4516 }
4517
4518 void clear_cards(HeapRegion* r) {
4519 // Cards for Survivor regions will be dirtied later.
4520 if (!r->is_survivor()) {
4521 _ct_bs->clear(MemRegion(r->bottom(), r->end()));
4522 }
4523 }
4524
4525 void dirty_list(HeapRegion* volatile * head_ptr) {
4526 HeapRegion* head;
4527 do {
4528 // Pop region off the list.
4529 head = *head_ptr;
4530 if (head != NULL) {
4531 HeapRegion* r = (HeapRegion*)
4532 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
4533 if (r == head) {
4534 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
4535 _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
4536 }
4537 }
4538 } while (*head_ptr != NULL);
4539 }
4540 };
4541
4542
4543 #ifndef PRODUCT
4544 class G1VerifyCardTableCleanup: public HeapRegionClosure {
4545 CardTableModRefBS* _ct_bs;
4546 public:
4547 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
4548 : _ct_bs(ct_bs)
4549 { }
4550 virtual bool doHeapRegion(HeapRegion* r)
4551 {
4552 MemRegion mr(r->bottom(), r->end());
4553 if (r->is_survivor()) {
4554 _ct_bs->verify_dirty_region(mr);
4555 } else {
4556 _ct_bs->verify_clean_region(mr);
4557 }
4558 return false;
4559 }
4560 };
4561 #endif
4562
4563 void G1CollectedHeap::cleanUpCardTable() {
4564 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
4565 double start = os::elapsedTime();
4566
4567 // Iterate over the dirty cards region list.
4568 G1ParCleanupCTTask cleanup_task(ct_bs, this,
4569 _young_list->first_survivor_region());
4570
4571 if (ParallelGCThreads > 0) {
4572 set_par_threads(workers()->total_workers());
4573 workers()->run_task(&cleanup_task);
4574 set_par_threads(0);
4575 } else {
4576 while (_dirty_cards_region_list) {
4577 HeapRegion* r = _dirty_cards_region_list;
4578 cleanup_task.clear_cards(r);
4579 _dirty_cards_region_list = r->get_next_dirty_cards_region();
4580 if (_dirty_cards_region_list == r) {
4581 // The last region.
4582 _dirty_cards_region_list = NULL;
4583 }
4584 r->set_next_dirty_cards_region(NULL);
4585 }
4586 // now, redirty the cards of the survivor regions
4587 // (it seemed faster to do it this way, instead of iterating over
4588 // all regions and then clearing / dirtying as appropriate)
4589 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4590 }
4591
4592 double elapsed = os::elapsedTime() - start;
4593 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4594 #ifndef PRODUCT
4595 if (G1VerifyCTCleanup || VerifyAfterGC) {
4596 G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
4597 heap_region_iterate(&cleanup_verifier);
4598 }
4599 #endif
4600 }
4601
4602 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4603 if (g1_policy()->should_do_collection_pause(word_size)) {
4604 do_collection_pause();
4605 }
4606 }
4607
4608 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4609 double young_time_ms = 0.0;
4610 double non_young_time_ms = 0.0;
4611
4612 // Since the collection set is a superset of the young list,
4613 // all we need to do to clear the young list is clear its
4614 // head and length, and unlink any young regions in the code below.
4615 _young_list->clear();
4616
4617 G1CollectorPolicy* policy = g1_policy();
4618
4619 double start_sec = os::elapsedTime();
4620 bool non_young = true;
4621
4622 HeapRegion* cur = cs_head;
4623 int age_bound = -1;
4624 size_t rs_lengths = 0;
4625
4626 while (cur != NULL) {
4627 if (non_young) {
4628 if (cur->is_young()) {
4629 double end_sec = os::elapsedTime();
4630 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4631 non_young_time_ms += elapsed_ms;
4632
4633 start_sec = os::elapsedTime();
4634 non_young = false;
4635 }
4636 } else {
4637 if (!cur->is_on_free_list()) {
4638 double end_sec = os::elapsedTime();
4639 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4640 young_time_ms += elapsed_ms;
4641
4642 start_sec = os::elapsedTime();
4643 non_young = true;
4644 }
4645 }
4646
4647 rs_lengths += cur->rem_set()->occupied();
4648
4649 HeapRegion* next = cur->next_in_collection_set();
4650 assert(cur->in_collection_set(), "bad CS");
4651 cur->set_next_in_collection_set(NULL);
4652 cur->set_in_collection_set(false);
4653
4654 if (cur->is_young()) {
4655 int index = cur->young_index_in_cset();
4656 guarantee( index != -1, "invariant" );
4657 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
4658 size_t words_survived = _surviving_young_words[index];
4659 cur->record_surv_words_in_group(words_survived);
4660
4661 // At this point we have 'popped' cur from the collection set
4662 // (linked via next_in_collection_set()) but it is still in the
4663 // young list (linked via next_young_region()). Clear the
4664 // _next_young_region field.
4665 cur->set_next_young_region(NULL);
4666 } else {
4667 int index = cur->young_index_in_cset();
4668 guarantee( index == -1, "invariant" );
4669 }
4670
4671 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4672 (!cur->is_young() && cur->young_index_in_cset() == -1),
4673 "invariant" );
4674
4675 if (!cur->evacuation_failed()) {
4676 // And the region is non-empty.
4677 assert(!cur->is_empty(),
4678 "Should not have empty regions in a CS.");
4679 free_region(cur);
4680 } else {
4681 cur->uninstall_surv_rate_group();
4682 if (cur->is_young())
4683 cur->set_young_index_in_cset(-1);
4684 cur->set_not_young();
4685 cur->set_evacuation_failed(false);
4686 }
4687 cur = next;
4688 }
4689
4690 policy->record_max_rs_lengths(rs_lengths);
4691 policy->cset_regions_freed();
4692
4693 double end_sec = os::elapsedTime();
4694 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4695 if (non_young)
4696 non_young_time_ms += elapsed_ms;
4697 else
4698 young_time_ms += elapsed_ms;
4699
4700 policy->record_young_free_cset_time_ms(young_time_ms);
4701 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
4702 }
4703
4704 // This routine is similar to the above but does not record
4705 // any policy statistics or update free lists; we are abandoning
4706 // the current incremental collection set in preparation for a
4707 // full collection. After the full GC we will start to build up
4708 // the incremental collection set again.
4709 // This is only called when we're doing a full collection
4710 // and is immediately followed by the tearing down of the young list.
4711
4712 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
4713 HeapRegion* cur = cs_head;
4714
4715 while (cur != NULL) {
4716 HeapRegion* next = cur->next_in_collection_set();
4717 assert(cur->in_collection_set(), "bad CS");
4718 cur->set_next_in_collection_set(NULL);
4719 cur->set_in_collection_set(false);
4720 cur->set_young_index_in_cset(-1);
4721 cur = next;
4722 }
4723 }
4724
4725 HeapRegion*
4726 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
4727 assert(ZF_mon->owned_by_self(), "Precondition");
4728 HeapRegion* res = pop_unclean_region_list_locked();
4729 if (res != NULL) {
4730 assert(!res->continuesHumongous() &&
4731 res->zero_fill_state() != HeapRegion::Allocated,
4732 "Only free regions on unclean list.");
4733 if (zero_filled) {
4734 res->ensure_zero_filled_locked();
4735 res->set_zero_fill_allocated();
4736 }
4737 }
4738 return res;
4739 }
4740
4741 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
4742 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
4743 return alloc_region_from_unclean_list_locked(zero_filled);
4744 }
4745
4746 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
4747 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4748 put_region_on_unclean_list_locked(r);
4749 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4750 }
4751
4752 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
4753 MutexLockerEx x(Cleanup_mon);
4754 set_unclean_regions_coming_locked(b);
4755 }
4756
4757 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
4758 assert(Cleanup_mon->owned_by_self(), "Precondition");
4759 _unclean_regions_coming = b;
4760 // Wake up mutator threads that might be waiting for completeCleanup to
4761 // finish.
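// Waiters loop on _unclean_regions_coming while holding Cleanup_mon
// (see wait_for_cleanup_complete_locked() below), so notifying only
// on the transition to false wakes them exactly when the condition
// they re-test has cleared.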
4762 if (!b) Cleanup_mon->notify_all(); 4763 } 4764 4765 void G1CollectedHeap::wait_for_cleanup_complete() { 4766 MutexLockerEx x(Cleanup_mon); 4767 wait_for_cleanup_complete_locked(); 4768 } 4769 4770 void G1CollectedHeap::wait_for_cleanup_complete_locked() { 4771 assert(Cleanup_mon->owned_by_self(), "precondition"); 4772 while (_unclean_regions_coming) { 4773 Cleanup_mon->wait(); 4774 } 4775 } 4776 4777 void 4778 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { 4779 assert(ZF_mon->owned_by_self(), "precondition."); 4780 #ifdef ASSERT 4781 if (r->is_gc_alloc_region()) { 4782 ResourceMark rm; 4783 stringStream region_str; 4784 print_on(®ion_str); 4785 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", 4786 region_str.as_string())); 4787 } 4788 #endif 4789 _unclean_region_list.insert_before_head(r); 4790 } 4791 4792 void 4793 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { 4794 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4795 prepend_region_list_on_unclean_list_locked(list); 4796 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4797 } 4798 4799 void 4800 G1CollectedHeap:: 4801 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { 4802 assert(ZF_mon->owned_by_self(), "precondition."); 4803 _unclean_region_list.prepend_list(list); 4804 } 4805 4806 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { 4807 assert(ZF_mon->owned_by_self(), "precondition."); 4808 HeapRegion* res = _unclean_region_list.pop(); 4809 if (res != NULL) { 4810 // Inform ZF thread that there's a new unclean head. 4811 if (_unclean_region_list.hd() != NULL && should_zf()) 4812 ZF_mon->notify_all(); 4813 } 4814 return res; 4815 } 4816 4817 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { 4818 assert(ZF_mon->owned_by_self(), "precondition."); 4819 return _unclean_region_list.hd(); 4820 } 4821 4822 4823 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { 4824 assert(ZF_mon->owned_by_self(), "Precondition"); 4825 HeapRegion* r = peek_unclean_region_list_locked(); 4826 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { 4827 // Result of below must be equal to "r", since we hold the lock. 
4828 (void)pop_unclean_region_list_locked(); 4829 put_free_region_on_list_locked(r); 4830 return true; 4831 } else { 4832 return false; 4833 } 4834 } 4835 4836 bool G1CollectedHeap::move_cleaned_region_to_free_list() { 4837 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4838 return move_cleaned_region_to_free_list_locked(); 4839 } 4840 4841 4842 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { 4843 assert(ZF_mon->owned_by_self(), "precondition."); 4844 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4845 assert(r->zero_fill_state() == HeapRegion::ZeroFilled, 4846 "Regions on free list must be zero filled"); 4847 assert(!r->isHumongous(), "Must not be humongous."); 4848 assert(r->is_empty(), "Better be empty"); 4849 assert(!r->is_on_free_list(), 4850 "Better not already be on free list"); 4851 assert(!r->is_on_unclean_list(), 4852 "Better not already be on unclean list"); 4853 r->set_on_free_list(true); 4854 r->set_next_on_free_list(_free_region_list); 4855 _free_region_list = r; 4856 _free_region_list_size++; 4857 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4858 } 4859 4860 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { 4861 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4862 put_free_region_on_list_locked(r); 4863 } 4864 4865 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { 4866 assert(ZF_mon->owned_by_self(), "precondition."); 4867 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4868 HeapRegion* res = _free_region_list; 4869 if (res != NULL) { 4870 _free_region_list = res->next_from_free_list(); 4871 _free_region_list_size--; 4872 res->set_on_free_list(false); 4873 res->set_next_on_free_list(NULL); 4874 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4875 } 4876 return res; 4877 } 4878 4879 4880 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { 4881 // By self, or on behalf of self. 4882 assert(Heap_lock->is_locked(), "Precondition"); 4883 HeapRegion* res = NULL; 4884 bool first = true; 4885 while (res == NULL) { 4886 if (zero_filled || !first) { 4887 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4888 res = pop_free_region_list_locked(); 4889 if (res != NULL) { 4890 assert(!res->zero_fill_is_allocated(), 4891 "No allocated regions on free list."); 4892 res->set_zero_fill_allocated(); 4893 } else if (!first) { 4894 break; // We tried both, time to return NULL. 4895 } 4896 } 4897 4898 if (res == NULL) { 4899 res = alloc_region_from_unclean_list(zero_filled); 4900 } 4901 assert(res == NULL || 4902 !zero_filled || 4903 res->zero_fill_is_allocated(), 4904 "We must have allocated the region we're returning"); 4905 first = false; 4906 } 4907 return res; 4908 } 4909 4910 void G1CollectedHeap::remove_allocated_regions_from_lists() { 4911 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4912 { 4913 HeapRegion* prev = NULL; 4914 HeapRegion* cur = _unclean_region_list.hd(); 4915 while (cur != NULL) { 4916 HeapRegion* next = cur->next_from_unclean_list(); 4917 if (cur->zero_fill_is_allocated()) { 4918 // Remove from the list. 
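// (Standard singly-linked unlink: at the head we simply pop; in the
// interior we splice around the node via delete_after(prev).)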
4919 if (prev == NULL) { 4920 (void)_unclean_region_list.pop(); 4921 } else { 4922 _unclean_region_list.delete_after(prev); 4923 } 4924 cur->set_on_unclean_list(false); 4925 cur->set_next_on_unclean_list(NULL); 4926 } else { 4927 prev = cur; 4928 } 4929 cur = next; 4930 } 4931 assert(_unclean_region_list.sz() == unclean_region_list_length(), 4932 "Inv"); 4933 } 4934 4935 { 4936 HeapRegion* prev = NULL; 4937 HeapRegion* cur = _free_region_list; 4938 while (cur != NULL) { 4939 HeapRegion* next = cur->next_from_free_list(); 4940 if (cur->zero_fill_is_allocated()) { 4941 // Remove from the list. 4942 if (prev == NULL) { 4943 _free_region_list = cur->next_from_free_list(); 4944 } else { 4945 prev->set_next_on_free_list(cur->next_from_free_list()); 4946 } 4947 cur->set_on_free_list(false); 4948 cur->set_next_on_free_list(NULL); 4949 _free_region_list_size--; 4950 } else { 4951 prev = cur; 4952 } 4953 cur = next; 4954 } 4955 assert(_free_region_list_size == free_region_list_length(), "Inv"); 4956 } 4957 } 4958 4959 bool G1CollectedHeap::verify_region_lists() { 4960 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4961 return verify_region_lists_locked(); 4962 } 4963 4964 bool G1CollectedHeap::verify_region_lists_locked() { 4965 HeapRegion* unclean = _unclean_region_list.hd(); 4966 while (unclean != NULL) { 4967 guarantee(unclean->is_on_unclean_list(), "Well, it is!"); 4968 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); 4969 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, 4970 "Everything else is possible."); 4971 unclean = unclean->next_from_unclean_list(); 4972 } 4973 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); 4974 4975 HeapRegion* free_r = _free_region_list; 4976 while (free_r != NULL) { 4977 assert(free_r->is_on_free_list(), "Well, it is!"); 4978 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); 4979 switch (free_r->zero_fill_state()) { 4980 case HeapRegion::NotZeroFilled: 4981 case HeapRegion::ZeroFilling: 4982 guarantee(false, "Should not be on free list."); 4983 break; 4984 default: 4985 // Everything else is possible. 4986 break; 4987 } 4988 free_r = free_r->next_from_free_list(); 4989 } 4990 guarantee(_free_region_list_size == free_region_list_length(), "Inv"); 4991 // If we didn't do an assertion... 4992 return true; 4993 } 4994 4995 size_t G1CollectedHeap::free_region_list_length() { 4996 assert(ZF_mon->owned_by_self(), "precondition."); 4997 size_t len = 0; 4998 HeapRegion* cur = _free_region_list; 4999 while (cur != NULL) { 5000 len++; 5001 cur = cur->next_from_free_list(); 5002 } 5003 return len; 5004 } 5005 5006 size_t G1CollectedHeap::unclean_region_list_length() { 5007 assert(ZF_mon->owned_by_self(), "precondition."); 5008 return _unclean_region_list.length(); 5009 } 5010 5011 size_t G1CollectedHeap::n_regions() { 5012 return _hrs->length(); 5013 } 5014 5015 size_t G1CollectedHeap::max_regions() { 5016 return 5017 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / 5018 HeapRegion::GrainBytes; 5019 } 5020 5021 size_t G1CollectedHeap::free_regions() { 5022 /* Possibly-expensive assert. 
size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
         "_free_regions is off.");
  */
  return _free_regions;
}

bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}

class RegionCounter: public HeapRegionClosure {
  size_t _n;
public:
  RegionCounter() : _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      _n++;
    }
    return false;
  }
  size_t res() { return _n; }
};

size_t G1CollectedHeap::count_free_regions() {
  RegionCounter rc;
  heap_region_iterate(&rc);
  size_t n = rc.res();
  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
    n--;
  return n;
}

size_t G1CollectedHeap::count_free_regions_list() {
  size_t n = 0;
  ZF_mon->lock_without_safepoint_check();
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    cur = cur->next_from_free_list();
    n++;
  }
  size_t m = unclean_region_list_length();
  ZF_mon->unlock();
  return n + m;
}

bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return (g1_policy()->in_young_gc_mode() &&
          g1_policy()->should_add_next_region_to_young_list());
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}

class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  bool ret = _young_list->check_list_empty(check_sample);

  if (check_heap) {
    NoYoungRegionsClosure closure;
    heap_region_iterate(&closure);
    ret = ret && closure.success();
  }

  return ret;
}

void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}

bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  bool no_allocs = true;
  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    no_allocs = r == NULL || r->saved_mark_at_top();
  }
  return no_allocs;
}
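// RegionCounter and NoYoungRegionsClosure above both follow the
// HeapRegionClosure visitor protocol: heap_region_iterate() calls
// doHeapRegion() on every region, and a true return value terminates the
// iteration early. A standalone sketch of that protocol (illustrative only;
// toy types, guarded out of compilation):
#if 0
struct Region { bool empty; };

struct RegionClosure {
  // Return true to stop the iteration early.
  virtual bool do_region(Region* r) = 0;
};

struct EmptyCounter : public RegionClosure {
  int n;
  EmptyCounter() : n(0) {}
  virtual bool do_region(Region* r) {
    if (r->empty) n++;
    return false;  // never stop early; visit every region
  }
};

static void iterate(Region* regions, int len, RegionClosure* cl) {
  for (int i = 0; i < len; i++) {
    if (cl->do_region(&regions[i])) return;  // closure asked to stop
  }
}
#endif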
void G1CollectedHeap::retire_all_alloc_regions() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    if (r != NULL) {
      // Check for aliases: the same region may back several purposes.
      bool has_processed_alias = false;
      for (int i = 0; i < ap; ++i) {
        if (_gc_alloc_regions[i] == r) {
          has_processed_alias = true;
          break;
        }
      }
      if (!has_processed_alias) {
        retire_alloc_region(r, false /* par */);
      }
    }
  }
}

// Done at the start of full GC.
void G1CollectedHeap::tear_down_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  while (pop_unclean_region_list_locked() != NULL) ;
  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
         "Postconditions of loop.");
  while (pop_free_region_list_locked() != NULL) ;
  assert(_free_region_list == NULL, "Postcondition of loop.");
  if (_free_region_list_size != 0) {
    gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size);
    print_on(gclog_or_tty, true /* extended */);
  }
  assert(_free_region_list_size == 0, "Postconditions of loop.");
}

class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                            pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      _n++;
      switch (r->zero_fill_state()) {
        case HeapRegion::NotZeroFilled:
        case HeapRegion::ZeroFilling:
          _g1->put_region_on_unclean_list_locked(r);
          break;
        case HeapRegion::Allocated:
          r->set_zero_fill_complete();
          // no break; fall through and put the region on the free list.
        case HeapRegion::ZeroFilled:
          _g1->put_free_region_on_list_locked(r);
          break;
      }
    }
    return false;
  }

  int getFreeRegionCount() { return _n; }
};

// Done at the end of full GC.
void G1CollectedHeap::rebuild_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the end of the full GC.
  RegionResetter rs;
  heap_region_iterate(&rs);
  _free_regions = rs.getFreeRegionCount();
  // Tell the ZF thread it may have work to do.
  if (should_zf()) ZF_mon->notify_all();
}

class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
public:
  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // There are assertions in "set_zero_fill_needed()" below that
      // require top() == bottom(), so this is technically illegal.
      // We'll skirt the law here, by making that true temporarily.
      DEBUG_ONLY(HeapWord* save_top = r->top();
                 r->set_top(r->bottom()));
      r->set_zero_fill_needed();
      DEBUG_ONLY(r->set_top(save_top));
    }
    return false;
  }
};
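// UsedRegionsNeedZeroFillSetter above briefly violates the
// top() == bottom() expectation of set_zero_fill_needed() by saving top,
// resetting it, and restoring it afterwards -- and only in debug builds,
// since the assertions it appeases exist only there. A standalone sketch of
// that save/mutate/restore idiom (illustrative only; toy types, with a
// plain macro standing in for HotSpot's DEBUG_ONLY; guarded out of
// compilation):
#if 0
#ifdef ASSERT
#define DEBUG_ONLY(code) code
#else
#define DEBUG_ONLY(code)
#endif

struct Obj {
  int top;
  int bottom;
  bool needs_work;
  void set_needs_work() {
    // A real implementation would assert(top == bottom, ...) here.
    needs_work = true;
  }
};

static void mark_needs_work(Obj* o) {
  // Save the real top, make the object look empty, then restore.
  DEBUG_ONLY(int save_top = o->top;
             o->top = o->bottom);
  o->set_needs_work();
  DEBUG_ONLY(o->top = save_top);
}
#endif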
// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to be done at the start of the full GC.
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}

#ifndef PRODUCT

class PrintHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion *r) {
    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
    if (r != NULL) {
      if (r->is_on_free_list())
        gclog_or_tty->print("Free ");
      if (r->is_young())
        gclog_or_tty->print("Young ");
      if (r->isHumongous())
        gclog_or_tty->print("Is Humongous ");
      r->print();
    }
    return false;
  }
};

class SortHeapRegionClosure : public HeapRegionClosure {
  size_t young_regions, free_regions, unclean_regions;
  size_t hum_regions, count;
  size_t unaccounted, cur_unclean, cur_alloc;
  size_t total_free;
  HeapRegion* cur;
public:
  SortHeapRegionClosure(HeapRegion *_cur) :
    young_regions(0), free_regions(0), unclean_regions(0),
    hum_regions(0), count(0),
    unaccounted(0), cur_unclean(0), cur_alloc(0),
    total_free(0), cur(_cur)
  {}
  bool doHeapRegion(HeapRegion *r) {
    count++;
    if (r->is_on_free_list()) free_regions++;
    else if (r->is_on_unclean_list()) unclean_regions++;
    else if (r->isHumongous()) hum_regions++;
    else if (r->is_young()) young_regions++;
    else if (r == cur) cur_alloc++;
    else unaccounted++;
    return false;
  }
  void print() {
    total_free = free_regions + unclean_regions;
    gclog_or_tty->print(SIZE_FORMAT" regions\n", count);
    gclog_or_tty->print(SIZE_FORMAT" free: free_list = "SIZE_FORMAT
                        " unclean = "SIZE_FORMAT"\n",
                        total_free, free_regions, unclean_regions);
    gclog_or_tty->print(SIZE_FORMAT" humongous "SIZE_FORMAT" young\n",
                        hum_regions, young_regions);
    gclog_or_tty->print(SIZE_FORMAT" cur_alloc\n", cur_alloc);
    gclog_or_tty->print("UHOH unaccounted = "SIZE_FORMAT"\n", unaccounted);
  }
};

void G1CollectedHeap::print_region_counts() {
  SortHeapRegionClosure sc(_cur_alloc_region);
  PrintHeapRegionClosure cl;
  heap_region_iterate(&cl);
  heap_region_iterate(&sc);
  sc.print();
  print_region_accounting_info();
}

bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::print_region_accounting_info() {
  gclog_or_tty->print_cr("Free regions: "SIZE_FORMAT" (count: "SIZE_FORMAT
                         " count list "SIZE_FORMAT") (clean: "SIZE_FORMAT
                         " unclean: "SIZE_FORMAT").",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  if (hr == NULL) {
    return is_in_permanent(p);
  } else {
    return hr->is_in(p);
  }
}
#endif // !PRODUCT

void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}
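// SortHeapRegionClosure above classifies every region into exactly one
// bucket via an else-if chain, with `unaccounted` as the catch-all, so the
// bucket totals must sum to `count`. A standalone sketch of that accounting
// shape (illustrative only; toy types, guarded out of compilation):
#if 0
#include <cstddef>

enum Kind { FREE, UNCLEAN, HUMONGOUS, YOUNG, OTHER };

struct Counts { size_t free, unclean, hum, young, unaccounted, total; };

static void classify(const Kind* kinds, size_t len, Counts* c) {
  for (size_t i = 0; i < len; i++) {
    c->total++;
    if      (kinds[i] == FREE)      c->free++;
    else if (kinds[i] == UNCLEAN)   c->unclean++;
    else if (kinds[i] == HUMONGOUS) c->hum++;
    else if (kinds[i] == YOUNG)     c->young++;
    else                            c->unaccounted++;  // catch-all bucket
  }
  // Invariant: every region landed in exactly one bucket, so
  // free + unclean + hum + young + unaccounted == total.
}
#endif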